Loading [MathJax]/extensions/TeX/AMSsymbols.js
LALSimulation 6.2.0.1-8a6b96f
All Data Structures Namespaces Files Functions Variables Typedefs Enumerations Enumerator Macros Modules Pages
LALSimIMRPhenomX_intermediate.c
Go to the documentation of this file.
1/*
2 * Copyright (C) 2018 Geraint Pratten
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; see the file COPYING. If not, write to the
16 * Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
17 * MA 02110-1301 USA
18 */
19
20
21/**
22 * \author Geraint Pratten
23 *
24 * \file
25 *
26 * \brief Internal function for IMRPhenomX phenomenological waveform model, arXiv:2001.11412
27 *
28 */
29
31
32
33/******************************* IMRPhenomX Amplitude Functions *******************************/
34
35/******************************* Amplitude Functions: Merger *******************************/
36
37/*
38 See section VI. B of arXiv:2001.11412
39
40 Value of intermediate amplitude collocation vA point at f_2, defined in Table II of arXiv:2001.11412
41
42 Effective spin parameterization used = StotR
43*/
44static double IMRPhenomX_Intermediate_Amp_22_vA(double eta, double S, double dchi, double delta, int IntAmpFlag){
45
46 double eta2 = (eta*eta);
47 double eta3 = (eta2*eta);
48
49 double S2 = S*S;
50
51 double noSpin, eqSpin, uneqSpin;
52
53 switch ( IntAmpFlag )
54 {
55 case 104:
56 {
57
58 noSpin = (1.4873184918202145 + 1974.6112656679577*eta + 27563.641024162127*eta2 - 19837.908020966777*eta3)/(1. + 143.29004876335128*eta + 458.4097306093354*eta2);
59
60 eqSpin = (S*(27.952730865904343 + eta*(-365.55631765202895 - 260.3494489873286*S) + 3.2646808851249016*S + 3011.446602208493*eta2*S - 19.38970173389662*S2 + eta3*(1612.2681322644232 - 6962.675551371755*S + 1486.4658089990298*S2)))/(12.647425554323242 - 10.540154508599963*S + 1.*S2);
61
62 uneqSpin = dchi*delta*(-0.016404056649860943 - 296.473359655246*eta)*eta2;
63
64 break;
65 }
66 default:
67 {
68 XLAL_ERROR_REAL8(XLAL_EINVAL, "Error in IMRPhenomX_Intermediate_Amp_22_vA: IMRPhenomXIntermediateAmpVersion is not valid. Recommended flag is 104. \n");
69 }
70 }
71
72 return (noSpin + eqSpin + uneqSpin);
73
74}
75
76/*
77 See section VI. B of arXiv:2001.11412. Collocation point for 5th order polynomial ansatz.
78
79 Value of intermediate amplitude collocation point v2 at f_2, defined in Table I of arXiv:2001.11412
80
81 Effective spin parameterization used = StotR
82*/
83static double IMRPhenomX_Intermediate_Amp_22_v2(double eta, double S, double dchi, double delta, int IntAmpFlag){
84
85 double eta2 = (eta*eta);
86 double eta3 = (eta2*eta);
87
88 double S2 = S*S;
89
90 double noSpin, eqSpin, uneqSpin;
91
92 switch ( IntAmpFlag )
93 {
94 case 1043: // 1043 is used in IMRPhenomXHM
95 case 105:
96 {
97 noSpin = (2.2436523786378983 + 2162.4749081764216*eta + 24460.158604784723*eta2 - 12112.140570900956*eta3)/(1. + 120.78623282522702*eta + 416.4179522274108*eta2);
98
99 eqSpin = (S*(6.727511603827924 + eta2*(414.1400701039126 - 234.3754066885935*S) - 5.399284768639545*S
100 + eta*(-186.87972530996245 + 128.7402290554767*S)))/(3.24359204029217 - 3.975650468231452*S + 1.*S2);
101
102 uneqSpin = dchi*delta*(-59.52510939953099 + 13.12679437100751*eta)*eta2;
103
104 break;
105 }
106 default:
107 {
108 XLAL_ERROR_REAL8(XLAL_EINVAL, "Error in IMRPhenomX_Intermediate_Amp_22_v2: IMRPhenomXIntermediateAmpVersion is not valid. Recommended flag is 104 (should not call *_Amp_22_v2). \n");
109 }
110 }
111
112 return (noSpin + eqSpin + uneqSpin);
113
114}
115
116/*
117 See section VI. B of arXiv:2001.11412. Collocation point for 5th order polynomial ansatz.
118
119 Value of intermediate amplitude collocation point v3 at f_3, defined in Table I of arXiv:2001.11412
120
121 Effective spin parameterization used = StotR
122*/
123static double IMRPhenomX_Intermediate_Amp_22_v3(double eta, double S, double dchi, double delta, int IntAmpFlag){
124
125 double eta2 = (eta*eta);
126 double eta3 = (eta2*eta);
127
128 double S2 = S*S;
129
130 double noSpin, eqSpin, uneqSpin;
131
132 switch ( IntAmpFlag )
133 {
134 case 1043: // 1043 is used in IMRPhenomXHM
135 case 105:
136 {
137 noSpin = (1.195392410912163 + 1677.2558976605421*eta + 24838.37133975971*eta2 - 17277.938868280915*eta3)/(1. + 144.78606839716073*eta + 428.8155916011666*eta2);
138
139 eqSpin = (S*(-2.1413952025647793 + 0.5719137940424858*S + eta*(46.61350006858767 + 0.40917927503842105*S - 11.526500209146906*S2)
140 + 1.1833965566688387*S2 + eta2*(-84.82318288272965 - 34.90591158988979*S + 19.494962340530186*S2)))/(-1.4786392693666195 + 1.*S);
141
142 uneqSpin = dchi*delta*(-333.7662575986524 + 532.2475589084717*eta)*eta3;
143
144 break;
145 }
146 default:
147 {
148 XLAL_ERROR_REAL8(XLAL_EINVAL, "Error in IMRPhenomX_Intermediate_Amp_22_v3: IMRPhenomXIntermediateAmpVersion is not valid. Recommended flag is 104 (should not call *_Amp_22_v3). \n");
149 }
150 }
151
152 return (noSpin + eqSpin + uneqSpin);
153}
154
155
156/* Solves system of equations for 5th order polynomial ansatz. Section VI.B, Eq. 6.5 of arXiv:2001.11412. Note that \alpha in paper are \delta in code here. */
157static double IMRPhenomX_Intermediate_Amp_22_delta0(double d1, double d4, double v1, double v2, double v3, double v4, double f1, double f2, double f3, double f4, int IntAmpFlag)
158{
159 double retVal;
160
161 switch ( IntAmpFlag )
162 {
163 case 1043: //no left derivative; 1043 is used in IMRPhenomXHM
164 {
165 UNUSED double f12 = f1*f1;
166 UNUSED double f13 = f12*f1;
167 UNUSED double f14 = f13*f1;
168
169 UNUSED double f22 = f2*f2;
170 UNUSED double f23 = f22*f2;
171 UNUSED double f24 = f23*f2;
172 UNUSED double f25 = f24*f2;
173
174 UNUSED double f32 = f3*f3;
175 UNUSED double f33 = f32*f3;
176 UNUSED double f34 = f33*f3;
177
178 UNUSED double f42 = f4*f4;
179 UNUSED double f43 = f42*f4;
180 UNUSED double f44 = f43*f4;
181 UNUSED double f45 = f44*f4;
182
183 UNUSED double f1mf2 = f1-f2;
184 UNUSED double f1mf3 = f1-f3;
185 UNUSED double f1mf4 = f1-f4;
186 UNUSED double f2mf3 = f2-f3;
187 UNUSED double f2mf4 = f2-f4;
188 UNUSED double f3mf4 = f3-f4;
189
190 UNUSED double f1mf42 = f1mf4*f1mf4;
191 UNUSED double f3mf42 = f3mf4*f3mf4;
192 UNUSED double f2mf32 = f2mf3*f2mf3;
193 UNUSED double f2mf42 = f2mf4*f2mf4;
194
195 retVal = (-(d4*f1*f1mf2*f1mf3*f1mf4*f2*f2mf3*f2mf4*f3*f3mf4*f4) - f1*f1mf3*f1mf42*f3*f3mf42*f42*v2 +
196 f24*(-(f1*f1mf42*f42*v3) + f33*(f42*v1 + f12*v4 - 2*f1*f4*v4) + f3*f4*(f43*v1 + 2*f13*v4 - 3*f12*f4*v4) - f32*(2*f43*v1 + f13*v4 - 3*f1*f42*v4)) +
197 f2*f4*(f12*f1mf42*f43*v3 - f34*(f43*v1 + 2*f13*v4 - 3*f12*f4*v4) - f32*f4*(f44*v1 + 3*f14*v4 - 4*f13*f4*v4) + 2*f33*(f44*v1 + f14*v4 - 2*f12*f42*v4)) +
198 f22*(-(f1*f1mf42*(2*f1 + f4)*f43*v3) + f3*f42*(f44*v1 + 3*f14*v4 - 4*f13*f4*v4) + f34*(2*f43*v1 + f13*v4 - 3*f1*f42*v4) - f33*(3*f44*v1 + f14*v4 - 4*f1*f43*v4)) +
199 f23*(f1*f1mf42*(f1 + 2*f4)*f42*v3 - f34*(f42*v1 + f12*v4 - 2*f1*f4*v4) + f32*(3*f44*v1 + f14*v4 - 4*f1*f43*v4) - 2*f3*(f45*v1 + f14*f4*v4 - 2*f12*f43*v4)))/(f1mf2*f1mf3*f1mf42*f2mf3*f2mf42*f3mf42);
200 break;
201 }
202 case 104:
203 {
204
205 UNUSED double f12 = f1*f1;
206 UNUSED double f13 = f12*f1;
207 UNUSED double f14 = f13*f1;
208
209 UNUSED double f22 = f2*f2;
210 UNUSED double f23 = f22*f2;
211 UNUSED double f24 = f23*f2;
212
213 UNUSED double f32 = f3*f3;
214 UNUSED double f33 = f32*f3;
215 UNUSED double f34 = f33*f3;
216
217 UNUSED double f42 = f4*f4;
218 UNUSED double f43 = f42*f4;
219 UNUSED double f44 = f43*f4;
220
221 UNUSED double f1mf2 = f1-f2;
222 UNUSED double f1mf3 = f1-f3;
223 UNUSED double f1mf4 = f1-f4;
224 UNUSED double f2mf3 = f2-f3;
225 UNUSED double f2mf4 = f2-f4;
226 UNUSED double f3mf4 = f3-f4;
227
228 UNUSED double f1mf22 = f1mf2*f1mf2;
229 UNUSED double f1mf32 = f1mf3*f1mf3;
230 UNUSED double f1mf42 = f1mf4*f1mf4;
231 UNUSED double f2mf32 = f2mf3*f2mf3;
232 UNUSED double f2mf42 = f2mf4*f2mf4;
233 UNUSED double f3mf42 = f3mf4*f3mf4;
234 UNUSED double f1mf43 = f1mf4*f1mf4*f1mf4;
235
236 retVal = ((-(d4*f12*f1mf22*f1mf4*f2*f2mf4*f4) + d1*f1*f1mf2*f1mf4*f2*f2mf42*f42 + f42*(f2*f2mf42*(-4*f12 + 3*f1*f2 + 2*f1*f4 - f2*f4)*v1 + f12*f1mf43*v2) +
237 f12*f1mf22*f2*(f1*f2 - 2*f1*f4 - 3*f2*f4 + 4*f42)*v4)/(f1mf22*f1mf43*f2mf42));
238
239 break;
240 }
241 case 105:
242 {
243 UNUSED double f12 = f1*f1;
244 UNUSED double f13 = f12*f1;
245 UNUSED double f14 = f13*f1;
246 UNUSED double f15 = f14*f1;
247 UNUSED double f16 = f15*f1;
248 UNUSED double f17 = f16*f1;
249
250 UNUSED double f22 = f2*f2;
251 UNUSED double f23 = f22*f2;
252 UNUSED double f24 = f23*f2;
253 UNUSED double f25 = f24*f2;
254 UNUSED double f26 = f25*f2;
255 UNUSED double f27 = f26*f2;
256
257 UNUSED double f32 = f3*f3;
258 UNUSED double f33 = f32*f3;
259 UNUSED double f34 = f33*f3;
260 UNUSED double f35 = f34*f3;
261 UNUSED double f36 = f35*f3;
262 UNUSED double f37 = f36*f3;
263
264 UNUSED double f42 = f4*f4;
265 UNUSED double f43 = f42*f4;
266 UNUSED double f44 = f43*f4;
267 UNUSED double f45 = f44*f4;
268 UNUSED double f46 = f45*f4;
269 UNUSED double f47 = f46*f4;
270
271 UNUSED double f1mf2 = f1-f2;
272 UNUSED double f1mf3 = f1-f3;
273 UNUSED double f1mf4 = f1-f4;
274 UNUSED double f2mf3 = f2-f3;
275 UNUSED double f2mf4 = f2-f4;
276 UNUSED double f3mf4 = f3-f4;
277
278 UNUSED double f1mf22 = f1mf2*f1mf2;
279 UNUSED double f1mf32 = f1mf3*f1mf3;
280 UNUSED double f1mf42 = f1mf4*f1mf4;
281 UNUSED double f2mf32 = f2mf3*f2mf3;
282 UNUSED double f2mf42 = f2mf4*f2mf4;
283 UNUSED double f3mf42 = f3mf4*f3mf4;
284 UNUSED double f1mf43 = f1mf4*f1mf4*f1mf4;
285
286 retVal = (
287 (-(d4*f12*f1mf22*f1mf32*f1mf4*f2*f2mf3*f2mf4*f3*f3mf4*f4) - d1*f1*f1mf2*f1mf3*f1mf4*f2*f2mf3*f2mf42*f3*f3mf42*f42 + 5*f13*f24*f33*f42*v1 - 4*f12*f25*f33*f42*v1 - 5*f13*f23*f34*f42*v1 +
288 3*f1*f25*f34*f42*v1 + 4*f12*f23*f35*f42*v1 - 3*f1*f24*f35*f42*v1 - 10*f13*f24*f32*f43*v1 + 8*f12*f25*f32*f43*v1 + 5*f12*f24*f33*f43*v1 - 4*f1*f25*f33*f43*v1 + 10*f13*f22*f34*f43*v1 -
289 5*f12*f23*f34*f43*v1 - f25*f34*f43*v1 - 8*f12*f22*f35*f43*v1 + 4*f1*f23*f35*f43*v1 + f24*f35*f43*v1 + 5*f13*f24*f3*f44*v1 - 4*f12*f25*f3*f44*v1 + 15*f13*f23*f32*f44*v1 -
290 10*f12*f24*f32*f44*v1 - f1*f25*f32*f44*v1 - 15*f13*f22*f33*f44*v1 + 5*f1*f24*f33*f44*v1 + 2*f25*f33*f44*v1 - 5*f13*f2*f34*f44*v1 + 10*f12*f22*f34*f44*v1 - 5*f1*f23*f34*f44*v1 +
291 4*f12*f2*f35*f44*v1 + f1*f22*f35*f44*v1 - 2*f23*f35*f44*v1 - 10*f13*f23*f3*f45*v1 + 5*f12*f24*f3*f45*v1 + 2*f1*f25*f3*f45*v1 - f12*f23*f32*f45*v1 + 2*f1*f24*f32*f45*v1 - f25*f32*f45*v1 +
292 10*f13*f2*f33*f45*v1 + f12*f22*f33*f45*v1 - 3*f24*f33*f45*v1 - 5*f12*f2*f34*f45*v1 - 2*f1*f22*f34*f45*v1 + 3*f23*f34*f45*v1 - 2*f1*f2*f35*f45*v1 + f22*f35*f45*v1 + 5*f13*f22*f3*f46*v1 +
293 2*f12*f23*f3*f46*v1 - 4*f1*f24*f3*f46*v1 - 5*f13*f2*f32*f46*v1 - f1*f23*f32*f46*v1 + 2*f24*f32*f46*v1 - 2*f12*f2*f33*f46*v1 + f1*f22*f33*f46*v1 + 4*f1*f2*f34*f46*v1 - 2*f22*f34*f46*v1 -
294 3*f12*f22*f3*f47*v1 + 2*f1*f23*f3*f47*v1 + 3*f12*f2*f32*f47*v1 - f23*f32*f47*v1 - 2*f1*f2*f33*f47*v1 + f22*f33*f47*v1 - f17*f33*f42*v2 + 2*f16*f34*f42*v2 - f15*f35*f42*v2 + 2*f17*f32*f43*v2 -
295 f16*f33*f43*v2 - 4*f15*f34*f43*v2 + 3*f14*f35*f43*v2 - f17*f3*f44*v2 - 4*f16*f32*f44*v2 + 8*f15*f33*f44*v2 - 3*f13*f35*f44*v2 + 3*f16*f3*f45*v2 - 8*f14*f33*f45*v2 + 4*f13*f34*f45*v2 +
296 f12*f35*f45*v2 - 3*f15*f3*f46*v2 + 4*f14*f32*f46*v2 + f13*f33*f46*v2 - 2*f12*f34*f46*v2 + f14*f3*f47*v2 - 2*f13*f32*f47*v2 + f12*f33*f47*v2 + f17*f23*f42*v3 - 2*f16*f24*f42*v3 +
297 f15*f25*f42*v3 - 2*f17*f22*f43*v3 + f16*f23*f43*v3 + 4*f15*f24*f43*v3 - 3*f14*f25*f43*v3 + f17*f2*f44*v3 + 4*f16*f22*f44*v3 - 8*f15*f23*f44*v3 + 3*f13*f25*f44*v3 - 3*f16*f2*f45*v3 +
298 8*f14*f23*f45*v3 - 4*f13*f24*f45*v3 - f12*f25*f45*v3 + 3*f15*f2*f46*v3 - 4*f14*f22*f46*v3 - f13*f23*f46*v3 + 2*f12*f24*f46*v3 - f14*f2*f47*v3 + 2*f13*f22*f47*v3 - f12*f23*f47*v3 +
299 f12*f1mf22*f1mf32*f2*f2mf3*f3*(f4*(-3*f2*f3 + 4*(f2 + f3)*f4 - 5*f42) + f1*(f2*f3 - 2*(f2 + f3)*f4 + 3*f42))*v4)/(f1mf22*f1mf32*f1mf43*f2mf3*f2mf42*f3mf42)
300 );
301
302 break;
303 }
304 default:
305 {
306 XLAL_ERROR_REAL8(XLAL_EINVAL, "Error in IMRPhenomX_Intermediate_Amp_22_v3: IMRPhenomXIntermediateAmpVersion is not valid. Recommended flag is 104.\n");
307 }
308 }
309
310 return retVal;
311}
312
313/* Solves system of equations for 5th order polynomial ansatz. Section VI.B, Eq. 6.5 of arXiv:2001.11412. Note that \alpha in paper are \delta in code here. */
314static double IMRPhenomX_Intermediate_Amp_22_delta1(double d1, double d4, double v1, double v2, double v3, double v4, double f1, double f2, double f3, double f4, int IntAmpFlag)
315{
316 double retVal;
317
318 switch ( IntAmpFlag )
319 {
320 case 1043: //no left derivative; 1043 is used in IMRPhenomXHM
321 {
322 UNUSED double f12 = f1*f1;
323 UNUSED double f13 = f12*f1;
324 UNUSED double f14 = f13*f1;
325
326 UNUSED double f22 = f2*f2;
327 UNUSED double f23 = f22*f2;
328 UNUSED double f24 = f23*f2;
329 UNUSED double f25 = f24*f2;
330
331 UNUSED double f32 = f3*f3;
332 UNUSED double f33 = f32*f3;
333 UNUSED double f34 = f33*f3;
334
335 UNUSED double f42 = f4*f4;
336 UNUSED double f43 = f42*f4;
337 UNUSED double f44 = f43*f4;
338
339 UNUSED double f1mf2 = f1-f2;
340 UNUSED double f1mf3 = f1-f3;
341 UNUSED double f1mf4 = f1-f4;
342 UNUSED double f2mf3 = f2-f3;
343 UNUSED double f2mf4 = f2-f4;
344 UNUSED double f3mf4 = f3-f4;
345
346 UNUSED double f1mf42 = f1mf4*f1mf4;
347 UNUSED double f2mf42 = f2mf4*f2mf4;
348 UNUSED double f3mf42 = f3mf4*f3mf4;
349
350 UNUSED double v1mv2 = v1-v2;
351 UNUSED double v2mv3 = v2-v3;
352 UNUSED double v2mv4 = v2-v4;
353 UNUSED double v1mv3 = v1-v3;
354 UNUSED double v1mv4 = v1-v4;
355 UNUSED double v3mv4 = v3-v4;
356
357 retVal =(d4*f1mf4*f2mf4*f3mf4*(f1*f2*f3 + f2*f3*f4 + f1*(f2 + f3)*f4) + (f4*(f12*f1mf42*f43*v2mv3 + f34*(f43*v1mv2 +
358 3*f12*f4*v2mv4 + 2*f13*(-v2 + v4)) + f32*f4*(f44*v1mv2 + 4*f13*f4*v2mv4 + 3*f14*(-v2 + v4)) + 2*f33*(f44*(-v1 + v2)
359 + f14*v2mv4 + 2*f12*f42*(-v2 + v4)) + 2*f23*(f44*v1mv3 + f34*v1mv4 + 2*f12*f42*v3mv4 + 2*f32*f42*(-v1 + v4) + f14*(-v3 + v4))
360 + f24*(3*f32*f4*v1mv4 + f43*(-v1 + v3) + 2*f13*v3mv4 + 2*f33*(-v1 + v4) + 3*f12*f4*(-v3 + v4)) + f22*f4*(4*f33*f4*v1mv4 + f44*(-v1 + v3)
361 + 3*f14*v3mv4 + 3*f34*(-v1 + v4) + 4*f13*f4*(-v3 + v4))))/(f1mf2*f1mf3*f2mf3))/(f1mf42*f2mf42*f3mf42);
362
363 break;
364 }
365 case 104:
366 {
367
368 UNUSED double f12 = f1*f1;
369 UNUSED double f13 = f12*f1;
370 UNUSED double f14 = f13*f1;
371
372 UNUSED double f22 = f2*f2;
373 UNUSED double f23 = f22*f2;
374 UNUSED double f24 = f23*f2;
375
376 UNUSED double f32 = f3*f3;
377 UNUSED double f33 = f32*f3;
378 UNUSED double f34 = f33*f3;
379
380 UNUSED double f42 = f4*f4;
381 UNUSED double f43 = f42*f4;
382 UNUSED double f44 = f43*f4;
383
384 UNUSED double f1mf2 = f1-f2;
385 UNUSED double f1mf3 = f1-f3;
386 UNUSED double f1mf4 = f1-f4;
387 UNUSED double f2mf3 = f2-f3;
388 UNUSED double f2mf4 = f2-f4;
389 UNUSED double f3mf4 = f3-f4;
390
391 UNUSED double f1mf22 = f1mf2*f1mf2;
392 UNUSED double f1mf32 = f1mf3*f1mf3;
393 UNUSED double f1mf42 = f1mf4*f1mf4;
394 UNUSED double f2mf32 = f2mf3*f2mf3;
395 UNUSED double f2mf42 = f2mf4*f2mf4;
396 UNUSED double f3mf42 = f3mf4*f3mf4;
397 UNUSED double f1mf43 = f1mf4*f1mf4*f1mf4;
398
399 retVal = ((d4*f1*f1mf22*f1mf4*f2mf4*(2*f2*f4 + f1*(f2 + f4)) + f4*(-(d1*f1mf2*f1mf4*f2mf42*(2*f1*f2 + (f1 + f2)*f4)) -
400 2*f1*(f44*(v1 - v2) + 3*f24*(v1 - v4) + f14*(v2 - v4) + 4*f23*f4*(-v1 + v4)
401 + 2*f13*f4*(-v2 + v4) + f1*(2*f43*(-v1 + v2) + 6*f22*f4*(v1 - v4) + 4*f23*(-v1 + v4)))))/(f1mf22*f1mf43*f2mf42));
402
403
404 break;
405 }
406 case 105:
407 {
408
409 UNUSED double f12 = f1*f1;
410 UNUSED double f13 = f12*f1;
411 UNUSED double f14 = f13*f1;
412 UNUSED double f15 = f14*f1;
413 UNUSED double f16 = f15*f1;
414 UNUSED double f17 = f16*f1;
415
416 UNUSED double f22 = f2*f2;
417 UNUSED double f23 = f22*f2;
418 UNUSED double f24 = f23*f2;
419 UNUSED double f25 = f24*f2;
420 UNUSED double f26 = f25*f2;
421 UNUSED double f27 = f26*f2;
422
423 UNUSED double f32 = f3*f3;
424 UNUSED double f33 = f32*f3;
425 UNUSED double f34 = f33*f3;
426 UNUSED double f35 = f34*f3;
427 UNUSED double f36 = f35*f3;
428 UNUSED double f37 = f36*f3;
429
430 UNUSED double f42 = f4*f4;
431 UNUSED double f43 = f42*f4;
432 UNUSED double f44 = f43*f4;
433 UNUSED double f45 = f44*f4;
434 UNUSED double f46 = f45*f4;
435 UNUSED double f47 = f46*f4;
436
437 UNUSED double f1mf2 = f1-f2;
438 UNUSED double f1mf3 = f1-f3;
439 UNUSED double f1mf4 = f1-f4;
440 UNUSED double f2mf3 = f2-f3;
441 UNUSED double f2mf4 = f2-f4;
442 UNUSED double f3mf4 = f3-f4;
443
444 UNUSED double f1mf22 = f1mf2*f1mf2;
445 UNUSED double f1mf32 = f1mf3*f1mf3;
446 UNUSED double f1mf42 = f1mf4*f1mf4;
447 UNUSED double f2mf32 = f2mf3*f2mf3;
448 UNUSED double f2mf42 = f2mf4*f2mf4;
449 UNUSED double f3mf42 = f3mf4*f3mf4;
450 UNUSED double f1mf43 = f1mf4*f1mf4*f1mf4;
451
452 retVal = (
453 (d4*f1*f1mf22*f1mf32*f1mf4*f2mf3*f2mf4*f3mf4*(f1*f2*f3 + 2*f2*f3*f4 + f1*(f2 + f3)*f4) +
454 f4*(d1*f1mf2*f1mf3*f1mf4*f2mf3*f2mf42*f3mf42*(2*f1*f2*f3 + f2*f3*f4 + f1*(f2 + f3)*f4) +
455 f1*(f16*(f43*(v2 - v3) + 2*f33*(v2 - v4) + 3*f22*f4*(v3 - v4) + 3*f32*f4*(-v2 + v4) + 2*f23*(-v3 + v4)) +
456 f13*f4*(f45*(-v2 + v3) + 5*f34*f4*(v2 - v4) + 4*f25*(v3 - v4) + 4*f35*(-v2 + v4) + 5*f24*f4*(-v3 + v4)) +
457 f14*(3*f45*(v2 - v3) + 2*f35*(v2 - v4) + 5*f34*f4*(v2 - v4) + 10*f23*f42*(v3 - v4) + 10*f33*f42*(-v2 + v4) + 2*f25*(-v3 + v4) + 5*f24*f4*(-v3 + v4)) +
458 f15*(3*f44*(-v2 + v3) + 2*f33*f4*(v2 - v4) + 5*f32*f42*(v2 - v4) + 4*f24*(v3 - v4) + 4*f34*(-v2 + v4) + 2*f23*f4*(-v3 + v4) + 5*f22*f42*(-v3 + v4)) -
459 5*f12*(-(f32*f3mf42*f43*(v1 - v2)) + 2*f23*(f44*(-v1 + v3) + 2*f32*f42*(v1 - v4) + f34*(-v1 + v4)) + f24*(f43*(v1 - v3) + 2*f33*(v1 - v4) + 3*f32*f4*(-v1 + v4)) +
460 f22*f4*(f44*(v1 - v3) + 3*f34*(v1 - v4) + 4*f33*f4*(-v1 + v4))) +
461 f1*(-(f32*f3mf42*(4*f3 + 3*f4)*f43*(v1 - v2)) + 2*f23*(f45*(-v1 + v3) + 5*f34*f4*(v1 - v4) + 4*f35*(-v1 + v4)) + 4*f25*(f43*(v1 - v3) + 2*f33*(v1 - v4) + 3*f32*f4*(-v1 + v4)) -
462 5*f24*f4*(f43*(v1 - v3) + 2*f33*(v1 - v4) + 3*f32*f4*(-v1 + v4)) + 3*f22*f4*(f45*(v1 - v3) + 4*f35*(v1 - v4) + 5*f34*f4*(-v1 + v4))) -
463 2*(-(f33*f3mf42*f44*(v1 - v2)) + f24*(2*f45*(-v1 + v3) + 5*f33*f42*(v1 - v4) + 3*f35*(-v1 + v4)) + f25*(f44*(v1 - v3) + 3*f34*(v1 - v4) + 4*f33*f4*(-v1 + v4)) +
464 f23*f4*(f45*(v1 - v3) + 4*f35*(v1 - v4) + 5*f34*f4*(-v1 + v4))))))/(f1mf22*f1mf32*f1mf43*f2mf3*f2mf42*f3mf42)
465 );
466 break;
467 }
468 default:
469 {
470 XLAL_ERROR_REAL8(XLAL_EINVAL, "Error in IMRPhenomX_Intermediate_Amp_22_v3: IMRPhenomXIntermediateAmpVersion is not valid. Recommended flag is 104.\n");
471 }
472 }
473
474 return retVal;
475}
476
477/* Solves system of equations for 5th order polynomial ansatz. Section VI.B, Eq. 6.5 of arXiv:2001.11412. Note that \alpha in paper are \delta in code here. */
478static double IMRPhenomX_Intermediate_Amp_22_delta2(double d1, double d4, double v1, double v2, double v3, double v4, double f1, double f2, double f3, double f4, int IntAmpFlag)
479{
480 double retVal;
481
482 switch ( IntAmpFlag )
483 {
484 case 1043: //no left derivative: v1, v2, v3, v4, d4; 1043 is used in IMRPhenomXHM
485 {
486 UNUSED double f12 = f1*f1;
487 UNUSED double f13 = f12*f1;
488 UNUSED double f14 = f13*f1;
489
490 UNUSED double f22 = f2*f2;
491 UNUSED double f23 = f22*f2;
492 UNUSED double f24 = f23*f2;
493 UNUSED double f25 = f24*f2;
494 UNUSED double f26 = f25*f2;
495
496 UNUSED double f32 = f3*f3;
497 UNUSED double f33 = f32*f3;
498 UNUSED double f34 = f33*f3;
499
500 UNUSED double f42 = f4*f4;
501 UNUSED double f43 = f42*f4;
502 UNUSED double f44 = f43*f4;
503 UNUSED double f46 = f44*f42;
504
505 UNUSED double f1mf2 = f1-f2;
506 UNUSED double f1mf3 = f1-f3;
507 UNUSED double f1mf4 = f1-f4;
508 UNUSED double f2mf3 = f2-f3;
509 UNUSED double f2mf4 = f2-f4;
510 UNUSED double f3mf4 = f3-f4;
511
512 UNUSED double f1mf42 = f1mf4*f1mf4;
513 UNUSED double f2mf42 = f2mf4*f2mf4;
514 UNUSED double f3mf42 = f3mf4*f3mf4;
515
516 UNUSED double v1mv2 = v1-v2;
517 UNUSED double v2mv3 = v2-v3;
518 UNUSED double v2mv4 = v2-v4;
519 UNUSED double v1mv3 = v1-v3;
520 UNUSED double v1mv4 = v1-v4;
521 UNUSED double v3mv4 = v3-v4;
522
523 retVal = (-(d4*f1mf2*f1mf3*f1mf4*f2mf3*f2mf4*f3mf4*(f3*f4 + f2*(f3 + f4) + f1*(f2 + f3 + f4))) - 2*f34*f43*v1 + 3*f33*f44*v1 - f3*f46*v1 - f14*f33*v2 + f13*f34*v2 + 3*f14*f3*f42*v2 - 3*f1*f34*f42*v2 -
524 2*f14*f43*v2 - 4*f13*f3*f43*v2 + 4*f1*f33*f43*v2 + 2*f34*f43*v2 + 3*f13*f44*v2 - 3*f33*f44*v2 - f1*f46*v2 + f3*f46*v2 + 2*f14*f43*v3 - 3*f13*f44*v3 + f1*f46*v3 +
525 f2*f42*(f44*v1mv3 + 3*f34*v1mv4 - 4*f33*f4*v1mv4 - 3*f14*v3mv4 + 4*f13*f4*v3mv4) + f24*(2*f43*v1mv3 + f33*v1mv4 - 3*f3*f42*v1mv4 - f13*v3mv4 + 3*f1*f42*v3mv4) +
526 f23*(-3*f44*v1mv3 - f34*v1mv4 + 4*f3*f43*v1mv4 + f14*v3mv4 - 4*f1*f43*v3mv4) + f14*f33*v4 - f13*f34*v4 - 3*f14*f3*f42*v4 + 3*f1*f34*f42*v4 + 4*f13*f3*f43*v4 - 4*f1*f33*f43*v4)/
527 (f1mf2*f1mf3*f1mf42*f2mf3*f2mf42*f3mf42);
528 break;
529 }
530 case 104:
531 {
532 UNUSED double f12 = f1*f1;
533 UNUSED double f13 = f12*f1;
534 UNUSED double f14 = f13*f1;
535 UNUSED double f15 = f14*f1;
536
537 UNUSED double f22 = f2*f2;
538 UNUSED double f23 = f22*f2;
539 UNUSED double f24 = f23*f2;
540
541 UNUSED double f32 = f3*f3;
542 UNUSED double f33 = f32*f3;
543 UNUSED double f34 = f33*f3;
544
545 UNUSED double f42 = f4*f4;
546 UNUSED double f43 = f42*f4;
547 UNUSED double f44 = f43*f4;
548 UNUSED double f45 = f44*f4;
549
550 UNUSED double f1mf2 = f1-f2;
551 UNUSED double f1mf3 = f1-f3;
552 UNUSED double f1mf4 = f1-f4;
553 UNUSED double f2mf3 = f2-f3;
554 UNUSED double f2mf4 = f2-f4;
555 UNUSED double f3mf4 = f3-f4;
556
557 UNUSED double f1mf22 = f1mf2*f1mf2;
558 UNUSED double f1mf32 = f1mf3*f1mf3;
559 UNUSED double f1mf42 = f1mf4*f1mf4;
560 UNUSED double f2mf32 = f2mf3*f2mf3;
561 UNUSED double f2mf42 = f2mf4*f2mf4;
562 UNUSED double f3mf42 = f3mf4*f3mf4;
563 UNUSED double f1mf43 = f1mf4*f1mf4*f1mf4;
564
565 retVal = ((-(d4*f1mf22*f1mf4*f2mf4*(f12 + f2*f4 + 2*f1*(f2 + f4))) + d1*f1mf2*f1mf4*f2mf42*(f1*f2 + 2*(f1 + f2)*f4 + f42)
566 - 4*f12*f23*v1 + 3*f1*f24*v1 - 4*f1*f23*f4*v1 + 3*f24*f4*v1 + 12*f12*f2*f42*v1 -
567 4*f23*f42*v1 - 8*f12*f43*v1 + f1*f44*v1 + f45*v1 + f15*v2 + f14*f4*v2 - 8*f13*f42*v2 + 8*f12*f43*v2 - f1*f44*v2 - f45*v2 -
568 f1mf22*(f13 + f2*(3*f2 - 4*f4)*f4 + f12*(2*f2 + f4) + f1*(3*f2 - 4*f4)*(f2 + 2*f4))*v4)/(f1mf22*f1mf43*f2mf42));
569
570 break;
571 }
572 case 105:
573 {
574 UNUSED double f12 = f1*f1;
575 UNUSED double f13 = f12*f1;
576 UNUSED double f14 = f13*f1;
577 UNUSED double f15 = f14*f1;
578 UNUSED double f16 = f15*f1;
579 UNUSED double f17 = f16*f1;
580
581 UNUSED double f22 = f2*f2;
582 UNUSED double f23 = f22*f2;
583 UNUSED double f24 = f23*f2;
584 UNUSED double f25 = f24*f2;
585 UNUSED double f26 = f25*f2;
586 UNUSED double f27 = f26*f2;
587
588 UNUSED double f32 = f3*f3;
589 UNUSED double f33 = f32*f3;
590 UNUSED double f34 = f33*f3;
591 UNUSED double f35 = f34*f3;
592 UNUSED double f36 = f35*f3;
593 UNUSED double f37 = f36*f3;
594
595 UNUSED double f42 = f4*f4;
596 UNUSED double f43 = f42*f4;
597 UNUSED double f44 = f43*f4;
598 UNUSED double f45 = f44*f4;
599 UNUSED double f46 = f45*f4;
600 UNUSED double f47 = f46*f4;
601
602 UNUSED double f1mf2 = f1-f2;
603 UNUSED double f1mf3 = f1-f3;
604 UNUSED double f1mf4 = f1-f4;
605 UNUSED double f2mf3 = f2-f3;
606 UNUSED double f2mf4 = f2-f4;
607 UNUSED double f3mf4 = f3-f4;
608
609 UNUSED double f1mf22 = f1mf2*f1mf2;
610 UNUSED double f1mf32 = f1mf3*f1mf3;
611 UNUSED double f1mf42 = f1mf4*f1mf4;
612 UNUSED double f2mf32 = f2mf3*f2mf3;
613 UNUSED double f2mf42 = f2mf4*f2mf4;
614 UNUSED double f3mf42 = f3mf4*f3mf4;
615 UNUSED double f1mf43 = f1mf4*f1mf4*f1mf4;
616
617 retVal = (
618 (-(d4*f1mf22*f1mf32*f1mf4*f2mf3*f2mf4*f3mf4*(f2*f3*f4 + f12*(f2 + f3 + f4) + 2*f1*(f2*f3 + (f2 + f3)*f4))) -
619 d1*f1mf2*f1mf3*f1mf4*f2mf3*f2mf42*f3mf42*(f1*f2*f3 + 2*(f2*f3 + f1*(f2 + f3))*f4 + (f1 + f2 + f3)*f42) + 5*f13*f24*f33*v1 - 4*f12*f25*f33*v1 - 5*f13*f23*f34*v1 + 3*f1*f25*f34*v1 +
620 4*f12*f23*f35*v1 - 3*f1*f24*f35*v1 + 5*f12*f24*f33*f4*v1 - 4*f1*f25*f33*f4*v1 - 5*f12*f23*f34*f4*v1 + 3*f25*f34*f4*v1 + 4*f1*f23*f35*f4*v1 - 3*f24*f35*f4*v1 - 15*f13*f24*f3*f42*v1 +
621 12*f12*f25*f3*f42*v1 + 5*f1*f24*f33*f42*v1 - 4*f25*f33*f42*v1 + 15*f13*f2*f34*f42*v1 - 5*f1*f23*f34*f42*v1 - 12*f12*f2*f35*f42*v1 + 4*f23*f35*f42*v1 + 10*f13*f24*f43*v1 - 8*f12*f25*f43*v1 +
622 20*f13*f23*f3*f43*v1 - 15*f12*f24*f3*f43*v1 - 20*f13*f2*f33*f43*v1 + 5*f24*f33*f43*v1 - 10*f13*f34*f43*v1 + 15*f12*f2*f34*f43*v1 - 5*f23*f34*f43*v1 + 8*f12*f35*f43*v1 - 15*f13*f23*f44*v1 +
623 10*f12*f24*f44*v1 + f1*f25*f44*v1 + 15*f13*f33*f44*v1 - 10*f12*f34*f44*v1 - f1*f35*f44*v1 + f12*f23*f45*v1 - 2*f1*f24*f45*v1 + f25*f45*v1 - f12*f33*f45*v1 + 2*f1*f34*f45*v1 - f35*f45*v1 +
624 5*f13*f2*f46*v1 + f1*f23*f46*v1 - 2*f24*f46*v1 - 5*f13*f3*f46*v1 - f1*f33*f46*v1 + 2*f34*f46*v1 - 3*f12*f2*f47*v1 + f23*f47*v1 + 3*f12*f3*f47*v1 - f33*f47*v1 - f17*f33*v2 + 2*f16*f34*v2 -
625 f15*f35*v2 - f16*f33*f4*v2 + 2*f15*f34*f4*v2 - f14*f35*f4*v2 + 3*f17*f3*f42*v2 - f15*f33*f42*v2 - 10*f14*f34*f42*v2 + 8*f13*f35*f42*v2 - 2*f17*f43*v2 - 5*f16*f3*f43*v2 + 15*f14*f33*f43*v2 -
626 8*f12*f35*f43*v2 + 4*f16*f44*v2 - 15*f13*f33*f44*v2 + 10*f12*f34*f44*v2 + f1*f35*f44*v2 + f12*f33*f45*v2 - 2*f1*f34*f45*v2 + f35*f45*v2 - 4*f14*f46*v2 + 5*f13*f3*f46*v2 + f1*f33*f46*v2 -
627 2*f34*f46*v2 + 2*f13*f47*v2 - 3*f12*f3*f47*v2 + f33*f47*v2 + f17*f23*v3 - 2*f16*f24*v3 + f15*f25*v3 + f16*f23*f4*v3 - 2*f15*f24*f4*v3 + f14*f25*f4*v3 - 3*f17*f2*f42*v3 + f15*f23*f42*v3 +
628 10*f14*f24*f42*v3 - 8*f13*f25*f42*v3 + 2*f17*f43*v3 + 5*f16*f2*f43*v3 - 15*f14*f23*f43*v3 + 8*f12*f25*f43*v3 - 4*f16*f44*v3 + 15*f13*f23*f44*v3 - 10*f12*f24*f44*v3 - f1*f25*f44*v3 -
629 f12*f23*f45*v3 + 2*f1*f24*f45*v3 - f25*f45*v3 + 4*f14*f46*v3 - 5*f13*f2*f46*v3 - f1*f23*f46*v3 + 2*f24*f46*v3 - 2*f13*f47*v3 + 3*f12*f2*f47*v3 - f23*f47*v3 -
630 f1mf22*f1mf32*f2mf3*(f13*(f22 + f2*f3 + f32 - 3*f42) + f2*f3*f4*(3*f2*f3 - 4*(f2 + f3)*f4 + 5*f42) + f1*(f2*f3 + 2*(f2 + f3)*f4)*(3*f2*f3 - 4*(f2 + f3)*f4 + 5*f42) +
631 f12*(2*f2*f3*(f2 + f3) + (f22 + f2*f3 + f32)*f4 - 6*(f2 + f3)*f42 + 5*f43))*v4)/(f1mf22*f1mf32*f1mf43*f2mf3*f2mf42*f3mf42)
632 );
633
634 break;
635 }
636 default:
637 {
638 XLAL_ERROR_REAL8(XLAL_EINVAL, "Error in IMRPhenomX_Intermediate_Amp_22_v3: IMRPhenomXIntermediateAmpVersion is not valid. Recommended flag is 104.\n");
639 }
640 }
641
642return retVal;
643}
644
645/* Solves system of equations for 5th order polynomial ansatz. Section VI.B, Eq. 6.5 of arXiv:2001.11412. Note that \alpha in paper are \delta in code here. */
646static double IMRPhenomX_Intermediate_Amp_22_delta3(double d1, double d4, double v1, double v2, double v3, double v4, double f1, double f2, double f3, double f4, int IntAmpFlag)
647{
648 double retVal;
649
650 switch ( IntAmpFlag )
651 {
652 case 1043: //no left derivative: v1, v2, v3, v4, d4; 1043 is used in IMRPhenomXHM
653 {
654 UNUSED double f12 = f1*f1;
655 UNUSED double f13 = f12*f1;
656 UNUSED double f14 = f13*f1;
657
658 UNUSED double f22 = f2*f2;
659 UNUSED double f23 = f22*f2;
660 UNUSED double f24 = f23*f2;
661
662 UNUSED double f32 = f3*f3;
663 UNUSED double f34 = f32*f32;
664
665 UNUSED double f42 = f4*f4;
666 UNUSED double f43 = f42*f4;
667 UNUSED double f44 = f43*f4;
668 UNUSED double f45 = f44*f4;
669
670 UNUSED double f1mf2 = f1-f2;
671 UNUSED double f1mf3 = f1-f3;
672 UNUSED double f1mf4 = f1-f4;
673 UNUSED double f2mf3 = f2-f3;
674 UNUSED double f2mf4 = f2-f4;
675 UNUSED double f3mf4 = f3-f4;
676
677 UNUSED double f1mf42 = f1mf4*f1mf4;
678 UNUSED double f2mf42 = f2mf4*f2mf4;
679 UNUSED double f3mf42 = f3mf4*f3mf4;
680
681 UNUSED double v1mv2 = v1-v2;
682 UNUSED double v2mv3 = v2-v3;
683 UNUSED double v2mv4 = v2-v4;
684 UNUSED double v1mv3 = v1-v3;
685 UNUSED double v1mv4 = v1-v4;
686 UNUSED double v3mv4 = v3-v4;
687
688 retVal = (d4*f1mf2*f1mf3*f1mf4*f2mf3*f2mf4*f3mf4*(f1 + f2 + f3 + f4) + f34*f42*v1 - 3*f32*f44*v1 + 2*f3*f45*v1 + f14*f32*v2 - f12*f34*v2 - 2*f14*f3*f4*v2 + 2*f1*f34*f4*v2 + f14*f42*v2 - f34*f42*v2 +
689 4*f12*f3*f43*v2 - 4*f1*f32*f43*v2 - 3*f12*f44*v2 + 3*f32*f44*v2 + 2*f1*f45*v2 - 2*f3*f45*v2 - f14*f42*v3 + 3*f12*f44*v3 - 2*f1*f45*v3 +
690 f24*(-(f42*v1mv3) - f32*v1mv4 + 2*f3*f4*v1mv4 + f12*v3mv4 - 2*f1*f4*v3mv4) - 2*f2*f4*(f44*v1mv3 + f34*v1mv4 - 2*f32*f42*v1mv4 - f14*v3mv4 + 2*f12*f42*v3mv4) +
691 f22*(3*f44*v1mv3 + f34*v1mv4 - 4*f3*f43*v1mv4 - f14*v3mv4 + 4*f1*f43*v3mv4) - f14*f32*v4 + f12*f34*v4 + 2*f14*f3*f4*v4 - 2*f1*f34*f4*v4 - 4*f12*f3*f43*v4 + 4*f1*f32*f43*v4)/
692 (f1mf2*f1mf3*f1mf42*f2mf3*f2mf42*f3mf42);
693 break;
694 }
695 case 104:
696 {
697
698 UNUSED double f12 = f1*f1;
699 UNUSED double f13 = f12*f1;
700 UNUSED double f14 = f13*f1;
701
702 UNUSED double f22 = f2*f2;
703 UNUSED double f23 = f22*f2;
704 UNUSED double f24 = f23*f2;
705
706 UNUSED double f32 = f3*f3;
707 UNUSED double f33 = f32*f3;
708 UNUSED double f34 = f33*f3;
709
710 UNUSED double f42 = f4*f4;
711 UNUSED double f43 = f42*f4;
712 UNUSED double f44 = f43*f4;
713
714 UNUSED double f1mf2 = f1-f2;
715 UNUSED double f1mf3 = f1-f3;
716 UNUSED double f1mf4 = f1-f4;
717 UNUSED double f2mf3 = f2-f3;
718 UNUSED double f2mf4 = f2-f4;
719 UNUSED double f3mf4 = f3-f4;
720
721 UNUSED double f1mf22 = f1mf2*f1mf2;
722 UNUSED double f1mf32 = f1mf3*f1mf3;
723 UNUSED double f1mf42 = f1mf4*f1mf4;
724 UNUSED double f2mf32 = f2mf3*f2mf3;
725 UNUSED double f2mf42 = f2mf4*f2mf4;
726 UNUSED double f3mf42 = f3mf4*f3mf4;
727 UNUSED double f1mf43 = f1mf4*f1mf4*f1mf4;
728
729 retVal = ((d4*f1mf22*f1mf4*f2mf4*(2*f1 + f2 + f4) - d1*f1mf2*f1mf4*f2mf42*(f1 + f2 + 2*f4)
730 + 2*(f44*(-v1 + v2) + 2*f12*f2mf42*(v1 - v4) + 2*f22*f42*(v1 - v4)
731 + 2*f13*f4*(v2 - v4) + f24*(-v1 + v4) + f14*(-v2 + v4) + 2*f1*f4*(f42*(v1 - v2) + f22*(v1 - v4) + 2*f2*f4*(-v1 + v4)))) / (f1mf22*f1mf43*f2mf42));
732
733
734 break;
735 }
736 case 105:
737 {
738 UNUSED double f12 = f1*f1;
739 UNUSED double f13 = f12*f1;
740 UNUSED double f14 = f13*f1;
741 UNUSED double f15 = f14*f1;
742 UNUSED double f16 = f15*f1;
743 UNUSED double f17 = f16*f1;
744
745 UNUSED double f22 = f2*f2;
746 UNUSED double f23 = f22*f2;
747 UNUSED double f24 = f23*f2;
748 UNUSED double f25 = f24*f2;
749 UNUSED double f26 = f25*f2;
750 UNUSED double f27 = f26*f2;
751
752 UNUSED double f32 = f3*f3;
753 UNUSED double f33 = f32*f3;
754 UNUSED double f34 = f33*f3;
755 UNUSED double f35 = f34*f3;
756 UNUSED double f36 = f35*f3;
757 UNUSED double f37 = f36*f3;
758
759 UNUSED double f42 = f4*f4;
760 UNUSED double f43 = f42*f4;
761 UNUSED double f44 = f43*f4;
762 UNUSED double f45 = f44*f4;
763 UNUSED double f46 = f45*f4;
764 UNUSED double f47 = f46*f4;
765
766 UNUSED double f1mf2 = f1-f2;
767 UNUSED double f1mf3 = f1-f3;
768 UNUSED double f1mf4 = f1-f4;
769 UNUSED double f2mf3 = f2-f3;
770 UNUSED double f2mf4 = f2-f4;
771 UNUSED double f3mf4 = f3-f4;
772
773 UNUSED double f1mf22 = f1mf2*f1mf2;
774 UNUSED double f1mf32 = f1mf3*f1mf3;
775 UNUSED double f1mf42 = f1mf4*f1mf4;
776 UNUSED double f2mf32 = f2mf3*f2mf3;
777 UNUSED double f2mf42 = f2mf4*f2mf4;
778 UNUSED double f3mf42 = f3mf4*f3mf4;
779 UNUSED double f1mf43 = f1mf4*f1mf4*f1mf4;
780
781 retVal = (
782 (d4*f1mf22*f1mf32*f1mf4*f2mf3*f2mf4*f3mf4*(f12 + f2*f3 + (f2 + f3)*f4 + 2*f1*(f2 + f3 + f4)) + d1*f1mf2*f1mf3*f1mf4*f2mf3*f2mf42*f3mf42*(f1*f2 + f1*f3 + f2*f3 + 2*(f1 + f2 + f3)*f4 + f42) -
783 5*f13*f24*f32*v1 + 4*f12*f25*f32*v1 + 5*f13*f22*f34*v1 - 2*f25*f34*v1 - 4*f12*f22*f35*v1 + 2*f24*f35*v1 + 10*f13*f24*f3*f4*v1 - 8*f12*f25*f3*f4*v1 - 5*f12*f24*f32*f4*v1 + 4*f1*f25*f32*f4*v1 -
784 10*f13*f2*f34*f4*v1 + 5*f12*f22*f34*f4*v1 + 8*f12*f2*f35*f4*v1 - 4*f1*f22*f35*f4*v1 - 5*f13*f24*f42*v1 + 4*f12*f25*f42*v1 + 10*f12*f24*f3*f42*v1 - 8*f1*f25*f3*f42*v1 - 5*f1*f24*f32*f42*v1 +
785 4*f25*f32*f42*v1 + 5*f13*f34*f42*v1 - 10*f12*f2*f34*f42*v1 + 5*f1*f22*f34*f42*v1 - 4*f12*f35*f42*v1 + 8*f1*f2*f35*f42*v1 - 4*f22*f35*f42*v1 - 5*f12*f24*f43*v1 + 4*f1*f25*f43*v1 -
786 20*f13*f22*f3*f43*v1 + 10*f1*f24*f3*f43*v1 + 20*f13*f2*f32*f43*v1 - 5*f24*f32*f43*v1 + 5*f12*f34*f43*v1 - 10*f1*f2*f34*f43*v1 + 5*f22*f34*f43*v1 - 4*f1*f35*f43*v1 + 15*f13*f22*f44*v1 -
787 5*f1*f24*f44*v1 - 2*f25*f44*v1 - 15*f13*f32*f44*v1 + 5*f1*f34*f44*v1 + 2*f35*f44*v1 - 10*f13*f2*f45*v1 - f12*f22*f45*v1 + 3*f24*f45*v1 + 10*f13*f3*f45*v1 + f12*f32*f45*v1 - 3*f34*f45*v1 +
788 2*f12*f2*f46*v1 - f1*f22*f46*v1 - 2*f12*f3*f46*v1 + f1*f32*f46*v1 + 2*f1*f2*f47*v1 - f22*f47*v1 - 2*f1*f3*f47*v1 + f32*f47*v1 + f17*f32*v2 - 3*f15*f34*v2 + 2*f14*f35*v2 - 2*f17*f3*f4*v2 +
789 f16*f32*f4*v2 + 5*f14*f34*f4*v2 - 4*f13*f35*f4*v2 + f17*f42*v2 - 2*f16*f3*f42*v2 + f15*f32*f42*v2 + f16*f43*v2 + 10*f15*f3*f43*v2 - 15*f14*f32*f43*v2 + 4*f1*f35*f43*v2 - 8*f15*f44*v2 +
790 15*f13*f32*f44*v2 - 5*f1*f34*f44*v2 - 2*f35*f44*v2 + 8*f14*f45*v2 - 10*f13*f3*f45*v2 - f12*f32*f45*v2 + 3*f34*f45*v2 - f13*f46*v2 + 2*f12*f3*f46*v2 - f1*f32*f46*v2 - f12*f47*v2 +
791 2*f1*f3*f47*v2 - f32*f47*v2 - f17*f22*v3 + 3*f15*f24*v3 - 2*f14*f25*v3 + 2*f17*f2*f4*v3 - f16*f22*f4*v3 - 5*f14*f24*f4*v3 + 4*f13*f25*f4*v3 - f17*f42*v3 + 2*f16*f2*f42*v3 - f15*f22*f42*v3 -
792 f16*f43*v3 - 10*f15*f2*f43*v3 + 15*f14*f22*f43*v3 - 4*f1*f25*f43*v3 + 8*f15*f44*v3 - 15*f13*f22*f44*v3 + 5*f1*f24*f44*v3 + 2*f25*f44*v3 - 8*f14*f45*v3 + 10*f13*f2*f45*v3 + f12*f22*f45*v3 -
793 3*f24*f45*v3 + f13*f46*v3 - 2*f12*f2*f46*v3 + f1*f22*f46*v3 + f12*f47*v3 - 2*f1*f2*f47*v3 + f22*f47*v3 +
794 f1mf22*f1mf32*f2mf3*(2*f22*f32 + f13*(f2 + f3 - 2*f4) + f12*(f2 + f3 - 2*f4)*(2*(f2 + f3) + f4) - 4*(f22 + f2*f3 + f32)*f42 + 5*(f2 + f3)*f43 +
795 f1*(4*f2*f3*(f2 + f3) - 4*(f22 + f2*f3 + f32)*f4 - 3*(f2 + f3)*f42 + 10*f43))*v4)/(f1mf22*f1mf32*f1mf43*f2mf3*f2mf42*f3mf42)
796 );
797
798 break;
799 }
800 default:
801 {
802 XLAL_ERROR_REAL8(XLAL_EINVAL, "Error in IMRPhenomX_Intermediate_Amp_22_v3: IMRPhenomXIntermediateAmpVersion is not valid. Recommended flag is 104.\n");
803 }
804 }
805
806 return retVal;
807}
808
809/* Solves system of equations for 5th order polynomial ansatz. Section VI.B, Eq. 6.5 of arXiv:2001.11412. Note that \alpha in paper are \delta in code here. */
810static double IMRPhenomX_Intermediate_Amp_22_delta4(double d1, double d4, double v1, double v2, double v3, double v4, double f1, double f2, double f3, double f4, int IntAmpFlag)
811{
812 double retVal;
813
814 switch ( IntAmpFlag )
815 {
816 case 1043: //no left derivative: v1, v2, v3, v4, d4; 1043 is used in IMRPhenomXHM
817 {
818 UNUSED double f12 = f1*f1;
819 UNUSED double f13 = f12*f1;
820
821 UNUSED double f22 = f2*f2;
822 UNUSED double f23 = f22*f2;
823 UNUSED double f24 = f23*f2;
824
825 UNUSED double f32 = f3*f3;
826 UNUSED double f33 = f32*f3;
827
828 UNUSED double f42 = f4*f4;
829 UNUSED double f43 = f42*f4;
830 UNUSED double f44 = f43*f4;
831
832 UNUSED double f1mf2 = f1-f2;
833 UNUSED double f1mf3 = f1-f3;
834 UNUSED double f1mf4 = f1-f4;
835 UNUSED double f2mf3 = f2-f3;
836 UNUSED double f2mf4 = f2-f4;
837 UNUSED double f3mf4 = f3-f4;
838
839 UNUSED double f1mf42 = f1mf4*f1mf4;
840 UNUSED double f2mf42 = f2mf4*f2mf4;
841 UNUSED double f3mf42 = f3mf4*f3mf4;
842
843 UNUSED double v1mv2 = v1-v2;
844 UNUSED double v2mv3 = v2-v3;
845 UNUSED double v2mv4 = v2-v4;
846 UNUSED double v1mv3 = v1-v3;
847 UNUSED double v1mv4 = v1-v4;
848 UNUSED double v3mv4 = v3-v4;
849
850 retVal = (-(d4*f1mf2*f1mf3*f1mf4*f2mf3*f2mf4*f3mf4) - f33*f42*v1 + 2*f32*f43*v1 - f3*f44*v1 - f13*f32*v2 + f12*f33*v2 + 2*f13*f3*f4*v2 - 2*f1*f33*f4*v2 - f13*f42*v2 - 3*f12*f3*f42*v2 + 3*f1*f32*f42*v2 +
851 f33*f42*v2 + 2*f12*f43*v2 - 2*f32*f43*v2 - f1*f44*v2 + f3*f44*v2 + f13*f42*v3 - 2*f12*f43*v3 + f1*f44*v3 + f23*(f42*v1mv3 + f32*v1mv4 - 2*f3*f4*v1mv4 - f12*v3mv4 + 2*f1*f4*v3mv4) +
852 f2*f4*(f43*v1mv3 + 2*f33*v1mv4 - 3*f32*f4*v1mv4 - 2*f13*v3mv4 + 3*f12*f4*v3mv4) + f22*(-2*f43*v1mv3 - f33*v1mv4 + 3*f3*f42*v1mv4 + f13*v3mv4 - 3*f1*f42*v3mv4) + f13*f32*v4 - f12*f33*v4 -
853 2*f13*f3*f4*v4 + 2*f1*f33*f4*v4 + 3*f12*f3*f42*v4 - 3*f1*f32*f42*v4)/(f1mf2*f1mf3*f1mf42*f2mf3*f2mf42*f3mf42);
854 break;
855 }
856 case 104:
857 {
858 UNUSED double f12 = f1*f1;
859 UNUSED double f13 = f12*f1;
860 UNUSED double f14 = f13*f1;
861
862 UNUSED double f22 = f2*f2;
863 UNUSED double f23 = f22*f2;
864 UNUSED double f24 = f23*f2;
865
866 UNUSED double f32 = f3*f3;
867 UNUSED double f33 = f32*f3;
868 UNUSED double f34 = f33*f3;
869
870 UNUSED double f42 = f4*f4;
871 UNUSED double f43 = f42*f4;
872 UNUSED double f44 = f43*f4;
873
874 UNUSED double f1mf2 = f1-f2;
875 UNUSED double f1mf3 = f1-f3;
876 UNUSED double f1mf4 = f1-f4;
877 UNUSED double f2mf3 = f2-f3;
878 UNUSED double f2mf4 = f2-f4;
879 UNUSED double f3mf4 = f3-f4;
880
881 UNUSED double f1mf22 = f1mf2*f1mf2;
882 UNUSED double f1mf32 = f1mf3*f1mf3;
883 UNUSED double f1mf42 = f1mf4*f1mf4;
884 UNUSED double f2mf32 = f2mf3*f2mf3;
885 UNUSED double f2mf42 = f2mf4*f2mf4;
886 UNUSED double f3mf42 = f3mf4*f3mf4;
887 UNUSED double f1mf43 = f1mf4*f1mf4*f1mf4;
888
889 retVal = ((-(d4*f1mf22*f1mf4*f2mf4) + d1*f1mf2*f1mf4*f2mf42 - 3*f1*f22*v1 + 2*f23*v1 + 6*f1*f2*f4*v1 - 3*f22*f4*v1
890 - 3*f1*f42*v1 + f43*v1 + f13*v2 - 3*f12*f4*v2 + 3*f1*f42*v2 - f43*v2 - f1mf22*(f1 + 2*f2 - 3*f4)*v4)/(f1mf22*f1mf43*f2mf42));
891
892 break;
893 }
894 case 105:
895 {
896 UNUSED double f12 = f1*f1;
897 UNUSED double f13 = f12*f1;
898 UNUSED double f14 = f13*f1;
899 UNUSED double f15 = f14*f1;
900 UNUSED double f16 = f15*f1;
901 UNUSED double f17 = f16*f1;
902
903 UNUSED double f22 = f2*f2;
904 UNUSED double f23 = f22*f2;
905 UNUSED double f24 = f23*f2;
906 UNUSED double f25 = f24*f2;
907 UNUSED double f26 = f25*f2;
908 UNUSED double f27 = f26*f2;
909
910 UNUSED double f32 = f3*f3;
911 UNUSED double f33 = f32*f3;
912 UNUSED double f34 = f33*f3;
913 UNUSED double f35 = f34*f3;
914 UNUSED double f36 = f35*f3;
915 UNUSED double f37 = f36*f3;
916
917 UNUSED double f42 = f4*f4;
918 UNUSED double f43 = f42*f4;
919 UNUSED double f44 = f43*f4;
920 UNUSED double f45 = f44*f4;
921 UNUSED double f46 = f45*f4;
922 UNUSED double f47 = f46*f4;
923
924 UNUSED double f1mf2 = f1-f2;
925 UNUSED double f1mf3 = f1-f3;
926 UNUSED double f1mf4 = f1-f4;
927 UNUSED double f2mf3 = f2-f3;
928 UNUSED double f2mf4 = f2-f4;
929 UNUSED double f3mf4 = f3-f4;
930
931 UNUSED double f1mf22 = f1mf2*f1mf2;
932 UNUSED double f1mf32 = f1mf3*f1mf3;
933 UNUSED double f1mf42 = f1mf4*f1mf4;
934 UNUSED double f2mf32 = f2mf3*f2mf3;
935 UNUSED double f2mf42 = f2mf4*f2mf4;
936 UNUSED double f3mf42 = f3mf4*f3mf4;
937 UNUSED double f1mf43 = f1mf4*f1mf4*f1mf4;
938
939 retVal = (
940 (-(d4*f1mf22*f1mf32*f1mf4*f2mf3*f2mf4*f3mf4*(2*f1 + f2 + f3 + f4)) - d1*f1mf2*f1mf3*f1mf4*f2mf3*f2mf42*f3mf42*(f1 + f2 + f3 + 2*f4) + 5*f13*f23*f32*v1 - 3*f1*f25*f32*v1 - 5*f13*f22*f33*v1 +
941 2*f25*f33*v1 + 3*f1*f22*f35*v1 - 2*f23*f35*v1 - 10*f13*f23*f3*f4*v1 + 6*f1*f25*f3*f4*v1 + 5*f12*f23*f32*f4*v1 - 3*f25*f32*f4*v1 + 10*f13*f2*f33*f4*v1 - 5*f12*f22*f33*f4*v1 -
942 6*f1*f2*f35*f4*v1 + 3*f22*f35*f4*v1 + 5*f13*f23*f42*v1 - 3*f1*f25*f42*v1 + 15*f13*f22*f3*f42*v1 - 10*f12*f23*f3*f42*v1 - 15*f13*f2*f32*f42*v1 + 5*f1*f23*f32*f42*v1 - 5*f13*f33*f42*v1 +
943 10*f12*f2*f33*f42*v1 - 5*f1*f22*f33*f42*v1 + 3*f1*f35*f42*v1 - 10*f13*f22*f43*v1 + 5*f12*f23*f43*v1 + f25*f43*v1 + 15*f12*f22*f3*f43*v1 - 10*f1*f23*f3*f43*v1 + 10*f13*f32*f43*v1 -
944 15*f12*f2*f32*f43*v1 + 5*f23*f32*f43*v1 - 5*f12*f33*f43*v1 + 10*f1*f2*f33*f43*v1 - 5*f22*f33*f43*v1 - f35*f43*v1 + 5*f13*f2*f44*v1 - 10*f12*f22*f44*v1 + 5*f1*f23*f44*v1 - 5*f13*f3*f44*v1 +
945 10*f12*f32*f44*v1 - 5*f1*f33*f44*v1 + 5*f12*f2*f45*v1 + 2*f1*f22*f45*v1 - 3*f23*f45*v1 - 5*f12*f3*f45*v1 - 2*f1*f32*f45*v1 + 3*f33*f45*v1 - 4*f1*f2*f46*v1 + 2*f22*f46*v1 + 4*f1*f3*f46*v1 -
946 2*f32*f46*v1 - 2*f16*f32*v2 + 3*f15*f33*v2 - f13*f35*v2 + 4*f16*f3*f4*v2 - 2*f15*f32*f4*v2 - 5*f14*f33*f4*v2 + 3*f12*f35*f4*v2 - 2*f16*f42*v2 - 5*f15*f3*f42*v2 + 10*f14*f32*f42*v2 -
947 3*f1*f35*f42*v2 + 4*f15*f43*v2 - 5*f14*f3*f43*v2 + f35*f43*v2 + 5*f13*f3*f44*v2 - 10*f12*f32*f44*v2 + 5*f1*f33*f44*v2 - 4*f13*f45*v2 + 5*f12*f3*f45*v2 + 2*f1*f32*f45*v2 - 3*f33*f45*v2 +
948 2*f12*f46*v2 - 4*f1*f3*f46*v2 + 2*f32*f46*v2 + 2*f16*f22*v3 - 3*f15*f23*v3 + f13*f25*v3 - 4*f16*f2*f4*v3 + 2*f15*f22*f4*v3 + 5*f14*f23*f4*v3 - 3*f12*f25*f4*v3 + 2*f16*f42*v3 +
949 5*f15*f2*f42*v3 - 10*f14*f22*f42*v3 + 3*f1*f25*f42*v3 - 4*f15*f43*v3 + 5*f14*f2*f43*v3 - f25*f43*v3 - 5*f13*f2*f44*v3 + 10*f12*f22*f44*v3 - 5*f1*f23*f44*v3 + 4*f13*f45*v3 - 5*f12*f2*f45*v3 -
950 2*f1*f22*f45*v3 + 3*f23*f45*v3 - 2*f12*f46*v3 + 4*f1*f2*f46*v3 - 2*f22*f46*v3 -
951 f1mf22*f1mf32*f2mf3*(2*f2*f3*(f2 + f3) + 2*f12*(f2 + f3 - 2*f4) - 3*(f22 + f2*f3 + f32)*f4 + f1*(f22 + 5*f2*f3 + f32 - 6*(f2 + f3)*f4 + 5*f42) + 5*f43)*v4)/
952 (f1mf22*f1mf32*f1mf43*f2mf3*f2mf42*f3mf42)
953 );
954
955 break;
956 }
957 default:
958 {
959 XLAL_ERROR_REAL8(XLAL_EINVAL, "Error in IMRPhenomX_Intermediate_Amp_22_v3: IMRPhenomXIntermediateAmpVersion is not valid. Recommended flag is 104.\n");
960 }
961 }
962
963 return retVal;
964}
965
966/* Solves system of equations for 5th order polynomial ansatz. Section VI.B, Eq. 6.5 of arXiv:2001.11412. Note that \alpha in paper are \delta in code here. */
967static double IMRPhenomX_Intermediate_Amp_22_delta5(double d1, double d4, double v1, double v2, double v3, double v4, double f1, double f2, double f3, double f4, int IntAmpFlag)
968{
969 double retVal;
970
971 switch ( IntAmpFlag )
972 {
973 case 1043: //no left derivative: v1, v2, v3, v4, d4; 1043 is used in IMRPhenomXHM
974 {
975 retVal = 0.;
976 break;
977 }
978 case 104:
979 {
980 retVal = 0.0;
981 break;
982 }
983 case 105:
984 {
985 UNUSED double f12 = f1*f1;
986 UNUSED double f13 = f12*f1;
987 UNUSED double f14 = f13*f1;
988 UNUSED double f15 = f14*f1;
989 UNUSED double f16 = f15*f1;
990 UNUSED double f17 = f16*f1;
991
992 UNUSED double f22 = f2*f2;
993 UNUSED double f23 = f22*f2;
994 UNUSED double f24 = f23*f2;
995 UNUSED double f25 = f24*f2;
996 UNUSED double f26 = f25*f2;
997 UNUSED double f27 = f26*f2;
998
999 UNUSED double f32 = f3*f3;
1000 UNUSED double f33 = f32*f3;
1001 UNUSED double f34 = f33*f3;
1002 UNUSED double f35 = f34*f3;
1003 UNUSED double f36 = f35*f3;
1004 UNUSED double f37 = f36*f3;
1005
1006 UNUSED double f42 = f4*f4;
1007 UNUSED double f43 = f42*f4;
1008 UNUSED double f44 = f43*f4;
1009 UNUSED double f45 = f44*f4;
1010 UNUSED double f46 = f45*f4;
1011 UNUSED double f47 = f46*f4;
1012
1013 UNUSED double f1mf2 = f1-f2;
1014 UNUSED double f1mf3 = f1-f3;
1015 UNUSED double f1mf4 = f1-f4;
1016 UNUSED double f2mf3 = f2-f3;
1017 UNUSED double f2mf4 = f2-f4;
1018 UNUSED double f3mf4 = f3-f4;
1019
1020 UNUSED double f1mf22 = f1mf2*f1mf2;
1021 UNUSED double f1mf32 = f1mf3*f1mf3;
1022 UNUSED double f1mf42 = f1mf4*f1mf4;
1023 UNUSED double f2mf32 = f2mf3*f2mf3;
1024 UNUSED double f2mf42 = f2mf4*f2mf4;
1025 UNUSED double f3mf42 = f3mf4*f3mf4;
1026 UNUSED double f1mf43 = f1mf4*f1mf4*f1mf4;
1027
1028 retVal = (
1029 (d4*f1mf22*f1mf32*f1mf4*f2mf3*f2mf4*f3mf4 + d1*f1mf2*f1mf3*f1mf4*f2mf3*f2mf42*f3mf42 - 4*f12*f23*f32*v1 + 3*f1*f24*f32*v1 + 4*f12*f22*f33*v1 - 2*f24*f33*v1 - 3*f1*f22*f34*v1 + 2*f23*f34*v1 +
1030 8*f12*f23*f3*f4*v1 - 6*f1*f24*f3*f4*v1 - 4*f1*f23*f32*f4*v1 + 3*f24*f32*f4*v1 - 8*f12*f2*f33*f4*v1 + 4*f1*f22*f33*f4*v1 + 6*f1*f2*f34*f4*v1 - 3*f22*f34*f4*v1 - 4*f12*f23*f42*v1 +
1031 3*f1*f24*f42*v1 - 12*f12*f22*f3*f42*v1 + 8*f1*f23*f3*f42*v1 + 12*f12*f2*f32*f42*v1 - 4*f23*f32*f42*v1 + 4*f12*f33*f42*v1 - 8*f1*f2*f33*f42*v1 + 4*f22*f33*f42*v1 - 3*f1*f34*f42*v1 +
1032 8*f12*f22*f43*v1 - 4*f1*f23*f43*v1 - f24*f43*v1 - 8*f12*f32*f43*v1 + 4*f1*f33*f43*v1 + f34*f43*v1 - 4*f12*f2*f44*v1 - f1*f22*f44*v1 + 2*f23*f44*v1 + 4*f12*f3*f44*v1 + f1*f32*f44*v1 -
1033 2*f33*f44*v1 + 2*f1*f2*f45*v1 - f22*f45*v1 - 2*f1*f3*f45*v1 + f32*f45*v1 + f15*f32*v2 - 2*f14*f33*v2 + f13*f34*v2 - 2*f15*f3*f4*v2 + f14*f32*f4*v2 + 4*f13*f33*f4*v2 - 3*f12*f34*f4*v2 +
1034 f15*f42*v2 + 4*f14*f3*f42*v2 - 8*f13*f32*f42*v2 + 3*f1*f34*f42*v2 - 3*f14*f43*v2 + 8*f12*f32*f43*v2 - 4*f1*f33*f43*v2 - f34*f43*v2 + 3*f13*f44*v2 - 4*f12*f3*f44*v2 - f1*f32*f44*v2 +
1035 2*f33*f44*v2 - f12*f45*v2 + 2*f1*f3*f45*v2 - f32*f45*v2 - f15*f22*v3 + 2*f14*f23*v3 - f13*f24*v3 + 2*f15*f2*f4*v3 - f14*f22*f4*v3 - 4*f13*f23*f4*v3 + 3*f12*f24*f4*v3 - f15*f42*v3 -
1036 4*f14*f2*f42*v3 + 8*f13*f22*f42*v3 - 3*f1*f24*f42*v3 + 3*f14*f43*v3 - 8*f12*f22*f43*v3 + 4*f1*f23*f43*v3 + f24*f43*v3 - 3*f13*f44*v3 + 4*f12*f2*f44*v3 + f1*f22*f44*v3 - 2*f23*f44*v3 +
1037 f12*f45*v3 - 2*f1*f2*f45*v3 + f22*f45*v3 + f1mf22*f1mf32*f2mf3*(2*f2*f3 + f1*(f2 + f3 - 2*f4) - 3*(f2 + f3)*f4 + 4*f42)*v4)/(f1mf22*f1mf32*f1mf43*f2mf3*f2mf42*f3mf42)
1038 );
1039
1040 break ;
1041 }
1042 default:
1043 {
1044 XLAL_ERROR_REAL8(XLAL_EINVAL, "Error in IMRPhenomX_Intermediate_Amp_22_v3: IMRPhenomXIntermediateAmpVersion is not valid. Recommended flag is 104.\n");
1045 }
1046 }
1047
1048 return retVal;
1049}
1050
/* Intermediate amplitude ansatz: evaluates the interpolating polynomial of Eq. 6.5, Section VI.B of arXiv:2001.11412, embedded in the f^(7/6) merger background. Note that \alpha in the paper are \delta in the code here. */
{
  /* Polynomial coefficients delta0..delta5 solved for by the
     IMRPhenomX_Intermediate_Amp_22_delta* functions above */
  double a0 = pAmp->delta0;
  double a1 = pAmp->delta1;
  double a2 = pAmp->delta2;
  double a3 = pAmp->delta3;
  double a4 = pAmp->delta4;
  double a5 = pAmp->delta5;

  /* Pre-computed f^(7/6): the background factor the polynomial is embedded in */
  double ff7o6 = powers_of_f->seven_sixths;

  int IntAmpFlag = pWF->IMRPhenomXIntermediateAmpVersion;

  double polynomial;

  switch ( IntAmpFlag )
  {
    case 1043: //1043 is used in IMRPhenomXHM
    case 104:
    {
      /* 4th-order polynomial evaluated in Horner form (a5 unused here) */
      //polynomial = a0 + a1*ff + a2*ff2 + a3*ff3 + a4*ff4;
      polynomial = a0 + ff*(a1 + ff*(a2 + ff*(a3 + ff*a4)));

      break;
    }
    case 105:
    {
      /* 5th-order polynomial evaluated in Horner form */
      //polynomial = a0 + a1*ff + a2*ff2 + a3*ff3 + a4*ff4 + a5*ff5;
      polynomial = a0 + ff*(a1 + ff*(a2 + ff*(a3 + ff*(a4 + ff*a5))));

      break;
    }
    default:
    {
      XLAL_ERROR_REAL8(XLAL_EINVAL, "Error in IMRPhenomX_Intermediate_Amp_22_Ansatz: IMRPhenomXIntermediateAmpVersion is not valid. Recommended flag is 104.\n");

      /* NOTE(review): XLAL_ERROR_REAL8 is expected to return from the function,
         so these fallback assignments should be unreachable -- confirm against
         the macro definition */
      ff7o6 = 0.0;
      polynomial = 1.0;

      break;
    }
  }

  /* Return a polynomial embedded in the background from the merger */
  return ff7o6 / polynomial;
}
1098
1099/******************************* Phase Functions: Merger *******************************/
1100
1101/* Intermediate phase collocation point v3. See Section VII.B of arXiv:2001.11412. */
1102UNUSED static double IMRPhenomX_Intermediate_Phase_22_v3(double eta, double S, double dchi, double delta, int IntPhaseFlag){
1103
1104 double eta2 = eta*eta;
1105 double eta3 = eta2*eta;
1106 double eta4 = eta3*eta;
1107
1108 double S2 = S*S;
1109 double S3 = S2*S;
1110
1111 double noSpin, eqSpin, uneqSpin;
1112
1113 switch ( IntPhaseFlag )
1114 {
1115 case 104: /* Canonical, 4 coefficients */
1116 {
1117 noSpin = (-85.65000000000003 - 413.9060603156141*eta - 4784.537141069749*eta2 + 1490.1208098409852*eta3 + 10040.191767983953*eta4)/(1. + 5.779247659216663*eta + 69.57046710291102*eta2);
1118
1119 eqSpin = (S*(4.692726457075735 - 7.038980703663575*S + eta2*(87.92695072054693 - 53.72849910673372*S - 40.89299875429382*S2) + 1.809693754991601*S2 + eta*(-13.873355143236994 + 11.141463520702901*S + 6.450651190707574*S2) + eta3*(-214.15302815157358 + 165.87430354875272*S + 53.19430999626736*S2 - 2.8784864337832756*S3) + 0.18008416467045393*S3))/(-0.19174821584528354 + 0.22339091810747033*S + 1.*eta3*S - 0.06756256922190347*S2);
1120
1121 uneqSpin = dchi*delta*eta3*(2989.296934478898 - 8693.785422139264*eta + 230.93311202289289*S);
1122
1123
1124 break;
1125 }
1126 case 105: /* Canonical, 5 coefficients */
1127 {
1128 noSpin = (-85.31300000000003 - 10937.096332153926*eta + 24894.22839497624*eta2 - 41966.01695284205*eta3 + 67157.48707167112*eta4)/(1. + 132.44486857622041*eta);
1129
1130 eqSpin = (S*(-50.47357379240076 + 73.09727082201734*S + eta3*(3596.7780512820414 - 2213.251793086697*S - 2751.2131780622212*S2) + eta*(156.5226913355741 - 58.19427796874254*S - 243.59756424149438*S2) - 14.822551015459917*S2 + eta2*(-1399.6118302073216 + 505.3549556613735*S + 1522.6588404541071*S2)))/(1.9694717754728777 - 2.272396692525365*S + 1.*S2);
1131
1132 uneqSpin = dchi*delta*eta3*(3453.778014556621 + eta*(-10048.103462582065 + 1757.9089741373239*S));
1133
1134 break;
1135 }
1136 default:
1137 {
1138 XLAL_ERROR_REAL8(XLAL_EINVAL, "Error in IMRPhenomX_Intermediate_Phase_22_v3: IMRPhenomXIntermediatePhaseVersion is not valid. Recommended flag is 104.\n");
1139 break;
1140 }
1141 }
1142
1143 return (noSpin + eqSpin + uneqSpin);
1144}
1145
1146/* Intermediate phase collocation point v2. See Section VII.B of arXiv:2001.11412. */
1147static double IMRPhenomX_Intermediate_Phase_22_v2(double eta, double S, double dchi, double delta, int IntPhaseFlag){
1148
1149 double eta2 = eta*eta;
1150 double eta3 = eta2*eta;
1151
1152 double S2 = S*S;
1153 double S3 = S2*S;
1154 double S4 = S3*S;
1155
1156 double noSpin, eqSpin, uneqSpin;
1157
1158 switch ( IntPhaseFlag )
1159 {
1160 case 104: /* Canonical, 4 coefficients */
1161 {
1162 noSpin = (-84.09400000000004 - 1782.8025405571802*eta + 5384.38721936653*eta2)/(1. + 28.515617312596103*eta + 12.404097877099353*eta2);
1163
1164 eqSpin = (S*(22.5665046165141 - 39.94907120140026*S + 4.668251961072*S2 + 12.648584361431245*S3 + eta2*(-298.7528127869681 + 14.228745354543983*S + 398.1736953382448*S2 + 506.94924905801673*S3 - 626.3693674479357*S4) - 5.360554789680035*S4 + eta*(152.07900889608595 - 121.70617732909962*S2 - 169.36311036967322*S3 + 182.40064586992762*S4)))/(-1.1571201220629341 + 1.*S);
1165
1166 uneqSpin = dchi*delta*eta3*(5357.695021063607 - 15642.019339339662*eta + 674.8698102366333*S);
1167
1168 break;
1169 }
1170 case 105: /* Canonical, 5 coefficients */
1171 {
1172 noSpin = (-82.54500000000004 - 5.58197349185435e6*eta - 3.5225742421184325e8*eta2 + 1.4667258334378073e9*eta3)/(1. + 66757.12830903867*eta + 5.385164380400193e6*eta2 + 2.5176585751772933e6*eta3);
1173
1174 eqSpin = (S*(19.416719811164853 - 36.066611959079935*S - 0.8612656616290079*S2 + eta2*(170.97203068800542 - 107.41099349364234*S - 647.8103976942541*S3) + 5.95010003393006*S3 + eta3*(-1365.1499998427248 + 1152.425940764218*S + 415.7134909564443*S2 + 1897.5444343138167*S3 - 866.283566780576*S4) + 4.984750041013893*S4 + eta*(207.69898051583655 - 132.88417400679026*S - 17.671713040498304*S2 + 29.071788188638315*S3 + 37.462217031512786*S4)))/(-1.1492259468169692 + 1.*S);
1175
1176 uneqSpin = dchi*delta*eta3*(7343.130973149263 - 20486.813161100774*eta + 515.9898508588834*S);
1177
1178 break;
1179 }
1180 default:
1181 {
1182 XLAL_ERROR_REAL8(XLAL_EINVAL, "Error in IMRPhenomX_Intermediate_Phase_22_v3: IMRPhenomXIntermediatePhaseVersion is not valid. Recommended flag is 104.\n");
1183 break;
1184 }
1185 }
1186
1187 return (noSpin + eqSpin + uneqSpin);
1188}
1189
1190/* v2mRDv4 = vIM2 - vRD4. See Section VII.B of arXiv:2001.11412. */
1191static double IMRPhenomX_Intermediate_Phase_22_v2mRDv4(double eta, double S, double dchi, double delta, int IntPhaseFlag){
1192
1193 double eta2 = eta*eta;
1194 double eta3 = eta2*eta;
1195
1196 double S2 = S*S;
1197
1198 double noSpin, eqSpin, uneqSpin;
1199
1200 switch ( IntPhaseFlag )
1201 {
1202 case 104: /* Canonical, 4 coefficients */
1203 {
1204 noSpin = (eta*(-8.244230124407343 - 182.80239160435949*eta + 638.2046409916306*eta2 - 578.878727101827*eta3))/(-0.004863669418916522 - 0.5095088831841608*eta + 1.*eta2);
1205
1206 eqSpin = (S*(0.1344136125169328 + 0.0751872427147183*S + eta2*(7.158823192173721 + 25.489598292111104*S - 7.982299108059922*S2) + eta*(-5.792368563647471 + 1.0190164430971749*S + 0.29150399620268874*S2) + 0.033627267594199865*S2 + eta3*(17.426503081351534 - 90.69790378048448*S + 20.080325133405847*S2)))/(0.03449545664201546 - 0.027941977370442107*S + (0.005274757661661763 + 0.0455523144123269*eta - 0.3880379911692037*eta2 + 1.*eta3)*S2);
1207
1208 uneqSpin = 160.2975913661124*dchi*delta*eta2;
1209
1210 break;
1211 }
1212 case 105: /* Canonical, 5 coefficients */
1213 {
1214 noSpin = (eta*(0.9951733419499662 + 101.21991715215253*eta + 632.4731389009143*eta2))/(0.00016803066316882238 + 0.11412314719189287*eta + 1.8413983770369362*eta2 + 1.*eta3);
1215
1216 eqSpin = (S*(18.694178521101332 + 16.89845522539974*S + 4941.31613710257*eta2*S + eta*(-697.6773920613674 - 147.53381808989846*S2) + 0.3612417066833153*S2 + eta3*(3531.552143264721 - 14302.70838220423*S + 178.85850322465944*S2)))/(2.965640445745779 - 2.7706595614504725*S + 1.*S2);
1217
1218 uneqSpin = dchi*delta*eta2*(356.74395864902294 + 1693.326644293169*eta2*S);
1219
1220 break;
1221 }
1222 default:
1223 {
1224 XLAL_ERROR_REAL8(XLAL_EINVAL, "Error in IMRPhenomX_Intermediate_Phase_22_d23: IMRPhenomXIntermediatePhaseVersion is not valid. Recommended flag is 104.\n");
1225 break;
1226 }
1227 }
1228
1229 return (noSpin + eqSpin + uneqSpin);
1230}
1231
1232/* v3mRDv4 = vIM3 - vRD4. See Section VII.B of arXiv:2001.11412. */
1233static double IMRPhenomX_Intermediate_Phase_22_v3mRDv4(double eta, double S, double dchi, double delta, int IntPhaseFlag){
1234
1235 double eta2 = eta*eta;
1236 double eta3 = eta2*eta;
1237 double eta4 = eta3*eta;
1238 double eta6 = eta3*eta3;
1239
1240 double S2 = S*S;
1241
1242 double noSpin, eqSpin, uneqSpin;
1243
1244 switch ( IntPhaseFlag )
1245 {
1246 case 104: /* Canonical, 4 coefficients */
1247 {
1248 noSpin = (0.3145740304678042 + 299.79825045000655*eta - 605.8886581267144*eta2 - 1302.0954503758007*eta3)/(1. + 2.3903703875353255*eta - 19.03836730923657*eta2);
1249
1250 eqSpin = (S*(1.150925084789774 - 0.3072381261655531*S + eta4*(12160.22339193134 - 1459.725263347619*S - 9581.694749116636*S2) + eta2*(1240.3900459406875 - 289.48392062629966*S - 1218.1320231846412*S2) - 1.6191217310064605*S2 + eta*(-41.38762957457647 + 60.47477582076457*S2) + eta3*(-7540.259952257055 + 1379.3429194626635*S + 6372.99271204178*S2)))/(-1.4325421225106187 + 1.*S);
1251
1252 uneqSpin = dchi*delta*eta3*(-444.797248674011 + 1448.47758082697*eta + 152.49290092435044*S);
1253
1254 break;
1255 }
1256 case 105: /* Canonical, 5 coefficients */
1257 {
1258 noSpin = (eta*(-5.126358906504587 - 227.46830225846668*eta + 688.3609087244353*eta2 - 751.4184178636324*eta3))/(-0.004551938711031158 - 0.7811680872741462*eta + 1.*eta2);
1259
1260 eqSpin = (S*(0.1549280856660919 - 0.9539250460041732*S - 539.4071941841604*eta2*S + eta*(73.79645135116367 - 8.13494176717772*S2) - 2.84311102369862*S2 + eta3*(-936.3740515136005 + 1862.9097047992134*S + 224.77581754671272*S2)))/(-1.5308507364054487 + 1.*S);
1261
1262 uneqSpin = 2993.3598520496153*dchi*delta*eta6;
1263
1264 break;
1265 }
1266 default:
1267 {
1268 XLAL_ERROR_REAL8(XLAL_EINVAL, "Error in IMRPhenomX_Intermediate_Phase_22_d23: IMRPhenomXIntermediatePhaseVersion is not valid. Recommended flag is 104.\n");
1269 break;
1270 }
1271 }
1272
1273 return (noSpin + eqSpin + uneqSpin);
1274}
1275
1276/* d43 = v4 - v3. See Section VII.B of arXiv:2001.11412. */
1277static double IMRPhenomX_Intermediate_Phase_22_d43(double eta, double S, double dchi, double delta, int IntPhaseFlag){
1278
1279 double eta2 = eta*eta;
1280 double eta3 = eta2*eta;
1281 double eta4 = eta3*eta;
1282
1283 double S2 = S*S;
1284
1285 double noSpin, eqSpin, uneqSpin;
1286
1287 switch ( IntPhaseFlag )
1288 {
1289 case 104:
1290 {
1291 XLAL_ERROR_REAL8(XLAL_EINVAL, "Error in IMRPhenomX_Intermediate_Phase_22_d43: Point d43 should not be called when using IMRPhenomXIntermediatePhaseVersion == 104.\n");
1292 break;
1293 }
1294 case 105: /* Canonical, 5 coefficients */
1295 {
1296 noSpin = (0.4248820426833804 - 906.746595921514*eta - 282820.39946006844*eta2 - 967049.2793750163*eta3 + 670077.5414916876*eta4)/(1. + 1670.9440812294847*eta + 19783.077247023448*eta2);
1297
1298 eqSpin = (S*(0.22814271667259703 + 1.1366593671801855*S + eta3*(3499.432393555856 - 877.8811492839261*S - 4974.189172654984*S2) + eta*(12.840649528989287 - 61.17248283184154*S2) + 0.4818323187946999*S2 + eta2*(-711.8532052499075 + 269.9234918621958*S + 941.6974723887743*S2) + eta4*(-4939.642457025497 - 227.7672020783411*S + 8745.201037897836*S2)))/(-1.2442293719740283 + 1.*S);
1299
1300 uneqSpin = dchi*delta*(-514.8494071830514 + 1493.3851099678195*eta)*eta3;
1301
1302 break;
1303 }
1304 default:
1305 {
1306 XLAL_ERROR_REAL8(XLAL_EINVAL, "Error in IMRPhenomX_Intermediate_Phase_22_d43: IMRPhenomXIntermediatePhaseVersion is not valid. Recommended flag is 104.\n");
1307 break;
1308 }
1309 }
1310
1311 return (noSpin + eqSpin + uneqSpin);
1312}
1313
1314
1315/*
1316 See section VII. B of arXiv:2001.11412.
1317
1318 Phase derivative ansatz for intermediate region, Eq. 7.6 of arXiv:2001.11412.
1319
1320 This is the canonical intermediate ansatz:
1321
1322 a_0 + a_1 ft^(-1) + a_2 ft^(-2) + a_3 ft^(-3) + a4 ft^(-4) + (4 * a_RD) / ( (2 * f_fdamp)^2 + (f - f_ring)^2 )
1323
1324 ft = (f / f_ring)
1325*/
{
  /* Pre-computed negative powers of f: f^-1 .. f^-4 */
  double invff1 = powers_of_f->m_one;
  double invff2 = powers_of_f->m_two;
  double invff3 = powers_of_f->m_three;
  double invff4 = powers_of_f->m_four;

  /* Ringdown damping and ringdown frequencies */
  double fda = pWF->fDAMP;
  double frd = pWF->fRING;
  /* We pass the GR Lorentzian term to make sure that variations to the GR coefficients in the ringdown decouple from the intermediate regime */
  double cL = pPhase->cLGR;

  double LorentzianTerm;
  double phaseOut = 0;

  int IntPhaseVersion = pWF->IMRPhenomXIntermediatePhaseVersion;

  switch ( IntPhaseVersion )
  {
    case 104: /* Canonical, 4 coefficients */
    {

      /* This is the Lorentzian term where cL = - a_{RD} dphase0 */
      LorentzianTerm = (4.0 * cL) / ( (4.0*fda*fda) + (ff - frd)*(ff - frd) );

      /* Return a polynomial embedded in the background from the merger.
         The b3 * f^-3 term is intentionally absent in the 4-coefficient version. */
      phaseOut = pPhase->b0 + pPhase->b1*invff1 + pPhase->b2*invff2 + pPhase->b4*invff4 + LorentzianTerm;

      break;
    }
    case 105: /* Canonical, 5 coefficients */
    {

      /* This is the Lorentzian term where cL = - a_{RD} dphase0 */
      LorentzianTerm = (4.0 * cL) / ( (4.0*fda*fda) + (ff - frd)*(ff - frd) );

      /* Return a polynomial embedded in the background from the merger */
      phaseOut = (pPhase->b0) + (pPhase->b1)*invff1 + (pPhase->b2)*invff2 + (pPhase->b3)*invff3 + (pPhase->b4)*invff4 + LorentzianTerm;

      break;
    }
    default:
    {
      XLAL_ERROR_REAL8(XLAL_EINVAL, "Error in IMRPhenomX_Intermediate_Phase_22_Ansatz: IMRPhenomXIntermediatePhaseVersion is not valid. Recommended flag is 104.\n");
    }
  }

  return phaseOut;
}
1375
1376/*
1377 See section VII. B of arXiv:2001.11412.
1378
1379 Integrated phase ansatz for intermediate region, Eq. 7.6 of arXiv:2001.11412.
1380
1381 Effective spin parameterization used = StotR
1382*/
{
  /* Pre-computed negative powers of f and log(f) */
  double invff1 = powers_of_f->m_one;
  double invff2 = powers_of_f->m_two;
  double invff3 = powers_of_f->m_three;
  double logfv = powers_of_f->log;

  /* Ringdown and damping frequencies */
  double frd = pWF->fRING;
  double fda = pWF->fDAMP;

  /* Polynomial coefficients of the phase-derivative ansatz */
  double b0 = pPhase->b0;
  double b1 = pPhase->b1;
  double b2 = pPhase->b2;
  double b3 = pPhase->b3;
  double b4 = pPhase->b4;
  /* We pass the GR Lorentzian term to make sure that variations to the GR coefficients in the ringdown decouple from the intermediate regime */
  double cL = pPhase->cLGR;

  double phaseOut;

  int IntPhaseVersion = pWF->IMRPhenomXIntermediatePhaseVersion;

  /* Term-by-term antiderivative of IMRPhenomX_Intermediate_Phase_22_Ansatz:
     b0 -> b0*f, b1/f -> b1*log(f), b2/f^2 -> -b2/f, b3/f^3 -> -b3/(2 f^2),
     b4/f^4 -> -b4/(3 f^3), and the Lorentzian integrates to
     2*cL*atan((f - frd)/(2 fda))/fda. */
  switch ( IntPhaseVersion )
  {
    case 104: /* Canonical, 4 coefficients (no b3 term) */
    {
      //phaseOut = pPhase->b0*f + pPhase->b1*logfv - pPhase->b2*invff1 - pPhase->b4*invff3/3.0 + ( 2.0*pPhase->cL*atan( (f - frd) / (2.0 * fda) ) ) / fda ;
      phaseOut = b0*f + b1*logfv - b2*invff1 - (b4*invff3/3.0) + ( 2.0*cL*atan( (f - frd) / (2.0 * fda) ) ) / fda ;
      break;
    }
    case 105: /* Canonical, 5 coefficients */
    {
      phaseOut = b0*f + b1*logfv - b2*invff1 - b3*invff2/2.0 - (b4*invff3/3.0) + ( 2.0 * cL * atan( (f - frd) / (2.0 * fda) ) ) / fda ;
      break;
    }
    default:
    {
      XLAL_ERROR_REAL8(XLAL_EINVAL, "Error in IMRPhenomX_Intermediate_Phase_22_AnsatzInt: IMRPhenomXIntermediatePhaseVersion is not valid. Recommended flag is 104.\n");
      break;
    }
  }

  return phaseOut;

}
const double b1
const double b2
const double b0
static double double delta
static double IMRPhenomX_Intermediate_Amp_22_v3(double eta, double S, double dchi, double delta, int IntAmpFlag)
static double IMRPhenomX_Intermediate_Amp_22_delta3(double d1, double d4, double v1, double v2, double v3, double v4, double f1, double f2, double f3, double f4, int IntAmpFlag)
static double IMRPhenomX_Intermediate_Phase_22_v2(double eta, double S, double dchi, double delta, int IntPhaseFlag)
static double IMRPhenomX_Intermediate_Amp_22_delta0(double d1, double d4, double v1, double v2, double v3, double v4, double f1, double f2, double f3, double f4, int IntAmpFlag)
static double IMRPhenomX_Intermediate_Phase_22_v3mRDv4(double eta, double S, double dchi, double delta, int IntPhaseFlag)
static double IMRPhenomX_Intermediate_Amp_22_delta1(double d1, double d4, double v1, double v2, double v3, double v4, double f1, double f2, double f3, double f4, int IntAmpFlag)
static double IMRPhenomX_Intermediate_Amp_22_delta4(double d1, double d4, double v1, double v2, double v3, double v4, double f1, double f2, double f3, double f4, int IntAmpFlag)
static double IMRPhenomX_Intermediate_Phase_22_AnsatzInt(double f, IMRPhenomX_UsefulPowers *powers_of_f, IMRPhenomXWaveformStruct *pWF, IMRPhenomXPhaseCoefficients *pPhase)
static double IMRPhenomX_Intermediate_Amp_22_v2(double eta, double S, double dchi, double delta, int IntAmpFlag)
static double IMRPhenomX_Intermediate_Phase_22_v2mRDv4(double eta, double S, double dchi, double delta, int IntPhaseFlag)
static double IMRPhenomX_Intermediate_Amp_22_delta5(double d1, double d4, double v1, double v2, double v3, double v4, double f1, double f2, double f3, double f4, int IntAmpFlag)
static double IMRPhenomX_Intermediate_Phase_22_d43(double eta, double S, double dchi, double delta, int IntPhaseFlag)
static double IMRPhenomX_Intermediate_Amp_22_delta2(double d1, double d4, double v1, double v2, double v3, double v4, double f1, double f2, double f3, double f4, int IntAmpFlag)
static double IMRPhenomX_Intermediate_Amp_22_vA(double eta, double S, double dchi, double delta, int IntAmpFlag)
static UNUSED double IMRPhenomX_Intermediate_Phase_22_v3(double eta, double S, double dchi, double delta, int IntPhaseFlag)
static double IMRPhenomX_Intermediate_Amp_22_Ansatz(double ff, IMRPhenomX_UsefulPowers *powers_of_f, IMRPhenomXWaveformStruct *pWF, IMRPhenomXAmpCoefficients *pAmp)
static double IMRPhenomX_Intermediate_Phase_22_Ansatz(double ff, IMRPhenomX_UsefulPowers *powers_of_f, IMRPhenomXWaveformStruct *pWF, IMRPhenomXPhaseCoefficients *pPhase)
static const REAL8 f14[]
const double a4
const double a2
#define XLAL_ERROR_REAL8(...)
XLAL_EINVAL