Coverage for bilby/core/utils/calculus.py: 41% (111 statements)
import math

import numpy as np
from scipy.interpolate import RectBivariateSpline
from scipy.special import logsumexp

from .log import logger


def derivatives(
    vals,
    func,
    releps=1e-3,
    abseps=None,
    mineps=1e-9,
    reltol=1e-3,
    epsscale=0.5,
    nonfixedidx=None,
):
    """
    Calculate the partial derivatives of a function at a set of values. The
    derivatives are calculated using the central difference, using an iterative
    method to check that the values converge as the step size decreases.

    Parameters
    ==========
    vals: array_like
        A set of values, that are passed to a function, at which to calculate
        the gradient of that function
    func:
        A function that takes in an array of values.
    releps: float, array_like, 1e-3
        The initial relative step size for calculating the derivative.
    abseps: float, array_like, None
        The initial absolute step size for calculating the derivative.
        This overrides `releps` if set; otherwise `releps` is used.
    mineps: float, 1e-9
        The minimum relative step size at which to stop iterations if no
        convergence is achieved.
    reltol: float, 1e-3
        The relative tolerance within which successive derivative estimates
        must agree for the iteration to be considered converged.
    epsscale: float, 0.5
        The factor by which releps is scaled in each iteration.
    nonfixedidx: array_like, None
        An array of indices in `vals` that are _not_ fixed values and therefore
        can have derivatives taken. If `None` then derivatives of all values
        are calculated.

    Returns
    =======
    grads: array_like
        An array of gradients for each non-fixed value.
    """
    if nonfixedidx is None:
        nonfixedidx = range(len(vals))

    if len(nonfixedidx) > len(vals):
        raise ValueError("Too many non-fixed values")

    if max(nonfixedidx) >= len(vals) or min(nonfixedidx) < 0:
        raise ValueError("Non-fixed indexes contain non-existent indices")

    grads = np.zeros(len(nonfixedidx))

    # maximum number of times the gradient can change sign
    flipflopmax = 10.0

    # set steps
    if abseps is None:
        if isinstance(releps, float):
            eps = np.abs(vals) * releps
            eps[eps == 0.0] = releps  # if any values are zero set eps to releps
            teps = releps * np.ones(len(vals))
        elif isinstance(releps, (list, np.ndarray)):
            if len(releps) != len(vals):
                raise ValueError("Problem with input relative step sizes")
            eps = np.multiply(np.abs(vals), releps)
            eps[eps == 0.0] = np.array(releps)[eps == 0.0]
            teps = releps
        else:
            raise RuntimeError("Relative step sizes are not a recognised type!")
    else:
        if isinstance(abseps, float):
            eps = abseps * np.ones(len(vals))
        elif isinstance(abseps, (list, np.ndarray)):
            if len(abseps) != len(vals):
                raise ValueError("Problem with input absolute step sizes")
            eps = np.array(abseps)
        else:
            raise RuntimeError("Absolute step sizes are not a recognised type!")
        teps = eps
    # for each value in vals calculate the gradient
    count = 0
    for i in nonfixedidx:
        # initial parameter diffs
        leps = eps[i]
        cureps = teps[i]

        flipflop = 0

        # get central finite difference
        fvals = np.copy(vals)
        bvals = np.copy(vals)

        # central difference
        fvals[i] += 0.5 * leps  # change forwards distance to half eps
        bvals[i] -= 0.5 * leps  # change backwards distance to half eps
        cdiff = (func(fvals) - func(bvals)) / leps

        while True:
            fvals[i] -= 0.5 * leps  # remove old step
            bvals[i] += 0.5 * leps

            # reduce the current step size by the scale factor
            cureps *= epsscale
            if cureps < mineps or flipflop > flipflopmax:
                # if no convergence set flat derivative (TODO: check if there is a better thing to do instead)
                logger.warning(
                    "Derivative calculation did not converge: setting flat derivative."
                )
                grads[count] = 0.0
                break
            leps *= epsscale

            # central difference
            fvals[i] += 0.5 * leps  # change forwards distance to half eps
            bvals[i] -= 0.5 * leps  # change backwards distance to half eps
            cdiffnew = (func(fvals) - func(bvals)) / leps

            if cdiffnew == cdiff:
                grads[count] = cdiff
                break

            # check whether previous diff and current diff are the same within reltol
            rat = cdiff / cdiffnew
            if np.isfinite(rat) and rat > 0.0:
                # gradient has not changed sign
                if np.abs(1.0 - rat) < reltol:
                    grads[count] = cdiffnew
                    break
                else:
                    cdiff = cdiffnew
                    continue
            else:
                cdiff = cdiffnew
                flipflop += 1
                continue

        count += 1

    return grads
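
# A minimal usage sketch (illustrative, not part of the module): checking
# `derivatives` against a function with a known analytic gradient. The helper
# name `_example_derivatives` and the test values are assumptions.
def _example_derivatives():
    def quadratic(x):
        return x[0] ** 2 + 3.0 * x[1]

    vals = np.array([1.5, -2.0])
    grads = derivatives(vals, quadratic)
    # analytic gradient is [2 * x0, 3] = [3.0, 3.0]
    assert np.allclose(grads, [2.0 * vals[0], 3.0], rtol=1e-2)
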
def logtrapzexp(lnf, dx):
    """
    Perform trapezium rule integration for the logarithm of a function on a grid.

    Parameters
    ==========
    lnf: array_like
        A :class:`numpy.ndarray` of values that are the natural logarithm of a function
    dx: Union[array_like, float]
        A :class:`numpy.ndarray` of step sizes between values in the function, or a
        single step size value.

    Returns
    =======
    The natural logarithm of the area under the function.
    """
    lnfdx1 = lnf[:-1]
    lnfdx2 = lnf[1:]
    if isinstance(dx, (int, float)):
        C = np.log(dx / 2.0)
    elif isinstance(dx, (list, np.ndarray)):
        if len(dx) != len(lnf) - 1:
            raise ValueError(
                "Step size array must have length one less than the function length"
            )

        lndx = np.log(dx)
        lnfdx1 = lnfdx1.copy() + lndx
        lnfdx2 = lnfdx2.copy() + lndx
        C = -np.log(2.0)
    else:
        raise TypeError("Step size must be a single value or array-like")

    return C + logsumexp([logsumexp(lnfdx1), logsumexp(lnfdx2)])
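
# An illustrative sketch (hypothetical helper, not part of the module):
# `logtrapzexp` should agree with a direct trapezium rule applied to the
# exponentiated values when those values are well scaled. The integrand and
# grid below are arbitrary choices for the demonstration.
def _example_logtrapzexp():
    x = np.linspace(0.0, 1.0, 101)
    lnf = -0.5 * x**2  # natural log of the integrand
    dx = x[1] - x[0]
    log_area = logtrapzexp(lnf, dx)
    # direct trapezium rule on exp(lnf), for comparison
    f = np.exp(lnf)
    direct = np.log(np.sum(0.5 * dx * (f[:-1] + f[1:])))
    assert np.isclose(log_area, direct)
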
class BoundedRectBivariateSpline(RectBivariateSpline):
    """A :class:`scipy.interpolate.RectBivariateSpline` that returns
    `fill_value` for points outside the bounding box rather than
    extrapolating."""

    def __init__(self, x, y, z, bbox=[None] * 4, kx=3, ky=3, s=0, fill_value=None):
        self.x_min, self.x_max, self.y_min, self.y_max = bbox
        if self.x_min is None:
            self.x_min = min(x)
        if self.x_max is None:
            self.x_max = max(x)
        if self.y_min is None:
            self.y_min = min(y)
        if self.y_max is None:
            self.y_max = max(y)
        self.fill_value = fill_value
        super().__init__(x=x, y=y, z=z, bbox=bbox, kx=kx, ky=ky, s=s)

    def __call__(self, x, y, dx=0, dy=0, grid=False):
        result = super().__call__(x=x, y=y, dx=dx, dy=dy, grid=grid)
        out_of_bounds_x = (x < self.x_min) | (x > self.x_max)
        out_of_bounds_y = (y < self.y_min) | (y > self.y_max)
        bad = out_of_bounds_x | out_of_bounds_y
        result[bad] = self.fill_value
        if result.size == 1:
            if bad:
                return self.fill_value
            else:
                return result.item()
        else:
            return result
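
# A usage sketch (illustrative; the helper name and grid values are
# assumptions): points outside the bounding box evaluate to `fill_value`
# instead of being extrapolated.
def _example_bounded_spline():
    x = np.linspace(0.0, 1.0, 10)
    y = np.linspace(0.0, 2.0, 12)
    z = np.outer(x, y)  # z[i, j] = x[i] * y[j]
    spline = BoundedRectBivariateSpline(x, y, z, fill_value=0.0)
    inside = spline(0.5, 1.0)  # interpolated, approximately 0.5
    outside = spline(1.5, 1.0)  # 1.5 > x_max, so returns fill_value (0.0)
    return inside, outside
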
def round_up_to_power_of_two(x):
    """Round up to the next power of two

    Parameters
    ==========
    x: float

    Returns
    =======
    int: the next power of two greater than or equal to `x`
    """
    return 2 ** math.ceil(np.log2(x))
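
# Quick sanity examples (illustrative):
#     round_up_to_power_of_two(5)     # -> 8
#     round_up_to_power_of_two(1024)  # -> 1024 (exact powers are unchanged)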