Coverage for bilby/core/prior/analytical.py: 86% (451 statements). Generated by coverage.py v7.6.1 at 2025-05-06 04:57 +0000.
import numpy as np
from scipy.special import erfinv
# Import from the public scipy.special namespace rather than the private
# scipy.special._ufuncs module. Note: btdtr/btdtri are deprecated in recent
# SciPy releases (betainc/betaincinv are the documented replacements).
from scipy.special import xlogy, erf, log1p, stdtrit, gammaln, stdtr, \
    btdtri, betaln, btdtr, gammaincinv, gammainc

from .base import Prior
from ..utils import logger

class DeltaFunction(Prior):

    def __init__(self, peak, name=None, latex_label=None, unit=None):
        """Dirac delta function prior; this always returns the peak value.

        Parameters
        ==========
        peak: float
            Peak value of the delta function
        name: str
            See superclass
        latex_label: str
            See superclass
        unit: str
            See superclass

        """
        super(DeltaFunction, self).__init__(name=name, latex_label=latex_label, unit=unit,
                                            minimum=peak, maximum=peak, check_range_nonzero=False)
        self.peak = peak
        self._is_fixed = True
        self.least_recently_sampled = peak

    def rescale(self, val):
        """Rescale everything to the peak with the correct shape.

        Parameters
        ==========
        val: Union[float, int, array_like]

        Returns
        =======
        Union[float, array_like]: Rescaled value, equal to the peak with the shape of val
        """
        return self.peak * val ** 0

    def prob(self, val):
        """Return the prior probability of val

        Parameters
        ==========
        val: Union[float, int, array_like]

        Returns
        =======
        Union[float, array_like]: np.inf if val == peak, 0 otherwise
        """
        at_peak = (val == self.peak)
        return np.nan_to_num(np.multiply(at_peak, np.inf))

    def cdf(self, val):
        return np.ones_like(val) * (val > self.peak)
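
# Usage sketch (illustrative, not part of the bilby source): a DeltaFunction
# maps every unit-interval sample back to its peak and has zero density elsewhere.
#
#     >>> prior = DeltaFunction(peak=3.0)
#     >>> prior.rescale(0.7)
#     3.0
#     >>> float(prior.prob(2.0))
#     0.0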

class PowerLaw(Prior):

    def __init__(self, alpha, minimum, maximum, name=None, latex_label=None,
                 unit=None, boundary=None):
        """Power-law prior with bounds and spectral index alpha

        Parameters
        ==========
        alpha: float
            Power law exponent parameter
        minimum: float
            See superclass
        maximum: float
            See superclass
        name: str
            See superclass
        latex_label: str
            See superclass
        unit: str
            See superclass
        boundary: str
            See superclass
        """
        super(PowerLaw, self).__init__(name=name, latex_label=latex_label,
                                       minimum=minimum, maximum=maximum, unit=unit,
                                       boundary=boundary)
        self.alpha = alpha

    def rescale(self, val):
        """
        'Rescale' a sample from the unit line element to the power-law prior.

        This maps to the inverse CDF, which has been solved analytically for this case.

        Parameters
        ==========
        val: Union[float, int, array_like]
            Uniform probability

        Returns
        =======
        Union[float, array_like]: Rescaled value
        """
        if self.alpha == -1:
            return self.minimum * np.exp(val * np.log(self.maximum / self.minimum))
        else:
            return (self.minimum ** (1 + self.alpha) + val *
                    (self.maximum ** (1 + self.alpha) - self.minimum ** (1 + self.alpha))) ** (1. / (1 + self.alpha))

    def prob(self, val):
        """Return the prior probability of val

        Parameters
        ==========
        val: Union[float, int, array_like]

        Returns
        =======
        float: Prior probability of val
        """
        if self.alpha == -1:
            return np.nan_to_num(1 / val / np.log(self.maximum / self.minimum)) * self.is_in_prior_range(val)
        else:
            return np.nan_to_num(val ** self.alpha * (1 + self.alpha) /
                                 (self.maximum ** (1 + self.alpha) -
                                  self.minimum ** (1 + self.alpha))) * self.is_in_prior_range(val)

    def ln_prob(self, val):
        """Return the logarithmic prior probability of val

        Parameters
        ==========
        val: Union[float, int, array_like]

        Returns
        =======
        Union[float, array_like]: Log prior probability of val
        """
        if self.alpha == -1:
            normalising = 1. / np.log(self.maximum / self.minimum)
        else:
            normalising = (1 + self.alpha) / (self.maximum ** (1 + self.alpha) -
                                              self.minimum ** (1 + self.alpha))

        with np.errstate(divide='ignore', invalid='ignore'):
            ln_in_range = np.log(1. * self.is_in_prior_range(val))
            ln_p = self.alpha * np.nan_to_num(np.log(val)) + np.log(normalising)

        return ln_p + ln_in_range

    def cdf(self, val):
        if self.alpha == -1:
            _cdf = (np.log(val / self.minimum) /
                    np.log(self.maximum / self.minimum))
        else:
            _cdf = np.atleast_1d(val ** (self.alpha + 1) - self.minimum ** (self.alpha + 1)) / \
                (self.maximum ** (self.alpha + 1) - self.minimum ** (self.alpha + 1))
        _cdf = np.minimum(_cdf, 1)
        _cdf = np.maximum(_cdf, 0)
        return _cdf
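
# Usage sketch (illustrative): draw samples from a p(x) ~ x**alpha prior by
# pushing unit-interval draws through the analytic inverse CDF in rescale.
#
#     >>> import numpy as np
#     >>> prior = PowerLaw(alpha=-2, minimum=1, maximum=100)
#     >>> samples = prior.rescale(np.random.uniform(0, 1, 1000))
#     >>> bool(np.all((samples >= 1) & (samples <= 100)))
#     True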

class Uniform(Prior):

    def __init__(self, minimum, maximum, name=None, latex_label=None,
                 unit=None, boundary=None):
        """Uniform prior with bounds

        Parameters
        ==========
        minimum: float
            See superclass
        maximum: float
            See superclass
        name: str
            See superclass
        latex_label: str
            See superclass
        unit: str
            See superclass
        boundary: str
            See superclass
        """
        super(Uniform, self).__init__(name=name, latex_label=latex_label,
                                      minimum=minimum, maximum=maximum, unit=unit,
                                      boundary=boundary)

    def rescale(self, val):
        """
        'Rescale' a sample from the unit line element to the uniform prior.

        This maps to the inverse CDF, which has been solved analytically for this case.

        Parameters
        ==========
        val: Union[float, int, array_like]
            Uniform probability

        Returns
        =======
        Union[float, array_like]: Rescaled value
        """
        return self.minimum + val * (self.maximum - self.minimum)

    def prob(self, val):
        """Return the prior probability of val

        Parameters
        ==========
        val: Union[float, int, array_like]

        Returns
        =======
        float: Prior probability of val
        """
        return ((val >= self.minimum) & (val <= self.maximum)) / (self.maximum - self.minimum)

    def ln_prob(self, val):
        """Return the log prior probability of val

        Parameters
        ==========
        val: Union[float, int, array_like]

        Returns
        =======
        float: log probability of val
        """
        return xlogy(1, (val >= self.minimum) & (val <= self.maximum)) - xlogy(1, self.maximum - self.minimum)

    def cdf(self, val):
        _cdf = (val - self.minimum) / (self.maximum - self.minimum)
        _cdf = np.minimum(_cdf, 1)
        _cdf = np.maximum(_cdf, 0)
        return _cdf
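
# Usage sketch (illustrative): rescale is the inverse CDF, so 0.5 maps to the
# midpoint, and the density is constant at 1 / (maximum - minimum).
#
#     >>> prior = Uniform(minimum=0, maximum=10)
#     >>> prior.rescale(0.5)
#     5.0
#     >>> prior.prob(5)
#     0.1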

class LogUniform(PowerLaw):

    def __init__(self, minimum, maximum, name=None, latex_label=None,
                 unit=None, boundary=None):
        """Log-Uniform prior with bounds

        Parameters
        ==========
        minimum: float
            See superclass
        maximum: float
            See superclass
        name: str
            See superclass
        latex_label: str
            See superclass
        unit: str
            See superclass
        boundary: str
            See superclass
        """
        super(LogUniform, self).__init__(name=name, latex_label=latex_label, unit=unit,
                                         minimum=minimum, maximum=maximum, alpha=-1, boundary=boundary)
        if self.minimum <= 0:
            logger.warning('You specified a uniform-in-log prior with minimum={}'.format(self.minimum))

class SymmetricLogUniform(Prior):

    def __init__(self, minimum, maximum, name=None, latex_label=None,
                 unit=None, boundary=None):
        """Symmetric Log-Uniform distribution with bounds

        This is identical to a Log-Uniform distribution, but mirrored about
        the zero-axis and subsequently normalized. As such, the distribution
        has support on the two regions [-maximum, -minimum] and [minimum,
        maximum].

        Parameters
        ==========
        minimum: float
            See superclass
        maximum: float
            See superclass
        name: str
            See superclass
        latex_label: str
            See superclass
        unit: str
            See superclass
        boundary: str
            See superclass
        """
        super(SymmetricLogUniform, self).__init__(name=name, latex_label=latex_label,
                                                  minimum=minimum, maximum=maximum, unit=unit,
                                                  boundary=boundary)

    def rescale(self, val):
        """
        'Rescale' a sample from the unit line element to the symmetric log-uniform prior.

        This maps to the inverse CDF, which has been solved analytically for this case.

        Parameters
        ==========
        val: Union[float, int, array_like]
            Uniform probability

        Returns
        =======
        Union[float, array_like]: Rescaled value
        """
        if isinstance(val, (float, int)):
            if val < 0.5:
                return -self.maximum * np.exp(-2 * val * np.log(self.maximum / self.minimum))
            else:
                return self.minimum * np.exp(np.log(self.maximum / self.minimum) * (2 * val - 1))
        else:
            vals_less_than_half = val < 0.5
            rescaled = np.empty_like(val)
            rescaled[vals_less_than_half] = -self.maximum * np.exp(-2 * val[vals_less_than_half] *
                                                                   np.log(self.maximum / self.minimum))
            rescaled[~vals_less_than_half] = self.minimum * np.exp(np.log(self.maximum / self.minimum) *
                                                                   (2 * val[~vals_less_than_half] - 1))
            return rescaled

    def prob(self, val):
        """Return the prior probability of val

        Parameters
        ==========
        val: Union[float, int, array_like]

        Returns
        =======
        float: Prior probability of val
        """
        val = np.abs(val)
        return (np.nan_to_num(0.5 / val / np.log(self.maximum / self.minimum)) *
                self.is_in_prior_range(val))

    def ln_prob(self, val):
        """Return the logarithmic prior probability of val

        Parameters
        ==========
        val: Union[float, int, array_like]

        Returns
        =======
        Union[float, array_like]: Log prior probability of val
        """
        return np.nan_to_num(- np.log(2 * np.abs(val)) - np.log(np.log(self.maximum / self.minimum)))

    def cdf(self, val):
        val = np.atleast_1d(val)
        norm = 0.5 / np.log(self.maximum / self.minimum)
        cdf = np.zeros((len(val)))
        lower_indices = np.where(np.logical_and(-self.maximum <= val, val <= -self.minimum))[0]
        upper_indices = np.where(np.logical_and(self.minimum <= val, val <= self.maximum))[0]
        cdf[lower_indices] = -norm * np.log(-val[lower_indices] / self.maximum)
        cdf[np.where(np.logical_and(-self.minimum < val, val < self.minimum))] = 0.5
        cdf[upper_indices] = 0.5 + norm * np.log(val[upper_indices] / self.minimum)
        cdf[np.where(self.maximum < val)] = 1
        return cdf
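
# Usage sketch (illustrative): the lower half of the unit interval maps onto
# [-maximum, -minimum], the upper half onto [minimum, maximum].
#
#     >>> prior = SymmetricLogUniform(minimum=1, maximum=10)
#     >>> float(prior.rescale(0.25))
#     -3.1622776601683795
#     >>> float(prior.rescale(0.75))
#     3.1622776601683795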

class Cosine(Prior):

    def __init__(self, minimum=-np.pi / 2, maximum=np.pi / 2, name=None,
                 latex_label=None, unit=None, boundary=None):
        """Cosine prior with bounds

        Parameters
        ==========
        minimum: float
            See superclass
        maximum: float
            See superclass
        name: str
            See superclass
        latex_label: str
            See superclass
        unit: str
            See superclass
        boundary: str
            See superclass
        """
        super(Cosine, self).__init__(minimum=minimum, maximum=maximum, name=name,
                                     latex_label=latex_label, unit=unit, boundary=boundary)

    def rescale(self, val):
        """
        'Rescale' a sample from the unit line element to the cosine prior
        (i.e., uniform in sin(val)).

        This maps to the inverse CDF, which has been solved analytically for this case.
        """
        norm = 1 / (np.sin(self.maximum) - np.sin(self.minimum))
        return np.arcsin(val / norm + np.sin(self.minimum))

    def prob(self, val):
        """Return the prior probability of val. Defined over [-pi/2, pi/2].

        Parameters
        ==========
        val: Union[float, int, array_like]

        Returns
        =======
        float: Prior probability of val
        """
        return np.cos(val) / 2 * self.is_in_prior_range(val)

    def cdf(self, val):
        _cdf = np.atleast_1d((np.sin(val) - np.sin(self.minimum)) /
                             (np.sin(self.maximum) - np.sin(self.minimum)))
        _cdf[val > self.maximum] = 1
        _cdf[val < self.minimum] = 0
        return _cdf

class Sine(Prior):

    def __init__(self, minimum=0, maximum=np.pi, name=None,
                 latex_label=None, unit=None, boundary=None):
        """Sine prior with bounds

        Parameters
        ==========
        minimum: float
            See superclass
        maximum: float
            See superclass
        name: str
            See superclass
        latex_label: str
            See superclass
        unit: str
            See superclass
        boundary: str
            See superclass
        """
        super(Sine, self).__init__(minimum=minimum, maximum=maximum, name=name,
                                   latex_label=latex_label, unit=unit, boundary=boundary)

    def rescale(self, val):
        """
        'Rescale' a sample from the unit line element to the sine prior
        (i.e., uniform in cos(val)).

        This maps to the inverse CDF, which has been solved analytically for this case.
        """
        norm = 1 / (np.cos(self.minimum) - np.cos(self.maximum))
        return np.arccos(np.cos(self.minimum) - val / norm)

    def prob(self, val):
        """Return the prior probability of val. Defined over [0, pi].

        Parameters
        ==========
        val: Union[float, int, array_like]

        Returns
        =======
        Union[float, array_like]: Prior probability of val
        """
        return np.sin(val) / 2 * self.is_in_prior_range(val)

    def cdf(self, val):
        _cdf = np.atleast_1d((np.cos(val) - np.cos(self.minimum)) /
                             (np.cos(self.maximum) - np.cos(self.minimum)))
        _cdf[val > self.maximum] = 1
        _cdf[val < self.minimum] = 0
        return _cdf
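
# Usage sketch (illustrative): a Sine prior is uniform in cos(val), as used for
# inclination-type angles; the median of the default [0, pi] prior is pi/2.
#
#     >>> prior = Sine()
#     >>> float(prior.rescale(0.5))
#     1.5707963267948966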

class Gaussian(Prior):

    def __init__(self, mu, sigma, name=None, latex_label=None, unit=None, boundary=None):
        """Gaussian prior with mean mu and width sigma

        Parameters
        ==========
        mu: float
            Mean of the Gaussian prior
        sigma: float
            Width/Standard deviation of the Gaussian prior
        name: str
            See superclass
        latex_label: str
            See superclass
        unit: str
            See superclass
        boundary: str
            See superclass
        """
        super(Gaussian, self).__init__(name=name, latex_label=latex_label, unit=unit, boundary=boundary)
        self.mu = mu
        self.sigma = sigma

    def rescale(self, val):
        """
        'Rescale' a sample from the unit line element to the appropriate Gaussian prior.

        This maps to the inverse CDF, which has been solved analytically for this case.

        Parameters
        ==========
        val: Union[float, int, array_like]
        """
        return self.mu + erfinv(2 * val - 1) * 2 ** 0.5 * self.sigma

    def prob(self, val):
        """Return the prior probability of val.

        Parameters
        ==========
        val: Union[float, int, array_like]

        Returns
        =======
        Union[float, array_like]: Prior probability of val
        """
        return np.exp(-(self.mu - val) ** 2 / (2 * self.sigma ** 2)) / (2 * np.pi) ** 0.5 / self.sigma

    def ln_prob(self, val):
        """Return the log prior probability of val.

        Parameters
        ==========
        val: Union[float, int, array_like]

        Returns
        =======
        Union[float, array_like]: Log prior probability of val
        """
        return -0.5 * ((self.mu - val) ** 2 / self.sigma ** 2 + np.log(2 * np.pi * self.sigma ** 2))

    def cdf(self, val):
        return (1 - erf((self.mu - val) / 2 ** 0.5 / self.sigma)) / 2
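
# Usage sketch (illustrative): the peak density of a unit Gaussian is
# 1 / sqrt(2 * pi), and the CDF at the mean is one half.
#
#     >>> prior = Gaussian(mu=0, sigma=1)
#     >>> round(float(prior.prob(0)), 4)
#     0.3989
#     >>> float(prior.cdf(0))
#     0.5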

class Normal(Gaussian):
    """A synonym for the Gaussian distribution."""

class TruncatedGaussian(Prior):

    def __init__(self, mu, sigma, minimum, maximum, name=None,
                 latex_label=None, unit=None, boundary=None):
        """Truncated Gaussian prior with mean mu and width sigma

        https://en.wikipedia.org/wiki/Truncated_normal_distribution

        Parameters
        ==========
        mu: float
            Mean of the Gaussian prior
        sigma: float
            Width/Standard deviation of the Gaussian prior
        minimum: float
            See superclass
        maximum: float
            See superclass
        name: str
            See superclass
        latex_label: str
            See superclass
        unit: str
            See superclass
        boundary: str
            See superclass
        """
        super(TruncatedGaussian, self).__init__(name=name, latex_label=latex_label, unit=unit,
                                                minimum=minimum, maximum=maximum, boundary=boundary)
        self.mu = mu
        self.sigma = sigma

    @property
    def normalisation(self):
        """Calculates the proper normalisation of the truncated Gaussian

        Returns
        =======
        float: Proper normalisation of the truncated Gaussian
        """
        return (erf((self.maximum - self.mu) / 2 ** 0.5 / self.sigma) - erf(
            (self.minimum - self.mu) / 2 ** 0.5 / self.sigma)) / 2

    def rescale(self, val):
        """
        'Rescale' a sample from the unit line element to the appropriate truncated Gaussian prior.

        This maps to the inverse CDF, which has been solved analytically for this case.
        """
        return erfinv(2 * val * self.normalisation + erf(
            (self.minimum - self.mu) / 2 ** 0.5 / self.sigma)) * 2 ** 0.5 * self.sigma + self.mu

    def prob(self, val):
        """Return the prior probability of val.

        Parameters
        ==========
        val: Union[float, int, array_like]

        Returns
        =======
        float: Prior probability of val
        """
        return np.exp(-(self.mu - val) ** 2 / (2 * self.sigma ** 2)) / (2 * np.pi) ** 0.5 \
            / self.sigma / self.normalisation * self.is_in_prior_range(val)

    def cdf(self, val):
        val = np.atleast_1d(val)
        _cdf = (erf((val - self.mu) / 2 ** 0.5 / self.sigma) - erf(
            (self.minimum - self.mu) / 2 ** 0.5 / self.sigma)) / 2 / self.normalisation
        _cdf[val > self.maximum] = 1
        _cdf[val < self.minimum] = 0
        return _cdf
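
# Usage sketch (illustrative): the normalisation property rescales the density
# so that all the probability mass lies between minimum and maximum.
#
#     >>> prior = TruncatedGaussian(mu=0, sigma=1, minimum=0, maximum=1)
#     >>> float(prior.cdf(1.0)[0])
#     1.0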

class TruncatedNormal(TruncatedGaussian):
    """A synonym for the TruncatedGaussian distribution."""


class HalfGaussian(TruncatedGaussian):

    def __init__(self, sigma, name=None, latex_label=None, unit=None, boundary=None):
        """A Gaussian with its mode at zero, truncated to be positive only.

        Parameters
        ==========
        sigma: float
            See superclass
        name: str
            See superclass
        latex_label: str
            See superclass
        unit: str
            See superclass
        boundary: str
            See superclass
        """
        super(HalfGaussian, self).__init__(mu=0., sigma=sigma, minimum=0., maximum=np.inf,
                                           name=name, latex_label=latex_label,
                                           unit=unit, boundary=boundary)


class HalfNormal(HalfGaussian):
    """A synonym for the HalfGaussian distribution."""

class LogNormal(Prior):

    def __init__(self, mu, sigma, name=None, latex_label=None, unit=None, boundary=None):
        """Log-normal prior with mean mu and width sigma

        https://en.wikipedia.org/wiki/Log-normal_distribution

        Parameters
        ==========
        mu: float
            Mean of the underlying normal distribution
        sigma: float
            Width/Standard deviation of the underlying normal distribution
        name: str
            See superclass
        latex_label: str
            See superclass
        unit: str
            See superclass
        boundary: str
            See superclass
        """
        super(LogNormal, self).__init__(name=name, minimum=0., latex_label=latex_label,
                                        unit=unit, boundary=boundary)

        if sigma <= 0.:
            raise ValueError("For the LogNormal prior the standard deviation must be positive")

        self.mu = mu
        self.sigma = sigma

    def rescale(self, val):
        """
        'Rescale' a sample from the unit line element to the appropriate LogNormal prior.

        This maps to the inverse CDF, which has been solved analytically for this case.
        """
        return np.exp(self.mu + np.sqrt(2 * self.sigma ** 2) * erfinv(2 * val - 1))

    def prob(self, val):
        """Return the prior probability of val.

        Parameters
        ==========
        val: Union[float, int, array_like]

        Returns
        =======
        Union[float, array_like]: Prior probability of val
        """
        if isinstance(val, (float, int)):
            if val <= self.minimum:
                _prob = 0.
            else:
                _prob = np.exp(-(np.log(val) - self.mu) ** 2 / self.sigma ** 2 / 2)\
                    / np.sqrt(2 * np.pi) / val / self.sigma
        else:
            _prob = np.zeros(val.size)
            idx = (val > self.minimum)
            _prob[idx] = np.exp(-(np.log(val[idx]) - self.mu) ** 2 / self.sigma ** 2 / 2)\
                / np.sqrt(2 * np.pi) / val[idx] / self.sigma
        return _prob

    def ln_prob(self, val):
        """Return the log prior probability of val.

        Parameters
        ==========
        val: Union[float, int, array_like]

        Returns
        =======
        Union[float, array_like]: Log prior probability of val
        """
        if isinstance(val, (float, int)):
            if val <= self.minimum:
                _ln_prob = -np.inf
            else:
                _ln_prob = -(np.log(val) - self.mu) ** 2 / self.sigma ** 2 / 2\
                    - np.log(np.sqrt(2 * np.pi) * val * self.sigma)
        else:
            _ln_prob = -np.inf * np.ones(val.size)
            idx = (val > self.minimum)
            _ln_prob[idx] = -(np.log(val[idx]) - self.mu) ** 2\
                / self.sigma ** 2 / 2 - np.log(np.sqrt(2 * np.pi) * val[idx] * self.sigma)
        return _ln_prob

    def cdf(self, val):
        if isinstance(val, (float, int)):
            if val <= self.minimum:
                _cdf = 0.
            else:
                _cdf = 0.5 + erf((np.log(val) - self.mu) / self.sigma / np.sqrt(2)) / 2
        else:
            _cdf = np.zeros(val.size)
            _cdf[val > self.minimum] = 0.5 + erf((
                np.log(val[val > self.minimum]) - self.mu) / self.sigma / np.sqrt(2)) / 2
        return _cdf
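
# Usage sketch (illustrative): with mu=0 the underlying normal has median 0,
# so the LogNormal median is exp(0) = 1.
#
#     >>> prior = LogNormal(mu=0, sigma=1)
#     >>> float(prior.cdf(1.0))
#     0.5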

class LogGaussian(LogNormal):
    """A synonym for the LogNormal prior."""

class Exponential(Prior):

    def __init__(self, mu, name=None, latex_label=None, unit=None, boundary=None):
        """Exponential prior with mean mu

        Parameters
        ==========
        mu: float
            Mean of the Exponential prior
        name: str
            See superclass
        latex_label: str
            See superclass
        unit: str
            See superclass
        boundary: str
            See superclass
        """
        super(Exponential, self).__init__(name=name, minimum=0., latex_label=latex_label,
                                          unit=unit, boundary=boundary)
        self.mu = mu

    def rescale(self, val):
        """
        'Rescale' a sample from the unit line element to the appropriate Exponential prior.

        This maps to the inverse CDF, which has been solved analytically for this case.
        """
        return -self.mu * log1p(-val)

    def prob(self, val):
        """Return the prior probability of val.

        Parameters
        ==========
        val: Union[float, int, array_like]

        Returns
        =======
        Union[float, array_like]: Prior probability of val
        """
        if isinstance(val, (float, int)):
            if val < self.minimum:
                _prob = 0.
            else:
                _prob = np.exp(-val / self.mu) / self.mu
        else:
            _prob = np.zeros(val.size)
            _prob[val >= self.minimum] = np.exp(-val[val >= self.minimum] / self.mu) / self.mu
        return _prob

    def ln_prob(self, val):
        """Return the log prior probability of val.

        Parameters
        ==========
        val: Union[float, int, array_like]

        Returns
        =======
        Union[float, array_like]: Log prior probability of val
        """
        if isinstance(val, (float, int)):
            if val < self.minimum:
                _ln_prob = -np.inf
            else:
                _ln_prob = -val / self.mu - np.log(self.mu)
        else:
            _ln_prob = -np.inf * np.ones(val.size)
            _ln_prob[val >= self.minimum] = -val[val >= self.minimum] / self.mu - np.log(self.mu)
        return _ln_prob

    def cdf(self, val):
        if isinstance(val, (float, int)):
            if val < self.minimum:
                _cdf = 0.
            else:
                _cdf = 1. - np.exp(-val / self.mu)
        else:
            _cdf = np.zeros(val.size)
            _cdf[val >= self.minimum] = 1. - np.exp(-val[val >= self.minimum] / self.mu)
        return _cdf
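
# Usage sketch (illustrative): rescale inverts the CDF 1 - exp(-x / mu), so the
# median sample is mu * ln(2).
#
#     >>> prior = Exponential(mu=2.0)
#     >>> float(prior.rescale(0.5))
#     1.3862943611198906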

class StudentT(Prior):

    def __init__(self, df, mu=0., scale=1., name=None, latex_label=None,
                 unit=None, boundary=None):
        """Student's t-distribution prior with number of degrees of freedom df,
        mean mu and scale

        https://en.wikipedia.org/wiki/Student%27s_t-distribution#Generalized_Student's_t-distribution

        Parameters
        ==========
        df: float
            Number of degrees of freedom for distribution
        mu: float
            Mean of the Student's t-prior
        scale: float
            Width of the Student's t-prior
        name: str
            See superclass
        latex_label: str
            See superclass
        unit: str
            See superclass
        boundary: str
            See superclass
        """
        super(StudentT, self).__init__(name=name, latex_label=latex_label, unit=unit, boundary=boundary)

        if df <= 0. or scale <= 0.:
            raise ValueError("For the StudentT prior the number of degrees of freedom and scale must be positive")

        self.df = df
        self.mu = mu
        self.scale = scale

    def rescale(self, val):
        """
        'Rescale' a sample from the unit line element to the appropriate Student's t-prior.

        This maps to the inverse CDF, which has been solved analytically for this case.
        """
        if isinstance(val, (float, int)):
            if val == 0:
                rescaled = -np.inf
            elif val == 1:
                rescaled = np.inf
            else:
                rescaled = stdtrit(self.df, val) * self.scale + self.mu
        else:
            rescaled = stdtrit(self.df, val) * self.scale + self.mu
            rescaled[val == 0] = -np.inf
            rescaled[val == 1] = np.inf
        return rescaled

    def prob(self, val):
        """Return the prior probability of val.

        Parameters
        ==========
        val: Union[float, int, array_like]

        Returns
        =======
        Union[float, array_like]: Prior probability of val
        """
        return np.exp(self.ln_prob(val))

    def ln_prob(self, val):
        """Return the log prior probability of val.

        Parameters
        ==========
        val: Union[float, int, array_like]

        Returns
        =======
        Union[float, array_like]: Log prior probability of val
        """
        return gammaln(0.5 * (self.df + 1)) - gammaln(0.5 * self.df)\
            - np.log(np.sqrt(np.pi * self.df) * self.scale) - (self.df + 1) / 2 *\
            np.log(1 + ((val - self.mu) / self.scale) ** 2 / self.df)

    def cdf(self, val):
        return stdtr(self.df, (val - self.mu) / self.scale)
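
# Usage sketch (illustrative): with one degree of freedom the Student's t-prior
# reduces to a Cauchy distribution, symmetric about mu.
#
#     >>> prior = StudentT(df=1, mu=0., scale=1.)
#     >>> float(prior.cdf(0.0))
#     0.5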

class Beta(Prior):

    def __init__(self, alpha, beta, minimum=0, maximum=1, name=None,
                 latex_label=None, unit=None, boundary=None):
        """Beta distribution

        https://en.wikipedia.org/wiki/Beta_distribution

        See also the SciPy implementation:
        https://docs.scipy.org/doc/scipy/reference/generated/scipy.stats.beta.html

        Parameters
        ==========
        alpha: float
            first shape parameter
        beta: float
            second shape parameter
        minimum: float
            See superclass
        maximum: float
            See superclass
        name: str
            See superclass
        latex_label: str
            See superclass
        unit: str
            See superclass
        boundary: str
            See superclass
        """
        super(Beta, self).__init__(minimum=minimum, maximum=maximum, name=name,
                                   latex_label=latex_label, unit=unit, boundary=boundary)

        if alpha <= 0. or beta <= 0.:
            raise ValueError("alpha and beta must both be positive values")

        self.alpha = alpha
        self.beta = beta

    def rescale(self, val):
        """
        'Rescale' a sample from the unit line element to the appropriate Beta prior.

        This maps to the inverse CDF, which has been solved analytically for this case.
        """
        return btdtri(self.alpha, self.beta, val) * (self.maximum - self.minimum) + self.minimum

    def prob(self, val):
        """Return the prior probability of val.

        Parameters
        ==========
        val: Union[float, int, array_like]

        Returns
        =======
        Union[float, array_like]: Prior probability of val
        """
        return np.exp(self.ln_prob(val))

    def ln_prob(self, val):
        """Return the log prior probability of val.

        Parameters
        ==========
        val: Union[float, int, array_like]

        Returns
        =======
        Union[float, array_like]: Log prior probability of val
        """
        _ln_prob = xlogy(self.alpha - 1, val - self.minimum) + xlogy(self.beta - 1, self.maximum - val)\
            - betaln(self.alpha, self.beta) - xlogy(self.alpha + self.beta - 1, self.maximum - self.minimum)

        # deal with the fact that if alpha or beta are < 1 you get infinities at 0 and 1
        if isinstance(val, (float, int)):
            if np.isfinite(_ln_prob) and self.minimum <= val <= self.maximum:
                return _ln_prob
            return -np.inf
        else:
            _ln_prob_sub = np.full_like(val, -np.inf)
            idx = np.isfinite(_ln_prob) & (val >= self.minimum) & (val <= self.maximum)
            _ln_prob_sub[idx] = _ln_prob[idx]
            return _ln_prob_sub

    def cdf(self, val):
        if isinstance(val, (float, int)):
            if val > self.maximum:
                return 1.
            elif val < self.minimum:
                return 0.
            else:
                return btdtr(self.alpha, self.beta,
                             (val - self.minimum) / (self.maximum - self.minimum))
        else:
            _cdf = np.nan_to_num(btdtr(self.alpha, self.beta,
                                       (val - self.minimum) / (self.maximum - self.minimum)))
            _cdf[val < self.minimum] = 0.
            _cdf[val > self.maximum] = 1.
            return _cdf
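
# Usage sketch (illustrative): Beta(2, 2) is symmetric on [0, 1], so the
# inverse CDF maps 0.5 to the midpoint.
#
#     >>> prior = Beta(alpha=2, beta=2)
#     >>> round(float(prior.rescale(0.5)), 12)
#     0.5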

class Logistic(Prior):

    def __init__(self, mu, scale, name=None, latex_label=None, unit=None, boundary=None):
        """Logistic distribution

        https://en.wikipedia.org/wiki/Logistic_distribution

        Parameters
        ==========
        mu: float
            Mean of the distribution
        scale: float
            Width of the distribution
        name: str
            See superclass
        latex_label: str
            See superclass
        unit: str
            See superclass
        boundary: str
            See superclass
        """
        super(Logistic, self).__init__(name=name, latex_label=latex_label, unit=unit, boundary=boundary)

        if scale <= 0.:
            raise ValueError("For the Logistic prior the scale must be positive")

        self.mu = mu
        self.scale = scale

    def rescale(self, val):
        """
        'Rescale' a sample from the unit line element to the appropriate Logistic prior.

        This maps to the inverse CDF, which has been solved analytically for this case.
        """
        if isinstance(val, (float, int)):
            if val == 0:
                rescaled = -np.inf
            elif val == 1:
                rescaled = np.inf
            else:
                rescaled = self.mu + self.scale * np.log(val / (1. - val))
        else:
            rescaled = np.inf * np.ones(val.size)
            rescaled[val == 0] = -np.inf
            rescaled[(val > 0) & (val < 1)] = self.mu + self.scale\
                * np.log(val[(val > 0) & (val < 1)] / (1. - val[(val > 0) & (val < 1)]))
        return rescaled

    def prob(self, val):
        """Return the prior probability of val.

        Parameters
        ==========
        val: Union[float, int, array_like]

        Returns
        =======
        Union[float, array_like]: Prior probability of val
        """
        return np.exp(self.ln_prob(val))

    def ln_prob(self, val):
        """Return the log prior probability of val.

        Parameters
        ==========
        val: Union[float, int, array_like]

        Returns
        =======
        Union[float, array_like]: Log prior probability of val
        """
        return -(val - self.mu) / self.scale -\
            2. * np.log(1. + np.exp(-(val - self.mu) / self.scale)) - np.log(self.scale)

    def cdf(self, val):
        return 1. / (1. + np.exp(-(val - self.mu) / self.scale))

class Cauchy(Prior):

    def __init__(self, alpha, beta, name=None, latex_label=None, unit=None, boundary=None):
        """Cauchy distribution

        https://en.wikipedia.org/wiki/Cauchy_distribution

        Parameters
        ==========
        alpha: float
            Location parameter
        beta: float
            Scale parameter
        name: str
            See superclass
        latex_label: str
            See superclass
        unit: str
            See superclass
        boundary: str
            See superclass
        """
        super(Cauchy, self).__init__(name=name, latex_label=latex_label, unit=unit, boundary=boundary)

        if beta <= 0.:
            raise ValueError("For the Cauchy prior the scale must be positive")

        self.alpha = alpha
        self.beta = beta

    def rescale(self, val):
        """
        'Rescale' a sample from the unit line element to the appropriate Cauchy prior.

        This maps to the inverse CDF, which has been solved analytically for this case.
        """
        rescaled = self.alpha + self.beta * np.tan(np.pi * (val - 0.5))
        if isinstance(val, (float, int)):
            if val == 1:
                rescaled = np.inf
            elif val == 0:
                rescaled = -np.inf
        else:
            rescaled[val == 1] = np.inf
            rescaled[val == 0] = -np.inf
        return rescaled

    def prob(self, val):
        """Return the prior probability of val.

        Parameters
        ==========
        val: Union[float, int, array_like]

        Returns
        =======
        Union[float, array_like]: Prior probability of val
        """
        return 1. / self.beta / np.pi / (1. + ((val - self.alpha) / self.beta) ** 2)

    def ln_prob(self, val):
        """Return the log prior probability of val.

        Parameters
        ==========
        val: Union[float, int, array_like]

        Returns
        =======
        Union[float, array_like]: Log prior probability of val
        """
        return - np.log(self.beta * np.pi) - np.log(1. + ((val - self.alpha) / self.beta) ** 2)

    def cdf(self, val):
        return 0.5 + np.arctan((val - self.alpha) / self.beta) / np.pi


class Lorentzian(Cauchy):
    """A synonym for the Cauchy distribution."""

class Gamma(Prior):

    def __init__(self, k, theta=1., name=None, latex_label=None, unit=None, boundary=None):
        """Gamma distribution

        https://en.wikipedia.org/wiki/Gamma_distribution

        Parameters
        ==========
        k: float
            The shape parameter
        theta: float
            The scale parameter
        name: str
            See superclass
        latex_label: str
            See superclass
        unit: str
            See superclass
        boundary: str
            See superclass
        """
        super(Gamma, self).__init__(name=name, minimum=0., latex_label=latex_label,
                                    unit=unit, boundary=boundary)

        if k <= 0 or theta <= 0:
            raise ValueError("For the Gamma prior the shape and scale must be positive")

        self.k = k
        self.theta = theta

    def rescale(self, val):
        """
        'Rescale' a sample from the unit line element to the appropriate Gamma prior.

        This maps to the inverse CDF, which has been solved analytically for this case.
        """
        return gammaincinv(self.k, val) * self.theta

    def prob(self, val):
        """Return the prior probability of val.

        Parameters
        ==========
        val: Union[float, int, array_like]

        Returns
        =======
        Union[float, array_like]: Prior probability of val
        """
        return np.exp(self.ln_prob(val))

    def ln_prob(self, val):
        """Return the log prior probability of val.

        Parameters
        ==========
        val: Union[float, int, array_like]

        Returns
        =======
        Union[float, array_like]: Log prior probability of val
        """
        if isinstance(val, (float, int)):
            if val < self.minimum:
                _ln_prob = -np.inf
            else:
                _ln_prob = xlogy(self.k - 1, val) - val / self.theta - xlogy(self.k, self.theta) - gammaln(self.k)
        else:
            _ln_prob = -np.inf * np.ones(val.size)
            idx = (val >= self.minimum)
            _ln_prob[idx] = xlogy(self.k - 1, val[idx]) - val[idx] / self.theta\
                - xlogy(self.k, self.theta) - gammaln(self.k)
        return _ln_prob

    def cdf(self, val):
        if isinstance(val, (float, int)):
            if val < self.minimum:
                _cdf = 0.
            else:
                _cdf = gammainc(self.k, val / self.theta)
        else:
            _cdf = np.zeros(val.size)
            _cdf[val >= self.minimum] = gammainc(self.k, val[val >= self.minimum] / self.theta)
        return _cdf
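
# Usage sketch (illustrative): with shape k=1 the Gamma prior reduces to an
# Exponential with mean theta, whose median is theta * ln(2).
#
#     >>> import numpy as np
#     >>> prior = Gamma(k=1, theta=2.0)
#     >>> round(float(prior.cdf(2 * np.log(2))), 12)
#     0.5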

class ChiSquared(Gamma):

    def __init__(self, nu, name=None, latex_label=None, unit=None, boundary=None):
        """Chi-squared distribution

        https://en.wikipedia.org/wiki/Chi-squared_distribution

        Parameters
        ==========
        nu: int
            Number of degrees of freedom
        name: str
            See superclass
        latex_label: str
            See superclass
        unit: str
            See superclass
        boundary: str
            See superclass
        """
        if nu <= 0 or not isinstance(nu, int):
            raise ValueError("For the ChiSquared prior the number of degrees of freedom must be a positive integer")

        super(ChiSquared, self).__init__(name=name, k=nu / 2., theta=2.,
                                         latex_label=latex_label, unit=unit, boundary=boundary)

    @property
    def nu(self):
        return int(self.k * 2)

    @nu.setter
    def nu(self, nu):
        self.k = nu / 2.

class FermiDirac(Prior):

    def __init__(self, sigma, mu=None, r=None, name=None, latex_label=None,
                 unit=None):
        """A Fermi-Dirac type prior, with a fixed lower boundary at zero
        (see, e.g., Section 2.3.5 of [1]_). The probability distribution
        is defined by Equation 22 of [1]_.

        Parameters
        ==========
        sigma: float (required)
            The range over which the attenuation of the distribution happens
        mu: float
            The point at which the distribution falls to 50% of its maximum
            value
        r: float
            A value giving mu/sigma. This can be used instead of specifying
            mu.
        name: str
            See superclass
        latex_label: str
            See superclass
        unit: str
            See superclass

        References
        ==========

        .. [1] M. Pitkin, M. Isi, J. Veitch & G. Woan, `arXiv:1705.08978v1
           <https://arxiv.org/abs/1705.08978v1>`_, 2017.
        """
        super(FermiDirac, self).__init__(name=name, latex_label=latex_label, unit=unit, minimum=0.)

        self.sigma = sigma

        if mu is None and r is None:
            raise ValueError("For the Fermi-Dirac prior either a 'mu' value or 'r' "
                             "value must be given.")

        if r is None and mu is not None:
            self.mu = mu
            self.r = self.mu / self.sigma
        else:
            self.r = r
            self.mu = self.sigma * self.r

        if self.r <= 0. or self.sigma <= 0.:
            raise ValueError("For the Fermi-Dirac prior the values of sigma and r "
                             "must be positive.")

    def rescale(self, val):
        """
        'Rescale' a sample from the unit line element to the appropriate Fermi-Dirac prior.

        This maps to the inverse CDF, which has been solved analytically for
        this case; see Equation 24 of [1]_.

        Parameters
        ==========
        val: Union[float, int, array_like]

        References
        ==========

        .. [1] M. Pitkin, M. Isi, J. Veitch & G. Woan, `arXiv:1705.08978v1
           <https://arxiv.org/abs/1705.08978v1>`_, 2017.
        """
        inv = (-np.exp(-1. * self.r) + (1. + np.exp(self.r)) ** -val +
               np.exp(-1. * self.r) * (1. + np.exp(self.r)) ** -val)

        # if val is 1 this will cause inv to be negative (due to numerical
        # issues), so return np.inf
        if isinstance(val, (float, int)):
            if inv < 0:
                return np.inf
            else:
                return -self.sigma * np.log(inv)
        else:
            idx = inv >= 0.
            tmpinv = np.inf * np.ones(len(np.atleast_1d(val)))
            tmpinv[idx] = -self.sigma * np.log(inv[idx])
            return tmpinv

    def prob(self, val):
        """Return the prior probability of val.

        Parameters
        ==========
        val: Union[float, int, array_like]

        Returns
        =======
        float: Prior probability of val
        """
        return np.exp(self.ln_prob(val))

    def ln_prob(self, val):
        """Return the log prior probability of val.

        Parameters
        ==========
        val: Union[float, int, array_like]

        Returns
        =======
        Union[float, array_like]: Log prior probability of val
        """
        norm = -np.log(self.sigma * np.log(1. + np.exp(self.r)))
        if isinstance(val, (float, int)):
            if val < self.minimum:
                return -np.inf
            else:
                return norm - np.logaddexp((val / self.sigma) - self.r, 0.)
        else:
            val = np.atleast_1d(val)
            lnp = -np.inf * np.ones(len(val))
            idx = val >= self.minimum
            lnp[idx] = norm - np.logaddexp((val[idx] / self.sigma) - self.r, 0.)
            return lnp

class Categorical(Prior):

    def __init__(self, ncategories, name=None, latex_label=None,
                 unit=None, boundary="periodic"):
        """An equal-weighted Categorical prior

        Parameters
        ==========
        ncategories: int
            The number of available categories. The prior mass support is then
            the integers [0, ncategories - 1].
        name: str
            See superclass
        latex_label: str
            See superclass
        unit: str
            See superclass
        boundary: str
            See superclass
        """
        minimum = 0
        # Small delta added to help with MCMC walking
        maximum = ncategories - 1 + 1e-15
        super(Categorical, self).__init__(
            name=name, latex_label=latex_label, minimum=minimum,
            maximum=maximum, unit=unit, boundary=boundary)
        self.ncategories = ncategories
        self.categories = np.arange(self.minimum, self.maximum)
        self.p = 1 / self.ncategories
        self.lnp = -np.log(self.ncategories)

    def rescale(self, val):
        """
        'Rescale' a sample from the unit line element to the categorical prior.

        This maps to the inverse CDF, which has been solved analytically for this case.

        Parameters
        ==========
        val: Union[float, int, array_like]
            Uniform probability

        Returns
        =======
        Union[float, array_like]: Rescaled value
        """
        return np.floor(val * (1 + self.maximum))

    def prob(self, val):
        """Return the prior probability of val.

        Parameters
        ==========
        val: Union[float, int, array_like]

        Returns
        =======
        float: Prior probability of val
        """
        if isinstance(val, (float, int)):
            if val in self.categories:
                return self.p
            else:
                return 0
        else:
            val = np.atleast_1d(val)
            probs = np.zeros_like(val, dtype=np.float64)
            idxs = np.isin(val, self.categories)
            probs[idxs] = self.p
            return probs

    def ln_prob(self, val):
        """Return the logarithmic prior probability of val

        Parameters
        ==========
        val: Union[float, int, array_like]

        Returns
        =======
        Union[float, array_like]: Log prior probability of val
        """
        if isinstance(val, (float, int)):
            if val in self.categories:
                return self.lnp
            else:
                return -np.inf
        else:
            val = np.atleast_1d(val)
            probs = -np.inf * np.ones_like(val, dtype=np.float64)
            idxs = np.isin(val, self.categories)
            probs[idxs] = self.lnp
            return probs
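
# Usage sketch (illustrative): unit-interval draws are floored onto the integer
# categories, each carrying probability 1 / ncategories.
#
#     >>> import numpy as np
#     >>> prior = Categorical(ncategories=3)
#     >>> prior.rescale(np.array([0.1, 0.5, 0.9]))
#     array([0., 1., 2.])
#     >>> prior.prob(1)
#     0.3333333333333333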

class Triangular(Prior):
    """
    A prior class which draws from a triangular distribution.

    For distribution details see: wikipedia.org/wiki/Triangular_distribution

    Here, minimum <= mode <= maximum,
    where the mode has the highest pdf value.

    """
    def __init__(self, mode, minimum, maximum, name=None, latex_label=None, unit=None):
        super(Triangular, self).__init__(
            name=name,
            latex_label=latex_label,
            unit=unit,
            minimum=minimum,
            maximum=maximum,
        )
        self.mode = mode
        self.fractional_mode = (self.mode - self.minimum) / (
            self.maximum - self.minimum
        )
        self.scale = self.maximum - self.minimum
        self.rescaled_minimum = self.minimum - (self.minimum == self.mode) * self.scale
        self.rescaled_maximum = self.maximum + (self.maximum == self.mode) * self.scale

    def rescale(self, val):
        """
        'Rescale' a sample from the standard uniform distribution to a triangular distribution.

        This maps to the inverse CDF, which has been solved analytically for this case.

        Parameters
        ==========
        val: Union[float, int, array_like]
            Uniform probability

        Returns
        =======
        Union[float, array_like]: Rescaled value

        """
        below_mode = (val * self.scale * (self.mode - self.minimum)) ** 0.5
        above_mode = ((1 - val) * self.scale * (self.maximum - self.mode)) ** 0.5
        return (self.minimum + below_mode) * (val < self.fractional_mode) + (
            self.maximum - above_mode
        ) * (val >= self.fractional_mode)

    def prob(self, val):
        """
        Return the prior probability of val

        Parameters
        ==========
        val: Union[float, int, array_like]

        Returns
        =======
        float: Prior probability of val

        """
        between_minimum_and_mode = (
            (val < self.mode)
            * (self.minimum <= val)
            * (val - self.rescaled_minimum)
            / (self.mode - self.rescaled_minimum)
        )
        between_mode_and_maximum = (
            (self.mode <= val)
            * (val <= self.maximum)
            * (self.rescaled_maximum - val)
            / (self.rescaled_maximum - self.mode)
        )
        return 2.0 * (between_minimum_and_mode + between_mode_and_maximum) / self.scale

    def cdf(self, val):
        """
        Return the prior cumulative probability at val

        Parameters
        ==========
        val: Union[float, int, array_like]

        Returns
        =======
        float: prior cumulative probability at val

        """
        return (
            (val > self.mode)
            + (val > self.minimum)
            * (val <= self.maximum)
            / (self.scale)
            * (
                (val > self.mode)
                * (self.rescaled_maximum - val) ** 2.0
                / (self.mode - self.rescaled_maximum)
                + (val <= self.mode)
                * (val - self.rescaled_minimum) ** 2.0
                / (self.mode - self.rescaled_minimum)
            )
        )
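
# Usage sketch (illustrative): for a symmetric triangle on [0, 2] with mode 1,
# the peak density is 2 / (maximum - minimum) and half the mass lies below the mode.
#
#     >>> prior = Triangular(mode=1, minimum=0, maximum=2)
#     >>> prior.prob(1.0)
#     1.0
#     >>> prior.cdf(1.0)
#     0.5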