Coverage for bilby/core/sampler/pymultinest.py: 53%

77 statements  

« prev     ^ index     » next       coverage.py v7.6.1, created at 2025-05-06 04:57 +0000

1import datetime 

2import importlib 

3import os 

4import time 

5 

6import numpy as np 

7 

8from .base_sampler import NestedSampler, _TemporaryFileSamplerMixin, signal_wrapper 

9 

10 

class Pymultinest(_TemporaryFileSamplerMixin, NestedSampler):
    """
    bilby wrapper of pymultinest
    (https://github.com/JohannesBuchner/PyMultiNest)

    All positional and keyword arguments (i.e., the args and kwargs) passed to
    `run_sampler` will be propagated to `pymultinest.run`, see documentation
    for that class for further help. Under Other Parameters, we list commonly
    used kwargs and the bilby defaults.

    Parameters
    ==========
    npoints: int
        The number of live points, note this can also equivalently be given as
        one of [nlive, nlives, n_live_points]
    importance_nested_sampling: bool, (False)
        If true, use importance nested sampling
    sampling_efficiency: float or {'parameter', 'model'}, ('parameter')
        Defines the sampling efficiency
    verbose: bool
        If true, print information about the convergence during sampling
    resume: bool
        If true, resume run from checkpoint (if available)

    """

    sampler_name = "pymultinest"
    abbreviation = "pm"
    default_kwargs = dict(
        importance_nested_sampling=False,
        resume=True,
        verbose=True,
        sampling_efficiency="parameter",
        n_live_points=500,
        n_params=None,
        n_clustering_params=None,
        wrapped_params=None,
        multimodal=True,
        const_efficiency_mode=False,
        evidence_tolerance=0.5,
        n_iter_before_update=100,
        null_log_evidence=-1e90,
        max_modes=100,
        mode_tolerance=-1e90,
        outputfiles_basename=None,
        seed=-1,
        context=0,
        write_output=True,
        log_zero=-1e100,
        max_iter=0,
        init_MPI=False,
        dump_callback=None,
    )
    short_name = "pm"
    # MultiNest performs a hard exit on interruption; see signal handling in
    # run_sampler where the interrupt handler is overridden.
    hard_exit = True
    sampling_seed_key = "seed"

    def __init__(
        self,
        likelihood,
        priors,
        outdir="outdir",
        label="label",
        use_ratio=False,
        plot=False,
        exit_code=77,
        skip_import_verification=False,
        temporary_directory=True,
        **kwargs
    ):
        super(Pymultinest, self).__init__(
            likelihood=likelihood,
            priors=priors,
            outdir=outdir,
            label=label,
            use_ratio=use_ratio,
            plot=plot,
            skip_import_verification=skip_import_verification,
            exit_code=exit_code,
            temporary_directory=temporary_directory,
            **kwargs
        )
        # Populate the wrapped_params flags from the prior boundaries before
        # the sampler is run.
        self._apply_multinest_boundaries()

    def _translate_kwargs(self, kwargs):
        """Map equivalent live-point kwargs onto MultiNest's ``n_live_points``.

        Parameters
        ==========
        kwargs: dict
            User-supplied keyword arguments.

        Returns
        =======
        dict
            The keyword arguments with any equivalent live-point name
            (e.g. nlive, npoints) renamed to ``n_live_points``.
        """
        kwargs = super()._translate_kwargs(kwargs)
        if "n_live_points" not in kwargs:
            for equiv in self.npoints_equiv_kwargs:
                if equiv in kwargs:
                    kwargs["n_live_points"] = kwargs.pop(equiv)
        # BUGFIX: previously this method fell off the end and implicitly
        # returned None, so any caller using the pattern
        # ``kwargs = sampler._translate_kwargs(kwargs)`` lost the translated
        # dict entirely. The super() call above demonstrably returns the dict
        # (the membership test would fail on None), so we must do the same.
        return kwargs

    def _verify_kwargs_against_default_kwargs(self):
        """Check the kwargs, filling in derived defaults before validation."""

        # outputfiles_basename is managed by the temporary-directory mixin,
        # not passed through to pymultinest.solve directly.
        self.outputfiles_basename = self.kwargs.pop("outputfiles_basename", None)

        # for PyMultiNest >=2.9 the n_params kwarg cannot be None
        if self.kwargs["n_params"] is None:
            self.kwargs["n_params"] = self.ndim
        # Install our checkpointing callback unless the user supplied one.
        if self.kwargs["dump_callback"] is None:
            self.kwargs["dump_callback"] = self._dump_callback
        NestedSampler._verify_kwargs_against_default_kwargs(self)

    def _dump_callback(self, *args, **kwargs):
        """Checkpoint hook invoked periodically by MultiNest.

        Copies the temporary run directory back to the output path (when a
        temporary directory is in use) and records the accumulated sampling
        time. Positional/keyword arguments from MultiNest are ignored.
        """
        if self.use_temporary_directory:
            self._copy_temporary_directory_contents_to_proper_path()
        self._calculate_and_save_sampling_time()

    def _apply_multinest_boundaries(self):
        """Build MultiNest's ``wrapped_params`` flags from the priors.

        One entry per search parameter: 1 if the prior boundary is periodic,
        otherwise 0. A user-supplied ``wrapped_params`` is left untouched.
        """
        if self.kwargs["wrapped_params"] is None:
            self.kwargs["wrapped_params"] = list()
            for param in self.search_parameter_keys:
                if self.priors[param].boundary == "periodic":
                    self.kwargs["wrapped_params"].append(1)
                else:
                    self.kwargs["wrapped_params"].append(0)

    @signal_wrapper
    def run_sampler(self):
        """Run pymultinest and populate ``self.result``.

        Returns
        =======
        bilby.core.result.Result
            The result object with samples, evidence, and timing filled in.
        """
        import pymultinest

        self._verify_kwargs_against_default_kwargs()

        self._setup_run_directory()
        self._check_and_load_sampling_time_file()
        # A fresh (non-resumed) run starts the sampling-time clock from zero.
        if not self.kwargs["resume"]:
            self.total_sampling_time = 0.0

        # Overwrite pymultinest's signal handling function so interrupts are
        # routed through bilby's checkpoint-and-exit logic.
        pm_run = importlib.import_module("pymultinest.run")
        pm_run.interrupt_handler = self.write_current_state_and_exit

        self.start_time = time.time()
        out = pymultinest.solve(
            LogLikelihood=self.log_likelihood,
            Prior=self.prior_transform,
            n_dims=self.ndim,
            **self.kwargs
        )
        self._calculate_and_save_sampling_time()

        self._clean_up_run_directory()

        # post_equal_weights.dat holds equally-weighted posterior samples with
        # the log-likelihood in the final column.
        post_equal_weights = os.path.join(
            self.outputfiles_basename, "post_equal_weights.dat"
        )
        post_equal_weights_data = np.loadtxt(post_equal_weights)
        self.result.log_likelihood_evaluations = post_equal_weights_data[:, -1]
        self.result.sampler_output = out
        self.result.samples = post_equal_weights_data[:, :-1]
        self.result.log_evidence = out["logZ"]
        self.result.log_evidence_err = out["logZerr"]
        self.calc_likelihood_count()
        self.result.outputfiles_basename = self.outputfiles_basename
        self.result.sampling_time = datetime.timedelta(seconds=self.total_sampling_time)
        self.result.nested_samples = self._nested_samples
        return self.result

    @property
    def _nested_samples(self):
        """
        Extract nested samples from the pymultinest files.
        This requires combining the "dead" points from `ev.dat` and the "live"
        points from `phys_live.points`.
        The prior volume associated with the current live points is the simple
        estimate of `remaining_prior_volume / N`.

        Returns
        =======
        pandas.DataFrame
            One row per (dead + live) point with columns for each search
            parameter plus log_likelihood, log_prior_volume, and mode.
        """
        import pandas as pd

        dir_ = self.kwargs["outputfiles_basename"]
        dead_points = np.genfromtxt(dir_ + "/ev.dat")
        live_points = np.genfromtxt(dir_ + "/phys_live.points")

        nlive = self.kwargs["n_live_points"]
        # log(remaining prior volume / nlive): each dead point shrinks the
        # log-volume by 1/nlive on average, hence -ndead/nlive - log(nlive).
        final_log_prior_volume = -len(dead_points) / nlive - np.log(nlive)
        # phys_live.points lacks the log-prior-volume column that ev.dat has;
        # insert it just before the trailing mode column so the two arrays
        # have matching columns and can be stacked.
        live_points = np.insert(live_points, -1, final_log_prior_volume, axis=-1)

        nested_samples = pd.DataFrame(
            np.vstack([dead_points, live_points]).copy(),
            columns=self.search_parameter_keys
            + ["log_likelihood", "log_prior_volume", "mode"],
        )
        return nested_samples