Coverage for /usr/local/lib/python3.11/dist-packages/grond/optimisers/highscore/optimiser.py: 87%
448 statements
« prev ^ index » next coverage.py v6.5.0, created at 2023-10-26 15:57 +0000
« prev ^ index » next coverage.py v6.5.0, created at 2023-10-26 15:57 +0000
1from __future__ import print_function
2import math
3import os.path as op
4import os
5import logging
6import time
7import numpy as num
8from collections import OrderedDict
10from pyrocko.guts import StringChoice, Int, Float, Object, List
11from pyrocko.guts_array import Array
13from grond.meta import GrondError, Forbidden, has_get_plot_classes
14from grond.problems.base import ModelHistory
15from grond.optimisers.base import Optimiser, OptimiserConfig, BadProblem, \
16 OptimiserStatus
# Tag prefix used by pyrocko.guts for YAML (de)serialization of the
# classes defined in this module.
guts_prefix = 'grond'

logger = logging.getLogger('grond.optimisers.highscore.optimiser')
def nextpow2(i):
    '''Return the smallest power of two that is >= ``i``.

    Uses exact integer bit arithmetic instead of the previous
    ``2**ceil(log(i)/log(2))`` formulation, whose floating point rounding
    could return the wrong power for large inputs (e.g. when the log
    ratio of an exact power of two comes out as ``29.000000000000004``).

    :param i: positive integer
    :returns: smallest power of two ``>= i``
    '''
    return 1 << (int(i) - 1).bit_length()
def excentricity_compensated_probabilities(xs, sbx, factor):
    '''Compute selection probabilities favouring isolated models.

    Each model's probability is inversely proportional to the number of
    neighbouring models within a scaled unit distance, so models in
    sparsely sampled regions are picked more often.

    :param xs: models, shape ``(nmodels, nparameters)``
    :param sbx: per-parameter spread estimates used for scaling
    :param factor: neighbourhood radius scale (``0`` treated as ``1``)
    :returns: normalized probabilities, shape ``(nmodels,)``
    '''
    radius = factor if factor != 0. else 1.0
    scale = num.zeros_like(sbx)
    nonflat = sbx != 0.0
    scale[nonflat] = 1.0 / (sbx[nonflat] * radius)

    diffs = (xs[num.newaxis, :, :] - xs[:, num.newaxis, :]) * scale
    dists_sqr = num.sum(diffs**2, axis=2)

    # Count neighbours inside the scaled unit ball (self always counts).
    nneighbours = num.sum(dists_sqr < 1.0, axis=1)
    probabilities = 1.0 / nneighbours
    return probabilities / num.sum(probabilities)
def excentricity_compensated_choice(xs, sbx, factor, rstate):
    '''Randomly pick one model index with excentricity compensation.

    Draws from the probability distribution produced by
    :py:func:`excentricity_compensated_probabilities` via inverse
    transform sampling.

    :param xs: models, shape ``(nmodels, nparameters)``
    :param sbx: per-parameter spread estimates
    :param factor: neighbourhood radius scale
    :param rstate: :py:class:`numpy.random.RandomState` instance
    :returns: chosen model index
    '''
    probabilities = excentricity_compensated_probabilities(xs, sbx, factor)
    cdf = num.cumsum(probabilities)
    ipick = num.searchsorted(cdf, rstate.random_sample())
    # Guard against roundoff pushing the draw past the last entry.
    return min(ipick, xs.shape[0] - 1)
def local_std(xs):
    '''Estimate per-parameter spread from the local sample density.

    Sorts each parameter column, takes the median gap between adjacent
    samples and rescales it to a standard-deviation-like quantity.

    :param xs: models, shape ``(nmodels, nparameters)``
    :returns: spread estimate per parameter, shape ``(nparameters,)``
    '''
    columns_sorted = num.sort(xs, axis=0)
    gaps = num.diff(columns_sorted, axis=0)
    median_gap = num.median(gaps, axis=0)
    # Empirical rescaling of median spacing to a std-like measure.
    return median_gap * gaps.shape[0] / 2.6
class SamplerDistributionChoice(StringChoice):
    '''Distribution family for drawing new candidate models.'''
    choices = ['multivariate_normal', 'normal']
class StandardDeviationEstimatorChoice(StringChoice):
    '''How to estimate the per-parameter spread of highscore models.'''
    choices = [
        'median_density_single_chain',
        'standard_deviation_all_chains',
        'standard_deviation_single_chain']
class SamplerStartingPointChoice(StringChoice):
    '''How the center model of the sampler distribution is selected.'''
    choices = ['excentricity_compensated', 'random', 'mean']
class BootstrapTypeChoice(StringChoice):
    '''Bootstrap weighting scheme: Bayesian (Dirichlet) or classic.'''
    choices = ['bayesian', 'classic']
def fnone(i):
    '''Map ``None`` to ``-1`` so optional indices fit in int arrays.'''
    return -1 if i is None else i
class Sample(Object):

    '''Sample model with context about how it was generated.'''

    model = Array.T(shape=(None,), dtype=num.float64, serialize_as='list')
    iphase = Int.T(optional=True)
    ichain_base = Int.T(optional=True)
    ilink_base = Int.T(optional=True)
    imodel_base = Int.T(optional=True)

    def preconstrain(self, problem):
        # Project the model vector onto the problem's feasible region.
        self.model = problem.preconstrain(self.model)

    def pack_context(self):
        # Encode the generation context as a fixed-size int vector;
        # unset (None) entries are stored as -1 via fnone().
        return num.array(
            [fnone(self.iphase),
             fnone(self.ichain_base),
             fnone(self.ilink_base),
             fnone(self.imodel_base)],
            dtype=int)
class SamplerPhase(Object):
    '''Base class for one stage of the highscore sampling schedule.

    Subclasses implement :py:meth:`get_raw_sample`; this base class adds
    lazily created random state and the retry loop which preconstrains
    raw samples until a feasible one is found.
    '''

    niterations = Int.T(
        help='Number of iteration for this phase.')
    ntries_preconstrain_limit = Int.T(
        default=1000,
        help='Tries to find a valid preconstrained sample.')
    seed = Int.T(
        optional=True,
        help='Random state seed.')

    def __init__(self, *args, **kwargs):
        Object.__init__(self, *args, **kwargs)
        self._rstate = None  # created on first use in get_rstate()

    def get_rstate(self):
        # Lazy creation keeps deserialized instances working and makes
        # runs reproducible when a seed is configured.
        if self._rstate is None:
            self._rstate = num.random.RandomState(self.seed)

        return self._rstate

    def get_raw_sample(self, problem, iiter, chains):
        '''Produce an unconstrained candidate sample (subclass hook).'''
        raise NotImplementedError

    def get_sample(self, problem, iiter, chains):
        '''Draw a feasible sample, retrying while preconstrain forbids it.

        :raises GrondError: if no feasible sample is found within
            ``ntries_preconstrain_limit`` tries.
        '''
        assert 0 <= iiter < self.niterations

        # Fixed: dropped a dead local (`ntries_preconstrain`) that was
        # assigned, shadowed by the loop variable and never used.
        for _ in range(self.ntries_preconstrain_limit):
            try:
                sample = self.get_raw_sample(problem, iiter, chains)
                sample.preconstrain(problem)
                return sample

            except Forbidden:
                pass

        raise GrondError(
            'could not find any suitable candidate sample within %i tries' % (
                self.ntries_preconstrain_limit))
class InjectionSamplerPhase(SamplerPhase):
    # Phase which replays a fixed set of externally supplied models,
    # one row per iteration.
    xs_inject = Array.T(
        dtype=num.float64, shape=(None, None),
        help='Array with the reference model.')

    def get_raw_sample(self, problem, iiter, chains):
        # Inject row `iiter` of the given model array verbatim.
        return Sample(model=self.xs_inject[iiter, :])
class UniformSamplerPhase(SamplerPhase):
    # Phase drawing models uniformly from the parameter bounds
    # (exploration stage).

    def get_raw_sample(self, problem, iiter, chains):
        xbounds = problem.get_parameter_bounds()
        return Sample(model=problem.random_uniform(xbounds, self.get_rstate()))
class DirectedSamplerPhase(SamplerPhase):
    # Phase drawing new candidates in the neighbourhood of current
    # highscore models, either per-parameter from normal distributions
    # or jointly from a multivariate normal (convergence stage).

    scatter_scale = Float.T(
        optional=True,
        help='Scales search radius around the current `highscore` models')
    scatter_scale_begin = Float.T(
        optional=True,
        help='Scaling factor at beginning of the phase.')
    scatter_scale_end = Float.T(
        optional=True,
        help='Scaling factor at the end of the directed phase.')
    starting_point = SamplerStartingPointChoice.T(
        default='excentricity_compensated',
        help='Tunes to the center value of the sampler distribution.'
             'May increase the likelihood to draw a highscore member model'
             ' off-center to the mean value')

    sampler_distribution = SamplerDistributionChoice.T(
        default='normal',
        help='Distribution new models are drawn from.')

    standard_deviation_estimator = StandardDeviationEstimatorChoice.T(
        default='median_density_single_chain')

    ntries_sample_limit = Int.T(default=1000)

    def get_scatter_scale_factor(self, iiter):
        # Return the scatter scale for iteration `iiter`.  With
        # scatter_scale_begin/end configured, the factor decays
        # exponentially from begin to end over the phase; otherwise the
        # constant scatter_scale (default 1.0) is used.
        s = self.scatter_scale
        sa = self.scatter_scale_begin
        sb = self.scatter_scale_end

        # Constant scale and begin/end decay are mutually exclusive.
        # NOTE(review): the assert does not require sa and sb to be set
        # as a pair; with only one of them set, math.log(None) below
        # would raise — confirm intended configuration constraints.
        assert s is None or (sa is None and sb is None)

        if sa != sb:
            tb = float(self.niterations-1)
            tau = tb/(math.log(sa) - math.log(sb))
            t0 = math.log(sa) * tau
            t = float(iiter)
            return num.exp(-(t-t0) / tau)

        else:
            return s or 1.0

    def get_raw_sample(self, problem, iiter, chains):
        # Draw one candidate model around a starting model picked from
        # the bootstrap chain with the lowest acceptance count so far.
        rstate = self.get_rstate()
        factor = self.get_scatter_scale_factor(iiter)
        npar = problem.nparameters
        pnames = problem.parameter_names
        xbounds = problem.get_parameter_bounds()

        ilink_choice = None
        ichain_choice = num.argmin(chains.accept_sum)

        # Select the center (starting point) of the proposal
        # distribution according to the configured strategy.
        if self.starting_point == 'excentricity_compensated':
            models = chains.models(ichain_choice)
            ilink_choice = excentricity_compensated_choice(
                models,
                chains.standard_deviation_models(
                    ichain_choice, self.standard_deviation_estimator),
                2., rstate)

            xchoice = chains.model(ichain_choice, ilink_choice)

        elif self.starting_point == 'random':
            ilink_choice = rstate.randint(0, chains.nlinks)
            xchoice = chains.model(ichain_choice, ilink_choice)

        elif self.starting_point == 'mean':
            xchoice = chains.mean_model(ichain_choice)

        else:
            assert False, 'invalid starting_point choice: %s' % (
                self.starting_point)

        ntries_sample = 0
        if self.sampler_distribution == 'normal':
            # Draw each parameter independently from a normal centered
            # at the starting model; retry out-of-bounds draws, falling
            # back to a uniform draw after ntries_sample_limit tries.
            x = num.zeros(npar, dtype=float)
            sx = chains.standard_deviation_models(
                ichain_choice, self.standard_deviation_estimator)

            for ipar in range(npar):
                ntries = 0
                while True:
                    if sx[ipar] > 0.:
                        v = rstate.normal(
                            xchoice[ipar],
                            factor*sx[ipar])
                    else:
                        # Degenerate spread: keep the center value.
                        v = xchoice[ipar]

                    if xbounds[ipar, 0] <= v and \
                            v <= xbounds[ipar, 1]:

                        break

                    if ntries > self.ntries_sample_limit:
                        logger.warning(
                            'failed to produce a suitable '
                            'candidate sample from normal '
                            'distribution for parameter \'%s\''
                            '- drawing from uniform instead.' %
                            pnames[ipar])
                        v = rstate.uniform(xbounds[ipar, 0],
                                           xbounds[ipar, 1])
                        break

                    ntries += 1

                x[ipar] = v

        elif self.sampler_distribution == 'multivariate_normal':
            # Draw the whole model jointly; reject until all parameters
            # are within bounds or the try limit is hit.
            # NOTE(review): Chains defines covariance_models(), not
            # cov() — this branch looks like it would raise
            # AttributeError if ever taken; confirm against Chains API.
            ok_mask_sum = num.zeros(npar, dtype=int)
            while True:
                ntries_sample += 1
                xcandi = rstate.multivariate_normal(
                    xchoice, factor**2 * chains.cov(ichain_choice))

                ok_mask = num.logical_and(
                    xbounds[:, 0] <= xcandi, xcandi <= xbounds[:, 1])

                if num.all(ok_mask):
                    break

                ok_mask_sum += ok_mask

                if ntries_sample > self.ntries_sample_limit:
                    logger.warning(
                        'failed to produce a suitable candidate '
                        'sample from multivariate normal '
                        'distribution, (%s) - drawing from uniform instead' %
                        ', '.join('%s:%i' % xx for xx in
                                  zip(pnames, ok_mask_sum)))
                    xbounds = problem.get_parameter_bounds()
                    xcandi = problem.random_uniform(xbounds, rstate)
                    break

            x = xcandi

        # Record which history model the candidate was derived from,
        # when a specific chain link was used as starting point.
        imodel_base = None
        if ilink_choice is not None:
            imodel_base = chains.imodel(ichain_choice, ilink_choice)

        return Sample(
            model=x,
            ichain_base=ichain_choice,
            ilink_base=ilink_choice,
            imodel_base=imodel_base)
def make_bayesian_weights(nbootstrap, nmisfits,
                          type='bayesian', rstate=None):
    '''Create bootstrap weight matrices for misfit combination.

    ``'bayesian'`` rows are scaled spacings of sorted uniform draws
    (each row sums to ``nmisfits``); ``'classic'`` rows are resampling
    counts from drawing ``nmisfits`` indices with replacement.

    :param nbootstrap: number of bootstrap realisations (rows)
    :param nmisfits: number of misfit contributions (columns)
    :param type: ``'bayesian'`` or ``'classic'``
    :param rstate: optional :py:class:`numpy.random.RandomState`
    :returns: weights, shape ``(nbootstrap, nmisfits)``
    '''
    if rstate is None:
        rstate = num.random.RandomState()

    ws = num.zeros((nbootstrap, nmisfits))
    for irow in range(nbootstrap):
        if type == 'classic':
            # Count how often each misfit index is drawn.
            hits = rstate.randint(0, nmisfits, size=nmisfits)
            counts, _ = num.histogram(
                hits, nmisfits, (-0.5, nmisfits - 0.5))
            ws[irow, :] = counts
        elif type == 'bayesian':
            # Spacings of sorted uniforms with pinned 0/1 endpoints.
            fences = rstate.uniform(0., 1., size=nmisfits+1)
            fences[0] = 0.
            fences[-1] = 1.
            fences.sort()
            ws[irow, :] = num.diff(fences) * nmisfits
        else:
            assert False

    return ws
class Chains(object):
    '''Highscore bookkeeping for the global chain and bootstrap chains.

    Each chain keeps a misfit-sorted list of up to ``nlinks_cap`` model
    indices into a shared :py:class:`ModelHistory`.  Registered as a
    history listener, it incrementally ingests new models via
    :py:meth:`goto`.
    '''

    def __init__(
            self, problem, history, nchains, nlinks_cap):

        self.problem = problem
        self.history = history
        self.nchains = nchains
        self.nlinks_cap = nlinks_cap
        # chains_m: per-chain misfits, sorted ascending up to nlinks;
        # chains_i: matching model indices into the history.
        self.chains_m = num.zeros(
            (self.nchains, nlinks_cap), dtype=float)
        self.chains_i = num.zeros(
            (self.nchains, nlinks_cap), dtype=int)
        self.nlinks = 0   # currently occupied links (same for all chains)
        self.nread = 0    # history models consumed so far

        self.accept_sum = num.zeros(self.nchains, dtype=int)
        self._acceptance_history = num.zeros(
            (self.nchains, 1024), dtype=bool)  # grown on demand

        # Receive extend() callbacks when the history grows.
        history.add_listener(self)

    def goto(self, n=None):
        # Ingest history models up to index n (default: all available),
        # updating the sorted chains and acceptance statistics.
        if n is None:
            n = self.history.nmodels

        n = min(self.history.nmodels, n)

        assert self.nread <= n

        while self.nread < n:
            nread = self.nread
            gbms = self.history.bootstrap_misfits[nread, :]

            # Append the new model to every chain, then re-sort by
            # misfit.
            self.chains_m[:, self.nlinks] = gbms
            self.chains_i[:, self.nlinks] = nread
            nbootstrap = self.chains_m.shape[0]

            self.nlinks += 1
            chains_m = self.chains_m
            chains_i = self.chains_i

            for ichain in range(nbootstrap):
                isort = num.argsort(chains_m[ichain, :self.nlinks])
                chains_m[ichain, :self.nlinks] = chains_m[ichain, isort]
                chains_i[ichain, :self.nlinks] = chains_i[ichain, isort]

            if self.nlinks == self.nlinks_cap:
                # Chain full: the worst link is dropped.  A model counts
                # as "accepted" by a chain if it was not the one dropped.
                accept = (chains_i[:, self.nlinks_cap-1] != nread) \
                    .astype(bool)
                self.nlinks -= 1
            else:
                accept = num.ones(self.nchains, dtype=bool)

            self._append_acceptance(accept)
            self.accept_sum += accept
            self.nread += 1

    def load(self):
        # Catch up with the full history.
        return self.goto()

    def extend(self, ioffset, n, models, misfits, sampler_contexts):
        # ModelHistory listener interface: consume the newly appended
        # models.
        self.goto(ioffset + n)

    def indices(self, ichain):
        # Model indices of chain `ichain`, or of all chains flattened
        # when `ichain` is None.
        if ichain is not None:
            return self.chains_i[ichain, :self.nlinks]
        else:
            return self.chains_i[:, :self.nlinks].ravel()

    def models(self, ichain=None):
        return self.history.models[self.indices(ichain), :]

    def model(self, ichain, ilink):
        return self.history.models[self.chains_i[ichain, ilink], :]

    def imodel(self, ichain, ilink):
        return self.chains_i[ichain, ilink]

    def misfits(self, ichain=0):
        return self.chains_m[ichain, :self.nlinks]

    def misfit(self, ichain, ilink):
        assert ilink < self.nlinks
        return self.chains_m[ichain, ilink]

    def mean_model(self, ichain=None):
        xs = self.models(ichain)
        return num.mean(xs, axis=0)

    def best_model(self, ichain=0):
        # Chains are kept sorted by misfit, so link 0 is the best model.
        xs = self.models(ichain)
        return xs[0]

    def best_model_misfit(self, ichain=0):
        return self.chains_m[ichain, 0]

    def standard_deviation_models(self, ichain, estimator):
        # Per-parameter spread estimate, strategy selected by
        # `estimator` (see StandardDeviationEstimatorChoice).
        if estimator == 'median_density_single_chain':
            xs = self.models(ichain)
            return local_std(xs)
        elif estimator == 'standard_deviation_all_chains':
            bxs = self.models()
            return num.std(bxs, axis=0)
        elif estimator == 'standard_deviation_single_chain':
            xs = self.models(ichain)
            return num.std(xs, axis=0)
        else:
            assert False, 'invalid standard_deviation_estimator choice'

    def covariance_models(self, ichain):
        # Covariance of the chain's models across parameters.
        xs = self.models(ichain)
        return num.cov(xs.T)

    @property
    def acceptance_history(self):
        # View of the acceptance flags for all consumed models.
        return self._acceptance_history[:, :self.nread]

    def _append_acceptance(self, acceptance):
        # Record acceptance flags, growing the buffer geometrically
        # (powers of two) when exhausted.
        if self.nread >= self._acceptance_history.shape[1]:
            new_buf = num.zeros(
                (self.nchains, nextpow2(self.nread+1)), dtype=bool)
            new_buf[:, :self._acceptance_history.shape[1]] = \
                self._acceptance_history
            self._acceptance_history = new_buf
        self._acceptance_history[:, self.nread] = acceptance
@has_get_plot_classes
class HighScoreOptimiser(Optimiser):
    '''Monte-Carlo-based directed search optimisation with bootstrap.'''

    sampler_phases = List.T(SamplerPhase.T())
    chain_length_factor = Float.T(default=8.)
    nbootstrap = Int.T(default=100)
    bootstrap_type = BootstrapTypeChoice.T(default='bayesian')
    bootstrap_seed = Int.T(default=23)

    # Unicode block characters used for terminal sparkline histograms.
    SPARKS = u'\u2581\u2582\u2583\u2584\u2585\u2586\u2587\u2588'
    # Number of most recent models averaged for the acceptance display.
    ACCEPTANCE_AVG_LEN = 100

    def __init__(self, **kwargs):
        Optimiser.__init__(self, **kwargs)
        # Lazily built caches, created on first access.
        self._bootstrap_weights = None
        self._bootstrap_residuals = None
        self._correlated_weights = None
        self._status_chains = None
        self._rstate_bootstrap = None

    def get_rstate_bootstrap(self):
        # Dedicated RNG for bootstrap setup, seeded for reproducibility.
        if self._rstate_bootstrap is None:
            self._rstate_bootstrap = num.random.RandomState(
                self.bootstrap_seed)

        return self._rstate_bootstrap

    def init_bootstraps(self, problem):
        # Set up both bootstrap weights and residuals on all targets.
        self.init_bootstrap_weights(problem)
        self.init_bootstrap_residuals(problem)

    def init_bootstrap_weights(self, problem):
        # Distribute Bayesian bootstrap weights over the weight-capable
        # targets; others get unit weights.
        logger.info('Initializing Bayesian bootstrap weights.')

        nmisfits_w = sum(
            t.nmisfits for t in problem.targets if t.can_bootstrap_weights)

        ws = make_bayesian_weights(
            self.nbootstrap,
            nmisfits=nmisfits_w,
            rstate=self.get_rstate_bootstrap())

        imf = 0
        for t in problem.targets:
            if t.can_bootstrap_weights:
                t.set_bootstrap_weights(ws[:, imf:imf+t.nmisfits])
                imf += t.nmisfits
            else:
                t.set_bootstrap_weights(
                    num.ones((self.nbootstrap, t.nmisfits)))

    def init_bootstrap_residuals(self, problem):
        # Let residual-capable targets create their own bootstrap
        # residuals; others get zeros.
        logger.info('Initializing Bayesian bootstrap residuals.')

        for t in problem.targets:
            if t.can_bootstrap_residuals:
                t.init_bootstrap_residuals(
                    self.nbootstrap, rstate=self.get_rstate_bootstrap())
            else:
                t.set_bootstrap_residuals(
                    num.zeros((self.nbootstrap, t.nmisfits)))

    def get_bootstrap_weights(self, problem):
        # Cached (nbootstrap+1, nmisfits) weight matrix; the first row
        # of ones represents the unweighted global chain.
        if self._bootstrap_weights is None:
            try:
                # Probe whether targets already carry weights;
                # initialize them on failure.
                problem.targets[0].get_bootstrap_weights()
            except Exception:
                self.init_bootstraps(problem)

            bootstrap_weights = num.hstack(
                [t.get_bootstrap_weights()
                 for t in problem.targets])

            self._bootstrap_weights = num.vstack((
                num.ones((1, problem.nmisfits)),
                bootstrap_weights))

        return self._bootstrap_weights

    def get_bootstrap_residuals(self, problem):
        # Cached (nbootstrap+1, nmisfits) residual matrix; the first row
        # of zeros represents the global chain.
        if self._bootstrap_residuals is None:
            try:
                problem.targets[0].get_bootstrap_residuals()
            except Exception:
                self.init_bootstraps(problem)

            bootstrap_residuals = num.hstack(
                [t.get_bootstrap_residuals()
                 for t in problem.targets])

            self._bootstrap_residuals = num.vstack((
                num.zeros((1, problem.nmisfits)),
                bootstrap_residuals))

        return self._bootstrap_residuals

    def get_correlated_weights(self, problem):
        # Cached mapping: misfit start index -> correlated weight matrix
        # for targets which provide one.
        if self._correlated_weights is None:
            corr = dict()
            misfit_idx = num.cumsum(
                [0.] + [t.nmisfits for t in problem.targets], dtype=int)

            for it, target in enumerate(problem.targets):
                weights = target.get_correlated_weights()
                if weights is None:
                    continue
                corr[misfit_idx[it]] = weights

            self._correlated_weights = corr

        return self._correlated_weights

    @property
    def nchains(self):
        # One global chain plus one chain per bootstrap realisation.
        return self.nbootstrap + 1

    def chains(self, problem, history):
        # Build the Chains bookkeeping object; highscore list length
        # scales with the number of parameters.
        nlinks_cap = int(round(
            self.chain_length_factor * problem.nparameters + 1))

        return Chains(
            problem, history,
            nchains=self.nchains, nlinks_cap=nlinks_cap)

    def get_sampler_phase(self, iiter):
        # Map a global iteration index to (phase index, phase, iteration
        # within that phase).
        niter = 0
        for iphase, phase in enumerate(self.sampler_phases):
            if iiter < niter + phase.niterations:
                return iphase, phase, iiter - niter

            niter += phase.niterations

        assert False, 'sample out of bounds'

    def log_progress(self, problem, iiter, niter, phase, iiter_phase):
        # Throttled progress logging: at most every 10 s, plus the first
        # and last iteration of each phase.
        t = time.time()
        if self._tlog_last < t - 10. \
                or iiter_phase == 0 \
                or iiter_phase == phase.niterations - 1:

            logger.info(
                '%s at %i/%i (%s, %i/%i)' % (
                    problem.name,
                    iiter+1, niter,
                    phase.__class__.__name__, iiter_phase, phase.niterations))

            self._tlog_last = t

    def optimise(self, problem, rundir=None):
        '''Run the full optimisation loop for *problem*.

        Iterates over all sampler phases, evaluating misfits for each
        drawn sample and appending results to the model history (written
        to *rundir* when given).

        :raises BadProblem: on inconsistent data availability between
            iterations or when all misfits are NaN.
        '''
        if rundir is not None:
            self.dump(filename=op.join(rundir, 'optimiser.yaml'))

        history = ModelHistory(problem,
                               nchains=self.nchains,
                               path=rundir, mode='w')
        chains = self.chains(problem, history)

        niter = self.niterations
        isbad_mask = None
        self._tlog_last = 0
        for iiter in range(niter):
            iphase, phase, iiter_phase = self.get_sampler_phase(iiter)
            self.log_progress(problem, iiter, niter, phase, iiter_phase)

            sample = phase.get_sample(problem, iiter_phase, chains)
            sample.iphase = iphase

            # Skip evaluation of targets known to deliver NaN misfits.
            if isbad_mask is not None and num.any(isbad_mask):
                isok_mask = num.logical_not(isbad_mask)
            else:
                isok_mask = None

            misfits = problem.misfits(sample.model, mask=isok_mask)

            bootstrap_misfits = problem.combine_misfits(
                misfits,
                extra_weights=self.get_bootstrap_weights(problem),
                extra_residuals=self.get_bootstrap_residuals(problem),
                extra_correlated_weights=self.get_correlated_weights(problem))
            isbad_mask_new = num.isnan(misfits[:, 0])
            # Data availability must not change between iterations —
            # that would make misfits incomparable across the run.
            if isbad_mask is not None and num.any(
                    isbad_mask != isbad_mask_new):

                errmess = [
                    'problem %s: inconsistency in data availability'
                    ' at iteration %i' %
                    (problem.name, iiter)]

                for target, isbad_new, isbad in zip(
                        problem.targets, isbad_mask_new, isbad_mask):

                    if isbad_new != isbad:
                        errmess.append(' %s, %s -> %s' % (
                            target.string_id(), isbad, isbad_new))

                raise BadProblem('\n'.join(errmess))

            isbad_mask = isbad_mask_new

            if num.all(isbad_mask):
                raise BadProblem(
                    'Problem %s: all target misfit values are NaN.'
                    % problem.name)

            history.append(
                sample.model, misfits,
                bootstrap_misfits,
                sample.pack_context())

    @property
    def niterations(self):
        # Total iterations over all configured sampler phases.
        return sum([ph.niterations for ph in self.sampler_phases])

    def get_status(self, history):
        '''Build an :py:class:`OptimiserStatus` for terminal display.'''
        if self._status_chains is None:
            self._status_chains = self.chains(history.problem, history)

        self._status_chains.goto(history.nmodels)

        chains = self._status_chains
        problem = history.problem

        row_names = [p.name_nogroups for p in problem.parameters]
        row_names.append('Misfit')

        def colum_array(data):
            # Pad `data` with NaN into a column of len(row_names).
            arr = num.full(len(row_names), fill_value=num.nan)
            arr[:data.size] = data
            return arr

        phase = self.get_sampler_phase(history.nmodels-1)[1]

        bs_mean = colum_array(chains.mean_model(ichain=None))
        bs_std = colum_array(chains.standard_deviation_models(
            ichain=None, estimator='standard_deviation_all_chains'))

        glob_mean = colum_array(chains.mean_model(ichain=0))
        glob_mean[-1] = num.mean(chains.misfits(ichain=0))

        glob_std = colum_array(chains.standard_deviation_models(
            ichain=0, estimator='standard_deviation_single_chain'))
        glob_std[-1] = num.std(chains.misfits(ichain=0))

        glob_best = colum_array(chains.best_model(ichain=0))
        glob_best[-1] = chains.best_model_misfit()

        glob_misfits = chains.misfits(ichain=0)

        acceptance_latest = chains.acceptance_history[
            :, -min(chains.acceptance_history.shape[1], self.ACCEPTANCE_AVG_LEN):]  # noqa
        acceptance_avg = acceptance_latest.mean(axis=1)

        def spark_plot(data, bins):
            # Render a histogram of `data` as a unicode sparkline.
            hist, _ = num.histogram(data, bins)
            hist_max = num.max(hist)
            if hist_max == 0.0:
                hist_max = 1.0
            hist = hist / hist_max
            vec = num.digitize(hist, num.linspace(0., 1., len(self.SPARKS)))
            return ''.join([self.SPARKS[b-1] for b in vec])

        return OptimiserStatus(
            row_names=row_names,
            column_data=OrderedDict(
                zip(['BS mean', 'BS std',
                     'Glob mean', 'Glob std', 'Glob best'],
                    [bs_mean, bs_std, glob_mean, glob_std, glob_best])),
            extra_header=  # noqa
                u'Optimiser phase: {phase}, exploring {nchains} BS chains\n'  # noqa
                u'Global chain misfit distribution: \u2080{mf_dist}\xb9\n'
                u'Acceptance rate distribution: \u2080{acceptance}'
                u'\u2081\u2080\u2080\ufe6a (Median {acceptance_med:.1f}%)'
                .format(
                    phase=phase.__class__.__name__,
                    nchains=chains.nchains,
                    mf_dist=spark_plot(
                        glob_misfits, num.linspace(0., 1., 25)),
                    acceptance=spark_plot(
                        acceptance_avg,
                        num.linspace(0., 1., 25)),
                    acceptance_med=num.median(acceptance_avg) * 100.
                ))

    def get_movie_maker(
            self, problem, history, xpar_name, ypar_name, movie_filename):

        from . import plot
        return plot.HighScoreOptimiserPlot(
            self, problem, history, xpar_name, ypar_name, movie_filename)

    @classmethod
    def get_plot_classes(cls):
        # Extend the base optimiser's plot classes with the acceptance
        # plot specific to the highscore optimiser.
        from .plot import HighScoreAcceptancePlot
        plots = Optimiser.get_plot_classes()
        plots.append(HighScoreAcceptancePlot)
        return plots
class HighScoreOptimiserConfig(OptimiserConfig):
    # Configuration object from which a HighScoreOptimiser is built.

    sampler_phases = List.T(
        SamplerPhase.T(),
        default=[UniformSamplerPhase(niterations=1000),
                 DirectedSamplerPhase(niterations=5000)],
        help='Stages of the sampler: Start with uniform sampling of the model'
             ' model space and narrow down through directed sampling.')
    chain_length_factor = Float.T(
        default=8.,
        help='Controls the length of each chain: '
             'chain_length_factor * nparameters + 1')
    nbootstrap = Int.T(
        default=100,
        help='Number of bootstrap realisations to be tracked simultaneously in'
             ' the optimisation.')

    def get_optimiser(self):
        # Copy the phases list so instances never share the mutable
        # default.
        return HighScoreOptimiser(
            sampler_phases=list(self.sampler_phases),
            chain_length_factor=self.chain_length_factor,
            nbootstrap=self.nbootstrap)
def load_optimiser_history(dirname, problem):
    '''Load acceptance flags and sampler choices from a run directory.

    Reads the raw binary files ``accepted`` (one little-endian byte per
    chain and model) and ``choices`` (pairs of little-endian int64 per
    model) written during optimisation.

    :param dirname: run directory containing ``accepted`` and ``choices``
    :param problem: problem providing ``nbootstrap``
    :returns: tuple ``(ibootstrap_choices, imodel_choices, accepted)``
        where ``accepted`` has shape ``(nmodels, nbootstrap+1)``
    '''
    nchains = problem.nbootstrap + 1

    fn = op.join(dirname, 'accepted')
    # Open in binary mode ('rb'): these files hold raw integer data;
    # text mode would corrupt the stream on platforms with newline
    # translation.
    with open(fn, 'rb') as f:
        nmodels = os.fstat(f.fileno()).st_size // nchains
        data1 = num.fromfile(
            f,
            dtype='<i1',
            count=nmodels*nchains).astype(bool)

    accepted = data1.reshape((nmodels, nchains))

    fn = op.join(dirname, 'choices')
    with open(fn, 'rb') as f:
        data2 = num.fromfile(
            f,
            dtype='<i8',
            count=nmodels*2).astype(num.int64)

    ibootstrap_choices, imodel_choices = data2.reshape((nmodels, 2)).T
    return ibootstrap_choices, imodel_choices, accepted
# Public API of this module, as exposed via ``from ... import *`` and
# picked up by documentation tools.
__all__ = '''
    SamplerDistributionChoice
    StandardDeviationEstimatorChoice
    SamplerPhase
    InjectionSamplerPhase
    UniformSamplerPhase
    DirectedSamplerPhase
    Chains
    HighScoreOptimiserConfig
    HighScoreOptimiser
'''.split()