1# http://pyrocko.org - GPLv3
2#
3# The Pyrocko Developers, 21st Century
4# ---|P------/S----------~Lg----------
5from __future__ import absolute_import, division, print_function
7from collections import defaultdict
8from functools import cmp_to_key
9import time
10import math
11import os
12import re
13import logging
14try:
15 import resource
16except ImportError:
17 resource = None
19import numpy as num
21from pyrocko.guts import (Object, Float, String, StringChoice, List,
22 Timestamp, Int, SObject, ArgumentError, Dict,
23 ValidationError)
24from pyrocko.guts_array import Array
26from pyrocko import moment_tensor as pmt
27from pyrocko import trace, util, config, model
28from pyrocko.orthodrome import ne_to_latlon
29from pyrocko.model import Location
31from . import meta, store, ws
32from .targets import Target, StaticTarget, SatelliteTarget
34pjoin = os.path.join
36guts_prefix = 'pf'
38d2r = math.pi / 180.
40logger = logging.getLogger('pyrocko.gf.seismosizer')
43def cmp_none_aware(a, b):
44 if isinstance(a, tuple) and isinstance(b, tuple):
45 for xa, xb in zip(a, b):
46 rv = cmp_none_aware(xa, xb)
47 if rv != 0:
48 return rv
50 return 0
52 anone = a is None
53 bnone = b is None
55 if anone and bnone:
56 return 0
58 if anone:
59 return -1
61 if bnone:
62 return 1
64 return bool(a > b) - bool(a < b)
67def xtime():
68 return time.time()
71class SeismosizerError(Exception):
72 pass
75class BadRequest(SeismosizerError):
76 pass
79class DuplicateStoreId(Exception):
80 pass
83class NoDefaultStoreSet(Exception):
84 pass
87class ConversionError(Exception):
88 pass
91class NoSuchStore(BadRequest):
93 def __init__(self, store_id=None, dirs=None):
94 BadRequest.__init__(self)
95 self.store_id = store_id
96 self.dirs = dirs
98 def __str__(self):
99 if self.store_id is not None:
100 rstr = 'no GF store with id "%s" found.' % self.store_id
101 else:
102 rstr = 'GF store not found.'
104 if self.dirs is not None:
105 rstr += ' Searched folders:\n %s' % '\n '.join(sorted(self.dirs))
106 return rstr
109def ufloat(s):
110 units = {
111 'k': 1e3,
112 'M': 1e6,
113 }
115 factor = 1.0
116 if s and s[-1] in units:
117 factor = units[s[-1]]
118 s = s[:-1]
119 if not s:
120 raise ValueError('unit without a number: \'%s\'' % s)
122 return float(s) * factor
125def ufloat_or_none(s):
126 if s:
127 return ufloat(s)
128 else:
129 return None
132def int_or_none(s):
133 if s:
134 return int(s)
135 else:
136 return None
139def nonzero(x, eps=1e-15):
140 return abs(x) > eps
143def permudef(ln, j=0):
144 if j < len(ln):
145 k, v = ln[j]
146 for y in v:
147 ln[j] = k, y
148 for s in permudef(ln, j + 1):
149 yield s
151 ln[j] = k, v
152 return
153 else:
154 yield ln
157def arr(x):
158 return num.atleast_1d(num.asarray(x))
161def discretize_rect_source(deltas, deltat, time, north, east, depth,
162 strike, dip, length, width,
163 anchor, velocity, stf=None,
164 nucleation_x=None, nucleation_y=None,
165 decimation_factor=1):
167 if stf is None:
168 stf = STF()
170 mindeltagf = num.min(deltas)
171 mindeltagf = min(mindeltagf, deltat * velocity)
173 ln = length
174 wd = width
176 nl = int((2. / decimation_factor) * num.ceil(ln / mindeltagf)) + 1
177 nw = int((2. / decimation_factor) * num.ceil(wd / mindeltagf)) + 1
179 n = int(nl * nw)
181 dl = ln / nl
182 dw = wd / nw
184 xl = num.linspace(-0.5 * (ln - dl), 0.5 * (ln - dl), nl)
185 xw = num.linspace(-0.5 * (wd - dw), 0.5 * (wd - dw), nw)
187 points = num.empty((n, 3), dtype=float)
188 points[:, 0] = num.tile(xl, nw)
189 points[:, 1] = num.repeat(xw, nl)
190 points[:, 2] = 0.0
192 if nucleation_x is not None:
193 dist_x = num.abs(nucleation_x - points[:, 0])
194 else:
195 dist_x = num.zeros(n)
197 if nucleation_y is not None:
198 dist_y = num.abs(nucleation_y - points[:, 1])
199 else:
200 dist_y = num.zeros(n)
202 dist = num.sqrt(dist_x**2 + dist_y**2)
203 times = dist / velocity
205 anch_x, anch_y = map_anchor[anchor]
207 points[:, 0] -= anch_x * 0.5 * length
208 points[:, 1] -= anch_y * 0.5 * width
210 rotmat = num.asarray(
211 pmt.euler_to_matrix(dip * d2r, strike * d2r, 0.0))
213 points = num.dot(rotmat.T, points.T).T
215 xtau, amplitudes = stf.discretize_t(deltat, time)
216 nt = xtau.size
218 points2 = num.repeat(points, nt, axis=0)
219 times2 = num.repeat(times, nt) + num.tile(xtau, n)
220 amplitudes2 = num.tile(amplitudes, n)
222 points2[:, 0] += north
223 points2[:, 1] += east
224 points2[:, 2] += depth
226 return points2, times2, amplitudes2, dl, dw, nl, nw
229def check_rect_source_discretisation(points2, nl, nw, store):
230 # We assume a non-rotated fault plane
231 N_CRITICAL = 8
232 points = points2.T.reshape((3, nl, nw))
233 if points.size <= N_CRITICAL:
234 logger.warning('RectangularSource is defined by only %d sub-sources!'
235 % points.size)
236 return True
238 distances = num.sqrt(
239 (points[0, 0, :] - points[0, 1, :])**2 +
240 (points[1, 0, :] - points[1, 1, :])**2 +
241 (points[2, 0, :] - points[2, 1, :])**2)
243 depths = points[2, 0, :]
244 vs_profile = store.config.get_vs(
245 lat=0., lon=0.,
246 points=num.repeat(depths[:, num.newaxis], 3, axis=1),
247 interpolation='multilinear')
249 min_wavelength = vs_profile * (store.config.deltat * 2)
250 if not num.all(min_wavelength > distances/2):
251 return False
252 return True
255def outline_rect_source(strike, dip, length, width, anchor):
256 ln = length
257 wd = width
258 points = num.array(
259 [[-0.5 * ln, -0.5 * wd, 0.],
260 [0.5 * ln, -0.5 * wd, 0.],
261 [0.5 * ln, 0.5 * wd, 0.],
262 [-0.5 * ln, 0.5 * wd, 0.],
263 [-0.5 * ln, -0.5 * wd, 0.]])
265 anch_x, anch_y = map_anchor[anchor]
266 points[:, 0] -= anch_x * 0.5 * length
267 points[:, 1] -= anch_y * 0.5 * width
269 rotmat = num.asarray(
270 pmt.euler_to_matrix(dip * d2r, strike * d2r, 0.0))
272 return num.dot(rotmat.T, points.T).T
275def from_plane_coords(
276 strike, dip, length, width, depth, x_plane_coords, y_plane_coords,
277 lat=0., lon=0.,
278 north_shift=0, east_shift=0,
279 anchor='top', cs='xy'):
281 ln = length
282 wd = width
283 x_abs = []
284 y_abs = []
285 if not isinstance(x_plane_coords, list):
286 x_plane_coords = [x_plane_coords]
287 y_plane_coords = [y_plane_coords]
289 for x_plane, y_plane in zip(x_plane_coords, y_plane_coords):
290 points = num.array(
291 [[-0.5 * ln*x_plane, -0.5 * wd*y_plane, 0.],
292 [0.5 * ln*x_plane, -0.5 * wd*y_plane, 0.],
293 [0.5 * ln*x_plane, 0.5 * wd*y_plane, 0.],
294 [-0.5 * ln*x_plane, 0.5 * wd*y_plane, 0.],
295 [-0.5 * ln*x_plane, -0.5 * wd*y_plane, 0.]])
297 anch_x, anch_y = map_anchor[anchor]
298 points[:, 0] -= anch_x * 0.5 * length
299 points[:, 1] -= anch_y * 0.5 * width
301 rotmat = num.asarray(
302 pmt.euler_to_matrix(dip * d2r, strike * d2r, 0.0))
304 points = num.dot(rotmat.T, points.T).T
305 points[:, 0] += north_shift
306 points[:, 1] += east_shift
307 points[:, 2] += depth
308 if cs in ('latlon', 'lonlat'):
309 latlon = ne_to_latlon(lat, lon,
310 points[:, 0], points[:, 1])
311 latlon = num.array(latlon).T
312 x_abs.append(latlon[1:2, 1])
313 y_abs.append(latlon[2:3, 0])
314 if cs == 'xy':
315 x_abs.append(points[1:2, 1])
316 y_abs.append(points[2:3, 0])
318 if cs == 'lonlat':
319 return y_abs, x_abs
320 else:
321 return x_abs, y_abs
324class InvalidGridDef(Exception):
325 pass
328class Range(SObject):
329 '''
330 Convenient range specification.
332 Equivalent ways to sepecify the range [ 0., 1000., ... 10000. ]::
334 Range('0 .. 10k : 1k')
335 Range(start=0., stop=10e3, step=1e3)
336 Range(0, 10e3, 1e3)
337 Range('0 .. 10k @ 11')
338 Range(start=0., stop=10*km, n=11)
340 Range(0, 10e3, n=11)
341 Range(values=[x*1e3 for x in range(11)])
343 Depending on the use context, it can be possible to omit any part of the
344 specification. E.g. in the context of extracting a subset of an already
345 existing range, the existing range's specification values would be filled
346 in where missing.
348 The values are distributed with equal spacing, unless the ``spacing``
349 argument is modified. The values can be created offset or relative to an
350 external base value with the ``relative`` argument if the use context
351 supports this.
353 The range specification can be expressed with a short string
354 representation::
356 'start .. stop @ num | spacing, relative'
357 'start .. stop : step | spacing, relative'
359 most parts of the expression can be omitted if not needed. Whitespace is
360 allowed for readability but can also be omitted.
361 '''
363 start = Float.T(optional=True)
364 stop = Float.T(optional=True)
365 step = Float.T(optional=True)
366 n = Int.T(optional=True)
367 values = Array.T(optional=True, dtype=float, shape=(None,))
369 spacing = StringChoice.T(
370 choices=['lin', 'log', 'symlog'],
371 default='lin',
372 optional=True)
374 relative = StringChoice.T(
375 choices=['', 'add', 'mult'],
376 default='',
377 optional=True)
379 pattern = re.compile(r'^((?P<start>.*)\.\.(?P<stop>[^@|:]*))?'
380 r'(@(?P<n>[^|]+)|:(?P<step>[^|]+))?'
381 r'(\|(?P<stuff>.+))?$')
383 def __init__(self, *args, **kwargs):
384 d = {}
385 if len(args) == 1:
386 d = self.parse(args[0])
387 elif len(args) in (2, 3):
388 d['start'], d['stop'] = [float(x) for x in args[:2]]
389 if len(args) == 3:
390 d['step'] = float(args[2])
392 for k, v in kwargs.items():
393 if k in d:
394 raise ArgumentError('%s specified more than once' % k)
396 d[k] = v
398 SObject.__init__(self, **d)
400 def __str__(self):
401 def sfloat(x):
402 if x is not None:
403 return '%g' % x
404 else:
405 return ''
407 if self.values:
408 return ','.join('%g' % x for x in self.values)
410 if self.start is None and self.stop is None:
411 s0 = ''
412 else:
413 s0 = '%s .. %s' % (sfloat(self.start), sfloat(self.stop))
415 s1 = ''
416 if self.step is not None:
417 s1 = [' : %g', ':%g'][s0 == ''] % self.step
418 elif self.n is not None:
419 s1 = [' @ %i', '@%i'][s0 == ''] % self.n
421 if self.spacing == 'lin' and self.relative == '':
422 s2 = ''
423 else:
424 x = []
425 if self.spacing != 'lin':
426 x.append(self.spacing)
428 if self.relative != '':
429 x.append(self.relative)
431 s2 = ' | %s' % ','.join(x)
433 return s0 + s1 + s2
435 @classmethod
436 def parse(cls, s):
437 s = re.sub(r'\s+', '', s)
438 m = cls.pattern.match(s)
439 if not m:
440 try:
441 vals = [ufloat(x) for x in s.split(',')]
442 except Exception:
443 raise InvalidGridDef(
444 '"%s" is not a valid range specification' % s)
446 return dict(values=num.array(vals, dtype=float))
448 d = m.groupdict()
449 try:
450 start = ufloat_or_none(d['start'])
451 stop = ufloat_or_none(d['stop'])
452 step = ufloat_or_none(d['step'])
453 n = int_or_none(d['n'])
454 except Exception:
455 raise InvalidGridDef(
456 '"%s" is not a valid range specification' % s)
458 spacing = 'lin'
459 relative = ''
461 if d['stuff'] is not None:
462 t = d['stuff'].split(',')
463 for x in t:
464 if x in cls.spacing.choices:
465 spacing = x
466 elif x and x in cls.relative.choices:
467 relative = x
468 else:
469 raise InvalidGridDef(
470 '"%s" is not a valid range specification' % s)
472 return dict(start=start, stop=stop, step=step, n=n, spacing=spacing,
473 relative=relative)
475 def make(self, mi=None, ma=None, inc=None, base=None, eps=1e-5):
476 if self.values:
477 return self.values
479 start = self.start
480 stop = self.stop
481 step = self.step
482 n = self.n
484 swap = step is not None and step < 0.
485 if start is None:
486 start = [mi, ma][swap]
487 if stop is None:
488 stop = [ma, mi][swap]
489 if step is None and inc is not None:
490 step = [inc, -inc][ma < mi]
492 if start is None or stop is None:
493 raise InvalidGridDef(
494 'Cannot use range specification "%s" without start '
495 'and stop in this context' % self)
497 if step is None and n is None:
498 step = stop - start
500 if n is None:
501 if (step < 0) != (stop - start < 0):
502 raise InvalidGridDef(
503 'Range specification "%s" has inconsistent ordering '
504 '(step < 0 => stop > start)' % self)
506 n = int(round((stop - start) / step)) + 1
507 stop2 = start + (n - 1) * step
508 if abs(stop - stop2) > eps:
509 n = int(math.floor((stop - start) / step)) + 1
510 stop = start + (n - 1) * step
511 else:
512 stop = stop2
514 if start == stop:
515 n = 1
517 if self.spacing == 'lin':
518 vals = num.linspace(start, stop, n)
520 elif self.spacing in ('log', 'symlog'):
521 if start > 0. and stop > 0.:
522 vals = num.exp(num.linspace(num.log(start),
523 num.log(stop), n))
524 elif start < 0. and stop < 0.:
525 vals = -num.exp(num.linspace(num.log(-start),
526 num.log(-stop), n))
527 else:
528 raise InvalidGridDef(
529 'Log ranges should not include or cross zero '
530 '(in range specification "%s").' % self)
532 if self.spacing == 'symlog':
533 nvals = - vals
534 vals = num.concatenate((nvals[::-1], vals))
536 if self.relative in ('add', 'mult') and base is None:
537 raise InvalidGridDef(
538 'Cannot use relative range specification in this context.')
540 vals = self.make_relative(base, vals)
542 return list(map(float, vals))
544 def make_relative(self, base, vals):
545 if self.relative == 'add':
546 vals += base
548 if self.relative == 'mult':
549 vals *= base
551 return vals
554class GridDefElement(Object):
556 param = meta.StringID.T()
557 rs = Range.T()
559 def __init__(self, shorthand=None, **kwargs):
560 if shorthand is not None:
561 t = shorthand.split('=')
562 if len(t) != 2:
563 raise InvalidGridDef(
564 'Invalid grid specification element: %s' % shorthand)
566 sp, sr = t[0].strip(), t[1].strip()
568 kwargs['param'] = sp
569 kwargs['rs'] = Range(sr)
571 Object.__init__(self, **kwargs)
573 def shorthand(self):
574 return self.param + ' = ' + str(self.rs)
577class GridDef(Object):
579 elements = List.T(GridDefElement.T())
581 def __init__(self, shorthand=None, **kwargs):
582 if shorthand is not None:
583 t = shorthand.splitlines()
584 tt = []
585 for x in t:
586 x = x.strip()
587 if x:
588 tt.extend(x.split(';'))
590 elements = []
591 for se in tt:
592 elements.append(GridDef(se))
594 kwargs['elements'] = elements
596 Object.__init__(self, **kwargs)
598 def shorthand(self):
599 return '; '.join(str(x) for x in self.elements)
602class Cloneable(object):
604 def __iter__(self):
605 return iter(self.T.propnames)
607 def __getitem__(self, k):
608 if k not in self.keys():
609 raise KeyError(k)
611 return getattr(self, k)
613 def __setitem__(self, k, v):
614 if k not in self.keys():
615 raise KeyError(k)
617 return setattr(self, k, v)
619 def clone(self, **kwargs):
620 '''
621 Make a copy of the object.
623 A new object of the same class is created and initialized with the
624 parameters of the object on which this method is called on. If
625 ``kwargs`` are given, these are used to override any of the
626 initialization parameters.
627 '''
629 d = dict(self)
630 for k in d:
631 v = d[k]
632 if isinstance(v, Cloneable):
633 d[k] = v.clone()
635 d.update(kwargs)
636 return self.__class__(**d)
638 @classmethod
639 def keys(cls):
640 '''
641 Get list of the source model's parameter names.
642 '''
644 return cls.T.propnames
647class STF(Object, Cloneable):
649 '''
650 Base class for source time functions.
651 '''
653 def __init__(self, effective_duration=None, **kwargs):
654 if effective_duration is not None:
655 kwargs['duration'] = effective_duration / \
656 self.factor_duration_to_effective()
658 Object.__init__(self, **kwargs)
660 @classmethod
661 def factor_duration_to_effective(cls):
662 return 1.0
664 def centroid_time(self, tref):
665 return tref
667 @property
668 def effective_duration(self):
669 return self.duration * self.factor_duration_to_effective()
671 def discretize_t(self, deltat, tref):
672 tl = math.floor(tref / deltat) * deltat
673 th = math.ceil(tref / deltat) * deltat
674 if tl == th:
675 return num.array([tl], dtype=float), num.ones(1)
676 else:
677 return (
678 num.array([tl, th], dtype=float),
679 num.array([th - tref, tref - tl], dtype=float) / deltat)
681 def base_key(self):
682 return (type(self).__name__,)
685g_unit_pulse = STF()
688def sshift(times, amplitudes, tshift, deltat):
690 t0 = math.floor(tshift / deltat) * deltat
691 t1 = math.ceil(tshift / deltat) * deltat
692 if t0 == t1:
693 return times, amplitudes
695 amplitudes2 = num.zeros(amplitudes.size + 1, dtype=float)
697 amplitudes2[:-1] += (t1 - tshift) / deltat * amplitudes
698 amplitudes2[1:] += (tshift - t0) / deltat * amplitudes
700 times2 = num.arange(times.size + 1, dtype=float) * \
701 deltat + times[0] + t0
703 return times2, amplitudes2
706class BoxcarSTF(STF):
708 '''
709 Boxcar type source time function.
711 .. figure :: /static/stf-BoxcarSTF.svg
712 :width: 40%
713 :align: center
714 :alt: boxcar source time function
715 '''
717 duration = Float.T(
718 default=0.0,
719 help='duration of the boxcar')
721 anchor = Float.T(
722 default=0.0,
723 help='anchor point with respect to source.time: ('
724 '-1.0: left -> source duration [0, T] ~ hypocenter time, '
725 ' 0.0: center -> source duration [-T/2, T/2] ~ centroid time, '
726 '+1.0: right -> source duration [-T, 0] ~ rupture end time)')
728 @classmethod
729 def factor_duration_to_effective(cls):
730 return 1.0
732 def centroid_time(self, tref):
733 return tref - 0.5 * self.duration * self.anchor
735 def discretize_t(self, deltat, tref):
736 tmin_stf = tref - self.duration * (self.anchor + 1.) * 0.5
737 tmax_stf = tref + self.duration * (1. - self.anchor) * 0.5
738 tmin = round(tmin_stf / deltat) * deltat
739 tmax = round(tmax_stf / deltat) * deltat
740 nt = int(round((tmax - tmin) / deltat)) + 1
741 times = num.linspace(tmin, tmax, nt)
742 amplitudes = num.ones_like(times)
743 if times.size > 1:
744 t_edges = num.linspace(
745 tmin - 0.5 * deltat, tmax + 0.5 * deltat, nt + 1)
746 t = tmin_stf + self.duration * num.array(
747 [0.0, 0.0, 1.0, 1.0], dtype=float)
748 f = num.array([0., 1., 1., 0.], dtype=float)
749 amplitudes = util.plf_integrate_piecewise(t_edges, t, f)
750 amplitudes /= num.sum(amplitudes)
752 tshift = (num.sum(amplitudes * times) - self.centroid_time(tref))
754 return sshift(times, amplitudes, -tshift, deltat)
756 def base_key(self):
757 return (type(self).__name__, self.duration, self.anchor)
760class TriangularSTF(STF):
762 '''
763 Triangular type source time function.
765 .. figure :: /static/stf-TriangularSTF.svg
766 :width: 40%
767 :align: center
768 :alt: triangular source time function
769 '''
771 duration = Float.T(
772 default=0.0,
773 help='baseline of the triangle')
775 peak_ratio = Float.T(
776 default=0.5,
777 help='fraction of time compared to duration, '
778 'when the maximum amplitude is reached')
780 anchor = Float.T(
781 default=0.0,
782 help='anchor point with respect to source.time: ('
783 '-1.0: left -> source duration [0, T] ~ hypocenter time, '
784 ' 0.0: center -> source duration [-T/2, T/2] ~ centroid time, '
785 '+1.0: right -> source duration [-T, 0] ~ rupture end time)')
787 @classmethod
788 def factor_duration_to_effective(cls, peak_ratio=None):
789 if peak_ratio is None:
790 peak_ratio = cls.peak_ratio.default()
792 return math.sqrt((peak_ratio**2 - peak_ratio + 1.0) * 2.0 / 3.0)
794 def __init__(self, effective_duration=None, **kwargs):
795 if effective_duration is not None:
796 kwargs['duration'] = effective_duration / \
797 self.factor_duration_to_effective(
798 kwargs.get('peak_ratio', None))
800 STF.__init__(self, **kwargs)
802 @property
803 def centroid_ratio(self):
804 ra = self.peak_ratio
805 rb = 1.0 - ra
806 return self.peak_ratio + (rb**2 / 3. - ra**2 / 3.) / (ra + rb)
808 def centroid_time(self, tref):
809 ca = self.centroid_ratio
810 cb = 1.0 - ca
811 if self.anchor <= 0.:
812 return tref - ca * self.duration * self.anchor
813 else:
814 return tref - cb * self.duration * self.anchor
816 @property
817 def effective_duration(self):
818 return self.duration * self.factor_duration_to_effective(
819 self.peak_ratio)
821 def tminmax_stf(self, tref):
822 ca = self.centroid_ratio
823 cb = 1.0 - ca
824 if self.anchor <= 0.:
825 tmin_stf = tref - ca * self.duration * (self.anchor + 1.)
826 tmax_stf = tmin_stf + self.duration
827 else:
828 tmax_stf = tref + cb * self.duration * (1. - self.anchor)
829 tmin_stf = tmax_stf - self.duration
831 return tmin_stf, tmax_stf
833 def discretize_t(self, deltat, tref):
834 tmin_stf, tmax_stf = self.tminmax_stf(tref)
836 tmin = round(tmin_stf / deltat) * deltat
837 tmax = round(tmax_stf / deltat) * deltat
838 nt = int(round((tmax - tmin) / deltat)) + 1
839 if nt > 1:
840 t_edges = num.linspace(
841 tmin - 0.5 * deltat, tmax + 0.5 * deltat, nt + 1)
842 t = tmin_stf + self.duration * num.array(
843 [0.0, self.peak_ratio, 1.0], dtype=float)
844 f = num.array([0., 1., 0.], dtype=float)
845 amplitudes = util.plf_integrate_piecewise(t_edges, t, f)
846 amplitudes /= num.sum(amplitudes)
847 else:
848 amplitudes = num.ones(1)
850 times = num.linspace(tmin, tmax, nt)
851 return times, amplitudes
853 def base_key(self):
854 return (
855 type(self).__name__, self.duration, self.peak_ratio, self.anchor)
858class HalfSinusoidSTF(STF):
860 '''
861 Half sinusoid type source time function.
863 .. figure :: /static/stf-HalfSinusoidSTF.svg
864 :width: 40%
865 :align: center
866 :alt: half-sinusouid source time function
867 '''
869 duration = Float.T(
870 default=0.0,
871 help='duration of the half-sinusoid (baseline)')
873 anchor = Float.T(
874 default=0.0,
875 help='anchor point with respect to source.time: ('
876 '-1.0: left -> source duration [0, T] ~ hypocenter time, '
877 ' 0.0: center -> source duration [-T/2, T/2] ~ centroid time, '
878 '+1.0: right -> source duration [-T, 0] ~ rupture end time)')
880 exponent = Int.T(
881 default=1,
882 help='set to 2 to use square of the half-period sinusoidal function.')
884 def __init__(self, effective_duration=None, **kwargs):
885 if effective_duration is not None:
886 kwargs['duration'] = effective_duration / \
887 self.factor_duration_to_effective(
888 kwargs.get('exponent', 1))
890 STF.__init__(self, **kwargs)
892 @classmethod
893 def factor_duration_to_effective(cls, exponent):
894 if exponent == 1:
895 return math.sqrt(3.0 * math.pi**2 - 24.0) / math.pi
896 elif exponent == 2:
897 return math.sqrt(math.pi**2 - 6) / math.pi
898 else:
899 raise ValueError('Exponent for HalfSinusoidSTF must be 1 or 2.')
901 @property
902 def effective_duration(self):
903 return self.duration * self.factor_duration_to_effective(self.exponent)
905 def centroid_time(self, tref):
906 return tref - 0.5 * self.duration * self.anchor
908 def discretize_t(self, deltat, tref):
909 tmin_stf = tref - self.duration * (self.anchor + 1.) * 0.5
910 tmax_stf = tref + self.duration * (1. - self.anchor) * 0.5
911 tmin = round(tmin_stf / deltat) * deltat
912 tmax = round(tmax_stf / deltat) * deltat
913 nt = int(round((tmax - tmin) / deltat)) + 1
914 if nt > 1:
915 t_edges = num.maximum(tmin_stf, num.minimum(tmax_stf, num.linspace(
916 tmin - 0.5 * deltat, tmax + 0.5 * deltat, nt + 1)))
918 if self.exponent == 1:
919 fint = -num.cos(
920 (t_edges - tmin_stf) * (math.pi / self.duration))
922 elif self.exponent == 2:
923 fint = (t_edges - tmin_stf) / self.duration \
924 - 1.0 / (2.0 * math.pi) * num.sin(
925 (t_edges - tmin_stf) * (2.0 * math.pi / self.duration))
926 else:
927 raise ValueError(
928 'Exponent for HalfSinusoidSTF must be 1 or 2.')
930 amplitudes = fint[1:] - fint[:-1]
931 amplitudes /= num.sum(amplitudes)
932 else:
933 amplitudes = num.ones(1)
935 times = num.linspace(tmin, tmax, nt)
936 return times, amplitudes
938 def base_key(self):
939 return (type(self).__name__, self.duration, self.anchor)
942class SmoothRampSTF(STF):
943 '''
944 Smooth-ramp type source time function for near-field displacement.
945 Based on moment function of double-couple point source proposed by Bruestle
946 and Mueller (PEPI, 1983).
948 .. [1] W. Bruestle, G. Mueller (1983), Moment and duration of shallow
949 earthquakes from Love-wave modelling for regional distances, PEPI 32,
950 312-324.
952 .. figure :: /static/stf-SmoothRampSTF.svg
953 :width: 40%
954 :alt: smooth ramp source time function
955 '''
956 duration = Float.T(
957 default=0.0,
958 help='duration of the ramp (baseline)')
960 rise_ratio = Float.T(
961 default=0.5,
962 help='fraction of time compared to duration, '
963 'when the maximum amplitude is reached')
965 anchor = Float.T(
966 default=0.0,
967 help='anchor point with respect to source.time: ('
968 '-1.0: left -> source duration ``[0, T]`` ~ hypocenter time, '
969 '0.0: center -> source duration ``[-T/2, T/2]`` ~ centroid time, '
970 '+1.0: right -> source duration ``[-T, 0]`` ~ rupture end time)')
972 def discretize_t(self, deltat, tref):
973 tmin_stf = tref - self.duration * (self.anchor + 1.) * 0.5
974 tmax_stf = tref + self.duration * (1. - self.anchor) * 0.5
975 tmin = round(tmin_stf / deltat) * deltat
976 tmax = round(tmax_stf / deltat) * deltat
977 D = round((tmax - tmin) / deltat) * deltat
978 nt = int(round(D / deltat)) + 1
979 times = num.linspace(tmin, tmax, nt)
980 if nt > 1:
981 rise_time = self.rise_ratio * self.duration
982 amplitudes = num.ones_like(times)
983 tp = tmin + rise_time
984 ii = num.where(times <= tp)
985 t_inc = times[ii]
986 a = num.cos(num.pi * (t_inc - tmin_stf) / rise_time)
987 b = num.cos(3 * num.pi * (t_inc - tmin_stf) / rise_time) - 1.0
988 amplitudes[ii] = (9. / 16.) * (1 - a + (1. / 9.) * b)
990 amplitudes /= num.sum(amplitudes)
991 else:
992 amplitudes = num.ones(1)
994 return times, amplitudes
996 def base_key(self):
997 return (type(self).__name__,
998 self.duration, self.rise_ratio, self.anchor)
1001class ResonatorSTF(STF):
1002 '''
1003 Simple resonator like source time function.
1005 .. math ::
1007 f(t) = 0 for t < 0
1008 f(t) = e^{-t/tau} * sin(2 * pi * f * t)
1011 .. figure :: /static/stf-SmoothRampSTF.svg
1012 :width: 40%
1013 :alt: smooth ramp source time function
1015 '''
1017 duration = Float.T(
1018 default=0.0,
1019 help='decay time')
1021 frequency = Float.T(
1022 default=1.0,
1023 help='resonance frequency')
1025 def discretize_t(self, deltat, tref):
1026 tmin_stf = tref
1027 tmax_stf = tref + self.duration * 3
1028 tmin = math.floor(tmin_stf / deltat) * deltat
1029 tmax = math.ceil(tmax_stf / deltat) * deltat
1030 times = util.arange2(tmin, tmax, deltat)
1031 amplitudes = num.exp(-(times-tref)/self.duration) \
1032 * num.sin(2.0 * num.pi * self.frequency * (times-tref))
1034 return times, amplitudes
1036 def base_key(self):
1037 return (type(self).__name__,
1038 self.duration, self.frequency)
1041class STFMode(StringChoice):
1042 choices = ['pre', 'post']
1045class Source(Location, Cloneable):
1046 '''
1047 Base class for all source models.
1048 '''
1050 name = String.T(optional=True, default='')
1052 time = Timestamp.T(
1053 default=Timestamp.D('1970-01-01 00:00:00'),
1054 help='source origin time.')
1056 stf = STF.T(
1057 optional=True,
1058 help='source time function.')
1060 stf_mode = STFMode.T(
1061 default='post',
1062 help='whether to apply source time function in pre or '
1063 'post-processing.')
1065 def __init__(self, **kwargs):
1066 Location.__init__(self, **kwargs)
1068 def update(self, **kwargs):
1069 '''
1070 Change some of the source models parameters.
1072 Example::
1074 >>> from pyrocko import gf
1075 >>> s = gf.DCSource()
1076 >>> s.update(strike=66., dip=33.)
1077 >>> print s
1078 --- !pf.DCSource
1079 depth: 0.0
1080 time: 1970-01-01 00:00:00
1081 magnitude: 6.0
1082 strike: 66.0
1083 dip: 33.0
1084 rake: 0.0
1086 '''
1088 for (k, v) in kwargs.items():
1089 self[k] = v
1091 def grid(self, **variables):
1092 '''
1093 Create grid of source model variations.
1095 :returns: :py:class:`SourceGrid` instance.
1097 Example::
1099 >>> from pyrocko import gf
1100 >>> base = DCSource()
1101 >>> R = gf.Range
1102 >>> for s in base.grid(R('
1104 '''
1105 return SourceGrid(base=self, variables=variables)
1107 def base_key(self):
1108 '''
1109 Get key to decide about source discretization / GF stack sharing.
1111 When two source models differ only in amplitude and origin time, the
1112 discretization and the GF stacking can be done only once for a unit
1113 amplitude and a zero origin time and the amplitude and origin times of
1114 the seismograms can be applied during post-processing of the synthetic
1115 seismogram.
1117 For any derived parameterized source model, this method is called to
1118 decide if discretization and stacking of the source should be shared.
1119 When two source models return an equal vector of values discretization
1120 is shared.
1121 '''
1122 return (self.depth, self.lat, self.north_shift,
1123 self.lon, self.east_shift, self.time, type(self).__name__) + \
1124 self.effective_stf_pre().base_key()
1126 def get_factor(self):
1127 '''
1128 Get the scaling factor to be applied during post-processing.
1130 Discretization of the base seismogram is usually done for a unit
1131 amplitude, because a common factor can be efficiently multiplied to
1132 final seismograms. This eliminates to do repeat the stacking when
1133 creating seismograms for a series of source models only differing in
1134 amplitude.
1136 This method should return the scaling factor to apply in the
1137 post-processing (often this is simply the scalar moment of the source).
1138 '''
1140 return 1.0
1142 def effective_stf_pre(self):
1143 '''
1144 Return the STF applied before stacking of the Green's functions.
1146 This STF is used during discretization of the parameterized source
1147 models, i.e. to produce a temporal distribution of point sources.
1149 Handling of the STF before stacking of the GFs is less efficient but
1150 allows to use different source time functions for different parts of
1151 the source.
1152 '''
1154 if self.stf is not None and self.stf_mode == 'pre':
1155 return self.stf
1156 else:
1157 return g_unit_pulse
1159 def effective_stf_post(self):
1160 '''
1161 Return the STF applied after stacking of the Green's fuctions.
1163 This STF is used in the post-processing of the synthetic seismograms.
1165 Handling of the STF after stacking of the GFs is usually more efficient
1166 but is only possible when a common STF is used for all subsources.
1167 '''
1169 if self.stf is not None and self.stf_mode == 'post':
1170 return self.stf
1171 else:
1172 return g_unit_pulse
1174 def _dparams_base(self):
1175 return dict(times=arr(self.time),
1176 lat=self.lat, lon=self.lon,
1177 north_shifts=arr(self.north_shift),
1178 east_shifts=arr(self.east_shift),
1179 depths=arr(self.depth))
1181 def _dparams_base_repeated(self, times):
1182 if times is None:
1183 return self._dparams_base()
1185 nt = times.size
1186 north_shifts = num.repeat(self.north_shift, nt)
1187 east_shifts = num.repeat(self.east_shift, nt)
1188 depths = num.repeat(self.depth, nt)
1189 return dict(times=times,
1190 lat=self.lat, lon=self.lon,
1191 north_shifts=north_shifts,
1192 east_shifts=east_shifts,
1193 depths=depths)
1195 def pyrocko_event(self, store=None, target=None, **kwargs):
1196 duration = None
1197 if self.stf:
1198 duration = self.stf.effective_duration
1200 return model.Event(
1201 lat=self.lat,
1202 lon=self.lon,
1203 north_shift=self.north_shift,
1204 east_shift=self.east_shift,
1205 time=self.time,
1206 name=self.name,
1207 depth=self.depth,
1208 duration=duration,
1209 **kwargs)
1211 def outline(self, cs='xyz'):
1212 points = num.atleast_2d(num.zeros([1, 3]))
1214 points[:, 0] += self.north_shift
1215 points[:, 1] += self.east_shift
1216 points[:, 2] += self.depth
1217 if cs == 'xyz':
1218 return points
1219 elif cs == 'xy':
1220 return points[:, :2]
1221 elif cs in ('latlon', 'lonlat'):
1222 latlon = ne_to_latlon(
1223 self.lat, self.lon, points[:, 0], points[:, 1])
1225 latlon = num.array(latlon).T
1226 if cs == 'latlon':
1227 return latlon
1228 else:
1229 return latlon[:, ::-1]
1231 @classmethod
1232 def from_pyrocko_event(cls, ev, **kwargs):
1233 if ev.depth is None:
1234 raise ConversionError(
1235 'Cannot convert event object to source object: '
1236 'no depth information available')
1238 stf = None
1239 if ev.duration is not None:
1240 stf = HalfSinusoidSTF(effective_duration=ev.duration)
1242 d = dict(
1243 name=ev.name,
1244 time=ev.time,
1245 lat=ev.lat,
1246 lon=ev.lon,
1247 north_shift=ev.north_shift,
1248 east_shift=ev.east_shift,
1249 depth=ev.depth,
1250 stf=stf)
1251 d.update(kwargs)
1252 return cls(**d)
1254 def get_magnitude(self):
1255 raise NotImplementedError(
1256 '%s does not implement get_magnitude()'
1257 % self.__class__.__name__)
1260class SourceWithMagnitude(Source):
1261 '''
1262 Base class for sources containing a moment magnitude.
1263 '''
1265 magnitude = Float.T(
1266 default=6.0,
1267 help='Moment magnitude Mw as in [Hanks and Kanamori, 1979]')
1269 def __init__(self, **kwargs):
1270 if 'moment' in kwargs:
1271 mom = kwargs.pop('moment')
1272 if 'magnitude' not in kwargs:
1273 kwargs['magnitude'] = float(pmt.moment_to_magnitude(mom))
1275 Source.__init__(self, **kwargs)
1277 @property
1278 def moment(self):
1279 return float(pmt.magnitude_to_moment(self.magnitude))
1281 @moment.setter
1282 def moment(self, value):
1283 self.magnitude = float(pmt.moment_to_magnitude(value))
1285 def pyrocko_event(self, store=None, target=None, **kwargs):
1286 return Source.pyrocko_event(
1287 self, store, target,
1288 magnitude=self.magnitude,
1289 **kwargs)
1291 @classmethod
1292 def from_pyrocko_event(cls, ev, **kwargs):
1293 d = {}
1294 if ev.magnitude:
1295 d.update(magnitude=ev.magnitude)
1297 d.update(kwargs)
1298 return super(SourceWithMagnitude, cls).from_pyrocko_event(ev, **d)
1300 def get_magnitude(self):
1301 return self.magnitude
1304class DerivedMagnitudeError(ValidationError):
1305 pass
1308class SourceWithDerivedMagnitude(Source):
1310 class __T(Source.T):
1312 def validate_extra(self, val):
1313 Source.T.validate_extra(self, val)
1314 val.check_conflicts()
1316 def check_conflicts(self):
1317 '''
1318 Check for parameter conflicts.
1320 To be overloaded in subclasses. Raises :py:exc:`DerivedMagnitudeError`
1321 on conflicts.
1322 '''
1323 pass
1325 def get_magnitude(self, store=None, target=None):
1326 raise DerivedMagnitudeError('No magnitude set.')
1328 def get_moment(self, store=None, target=None):
1329 return float(pmt.magnitude_to_moment(
1330 self.get_magnitude(store, target)))
1332 def pyrocko_moment_tensor(self, store=None, target=None):
1333 raise NotImplementedError(
1334 '%s does not implement pyrocko_moment_tensor()'
1335 % self.__class__.__name__)
1337 def pyrocko_event(self, store=None, target=None, **kwargs):
1338 try:
1339 mt = self.pyrocko_moment_tensor(store, target)
1340 magnitude = self.get_magnitude()
1341 except (DerivedMagnitudeError, NotImplementedError):
1342 mt = None
1343 magnitude = None
1345 return Source.pyrocko_event(
1346 self, store, target,
1347 moment_tensor=mt,
1348 magnitude=magnitude,
1349 **kwargs)
1352class ExplosionSource(SourceWithDerivedMagnitude):
1353 '''
1354 An isotropic explosion point source.
1355 '''
1357 magnitude = Float.T(
1358 optional=True,
1359 help='moment magnitude Mw as in [Hanks and Kanamori, 1979]')
1361 volume_change = Float.T(
1362 optional=True,
1363 help='volume change of the explosion/implosion or '
1364 'the contracting/extending magmatic source. [m^3]')
1366 discretized_source_class = meta.DiscretizedExplosionSource
1368 def __init__(self, **kwargs):
1369 if 'moment' in kwargs:
1370 mom = kwargs.pop('moment')
1371 if 'magnitude' not in kwargs:
1372 kwargs['magnitude'] = float(pmt.moment_to_magnitude(mom))
1374 SourceWithDerivedMagnitude.__init__(self, **kwargs)
1376 def base_key(self):
1377 return SourceWithDerivedMagnitude.base_key(self) + \
1378 (self.volume_change,)
1380 def check_conflicts(self):
1381 if self.magnitude is not None and self.volume_change is not None:
1382 raise DerivedMagnitudeError(
1383 'Magnitude and volume_change are both defined.')
1385 def get_magnitude(self, store=None, target=None):
1386 self.check_conflicts()
1388 if self.magnitude is not None:
1389 return self.magnitude
1391 elif self.volume_change is not None:
1392 moment = self.volume_change * \
1393 self.get_moment_to_volume_change_ratio(store, target)
1395 return float(pmt.moment_to_magnitude(abs(moment)))
1396 else:
1397 return float(pmt.moment_to_magnitude(1.0))
1399 def get_volume_change(self, store=None, target=None):
1400 self.check_conflicts()
1402 if self.volume_change is not None:
1403 return self.volume_change
1405 elif self.magnitude is not None:
1406 moment = float(pmt.magnitude_to_moment(self.magnitude))
1407 return moment / self.get_moment_to_volume_change_ratio(
1408 store, target)
1410 else:
1411 return 1.0 / self.get_moment_to_volume_change_ratio(store)
1413 def get_moment_to_volume_change_ratio(self, store, target=None):
1414 if store is None:
1415 raise DerivedMagnitudeError(
1416 'Need earth model to convert between volume change and '
1417 'magnitude.')
1419 points = num.array(
1420 [[self.north_shift, self.east_shift, self.depth]], dtype=float)
1422 interpolation = target.interpolation if target else 'multilinear'
1423 try:
1424 shear_moduli = store.config.get_shear_moduli(
1425 self.lat, self.lon,
1426 points=points,
1427 interpolation=interpolation)[0]
1428 except meta.OutOfBounds:
1429 raise DerivedMagnitudeError(
1430 'Could not get shear modulus at source position.')
1432 return float(3. * shear_moduli)
1434 def get_factor(self):
1435 return 1.0
1437 def discretize_basesource(self, store, target=None):
1438 times, amplitudes = self.effective_stf_pre().discretize_t(
1439 store.config.deltat, self.time)
1441 amplitudes *= self.get_moment(store, target) * math.sqrt(2. / 3.)
1443 if self.volume_change is not None:
1444 if self.volume_change < 0.:
1445 amplitudes *= -1
1447 return meta.DiscretizedExplosionSource(
1448 m0s=amplitudes,
1449 **self._dparams_base_repeated(times))
1451 def pyrocko_moment_tensor(self, store=None, target=None):
1452 a = self.get_moment(store, target) * math.sqrt(2. / 3.)
1453 return pmt.MomentTensor(m=pmt.symmat6(a, a, a, 0., 0., 0.))
1456class RectangularExplosionSource(ExplosionSource):
1457 '''
1458 Rectangular or line explosion source.
1459 '''
1461 discretized_source_class = meta.DiscretizedExplosionSource
1463 strike = Float.T(
1464 default=0.0,
1465 help='strike direction in [deg], measured clockwise from north')
1467 dip = Float.T(
1468 default=90.0,
1469 help='dip angle in [deg], measured downward from horizontal')
1471 length = Float.T(
1472 default=0.,
1473 help='length of rectangular source area [m]')
1475 width = Float.T(
1476 default=0.,
1477 help='width of rectangular source area [m]')
1479 anchor = StringChoice.T(
1480 choices=['top', 'top_left', 'top_right', 'center', 'bottom',
1481 'bottom_left', 'bottom_right'],
1482 default='center',
1483 optional=True,
1484 help='Anchor point for positioning the plane, can be: top, center or'
1485 'bottom and also top_left, top_right,bottom_left,'
1486 'bottom_right, center_left and center right')
1488 nucleation_x = Float.T(
1489 optional=True,
1490 help='horizontal position of rupture nucleation in normalized fault '
1491 'plane coordinates (-1 = left edge, +1 = right edge)')
1493 nucleation_y = Float.T(
1494 optional=True,
1495 help='down-dip position of rupture nucleation in normalized fault '
1496 'plane coordinates (-1 = upper edge, +1 = lower edge)')
1498 velocity = Float.T(
1499 default=3500.,
1500 help='speed of explosion front [m/s]')
1502 def base_key(self):
1503 return Source.base_key(self) + (self.strike, self.dip, self.length,
1504 self.width, self.nucleation_x,
1505 self.nucleation_y, self.velocity,
1506 self.anchor)
1508 def discretize_basesource(self, store, target=None):
1510 if self.nucleation_x is not None:
1511 nucx = self.nucleation_x * 0.5 * self.length
1512 else:
1513 nucx = None
1515 if self.nucleation_y is not None:
1516 nucy = self.nucleation_y * 0.5 * self.width
1517 else:
1518 nucy = None
1520 stf = self.effective_stf_pre()
1522 points, times, amplitudes, dl, dw, nl, nw = discretize_rect_source(
1523 store.config.deltas, store.config.deltat,
1524 self.time, self.north_shift, self.east_shift, self.depth,
1525 self.strike, self.dip, self.length, self.width, self.anchor,
1526 self.velocity, stf=stf, nucleation_x=nucx, nucleation_y=nucy)
1528 amplitudes /= num.sum(amplitudes)
1529 amplitudes *= self.get_moment(store, target)
1531 return meta.DiscretizedExplosionSource(
1532 lat=self.lat,
1533 lon=self.lon,
1534 times=times,
1535 north_shifts=points[:, 0],
1536 east_shifts=points[:, 1],
1537 depths=points[:, 2],
1538 m0s=amplitudes)
1540 def outline(self, cs='xyz'):
1541 points = outline_rect_source(self.strike, self.dip, self.length,
1542 self.width, self.anchor)
1544 points[:, 0] += self.north_shift
1545 points[:, 1] += self.east_shift
1546 points[:, 2] += self.depth
1547 if cs == 'xyz':
1548 return points
1549 elif cs == 'xy':
1550 return points[:, :2]
1551 elif cs in ('latlon', 'lonlat'):
1552 latlon = ne_to_latlon(
1553 self.lat, self.lon, points[:, 0], points[:, 1])
1555 latlon = num.array(latlon).T
1556 if cs == 'latlon':
1557 return latlon
1558 else:
1559 return latlon[:, ::-1]
1561 def get_nucleation_abs_coord(self, cs='xy'):
1563 if self.nucleation_x is None:
1564 return None, None
1566 coords = from_plane_coords(self.strike, self.dip, self.length,
1567 self.width, self.depth, self.nucleation_x,
1568 self.nucleation_y, lat=self.lat,
1569 lon=self.lon, north_shift=self.north_shift,
1570 east_shift=self.east_shift, cs=cs)
1571 return coords
1574class DCSource(SourceWithMagnitude):
1575 '''
1576 A double-couple point source.
1577 '''
1579 strike = Float.T(
1580 default=0.0,
1581 help='strike direction in [deg], measured clockwise from north')
1583 dip = Float.T(
1584 default=90.0,
1585 help='dip angle in [deg], measured downward from horizontal')
1587 rake = Float.T(
1588 default=0.0,
1589 help='rake angle in [deg], '
1590 'measured counter-clockwise from right-horizontal '
1591 'in on-plane view')
1593 discretized_source_class = meta.DiscretizedMTSource
1595 def base_key(self):
1596 return Source.base_key(self) + (self.strike, self.dip, self.rake)
1598 def get_factor(self):
1599 return float(pmt.magnitude_to_moment(self.magnitude))
1601 def discretize_basesource(self, store, target=None):
1602 mot = pmt.MomentTensor(
1603 strike=self.strike, dip=self.dip, rake=self.rake)
1605 times, amplitudes = self.effective_stf_pre().discretize_t(
1606 store.config.deltat, self.time)
1607 return meta.DiscretizedMTSource(
1608 m6s=mot.m6()[num.newaxis, :] * amplitudes[:, num.newaxis],
1609 **self._dparams_base_repeated(times))
1611 def pyrocko_moment_tensor(self, store=None, target=None):
1612 return pmt.MomentTensor(
1613 strike=self.strike,
1614 dip=self.dip,
1615 rake=self.rake,
1616 scalar_moment=self.moment)
1618 def pyrocko_event(self, store=None, target=None, **kwargs):
1619 return SourceWithMagnitude.pyrocko_event(
1620 self, store, target,
1621 moment_tensor=self.pyrocko_moment_tensor(store, target),
1622 **kwargs)
1624 @classmethod
1625 def from_pyrocko_event(cls, ev, **kwargs):
1626 d = {}
1627 mt = ev.moment_tensor
1628 if mt:
1629 (strike, dip, rake), _ = mt.both_strike_dip_rake()
1630 d.update(
1631 strike=float(strike),
1632 dip=float(dip),
1633 rake=float(rake),
1634 magnitude=float(mt.moment_magnitude()))
1636 d.update(kwargs)
1637 return super(DCSource, cls).from_pyrocko_event(ev, **d)
1640class CLVDSource(SourceWithMagnitude):
1641 '''
1642 A pure CLVD point source.
1643 '''
1645 discretized_source_class = meta.DiscretizedMTSource
1647 azimuth = Float.T(
1648 default=0.0,
1649 help='azimuth direction of largest dipole, clockwise from north [deg]')
1651 dip = Float.T(
1652 default=90.,
1653 help='dip direction of largest dipole, downward from horizontal [deg]')
1655 def base_key(self):
1656 return Source.base_key(self) + (self.azimuth, self.dip)
1658 def get_factor(self):
1659 return float(pmt.magnitude_to_moment(self.magnitude))
1661 @property
1662 def m6(self):
1663 a = math.sqrt(4. / 3.) * self.get_factor()
1664 m = pmt.symmat6(-0.5 * a, -0.5 * a, a, 0., 0., 0.)
1665 rotmat1 = pmt.euler_to_matrix(
1666 d2r * (self.dip - 90.),
1667 d2r * (self.azimuth - 90.),
1668 0.)
1669 m = rotmat1.T * m * rotmat1
1670 return pmt.to6(m)
1672 @property
1673 def m6_astuple(self):
1674 return tuple(self.m6.tolist())
1676 def discretize_basesource(self, store, target=None):
1677 factor = self.get_factor()
1678 times, amplitudes = self.effective_stf_pre().discretize_t(
1679 store.config.deltat, self.time)
1680 return meta.DiscretizedMTSource(
1681 m6s=self.m6[num.newaxis, :] * amplitudes[:, num.newaxis] / factor,
1682 **self._dparams_base_repeated(times))
1684 def pyrocko_moment_tensor(self, store=None, target=None):
1685 return pmt.MomentTensor(m=pmt.symmat6(*self.m6_astuple))
1687 def pyrocko_event(self, store=None, target=None, **kwargs):
1688 mt = self.pyrocko_moment_tensor(store, target)
1689 return Source.pyrocko_event(
1690 self, store, target,
1691 moment_tensor=self.pyrocko_moment_tensor(store, target),
1692 magnitude=float(mt.moment_magnitude()),
1693 **kwargs)
1696class VLVDSource(SourceWithDerivedMagnitude):
1697 '''
1698 Volumetric linear vector dipole source.
1700 This source is a parameterization for a restricted moment tensor point
1701 source, useful to represent dyke or sill like inflation or deflation
1702 sources. The restriction is such that the moment tensor is rotational
1703 symmetric. It can be represented by a superposition of a linear vector
1704 dipole (here we use a CLVD for convenience) and an isotropic component. The
1705 restricted moment tensor has 4 degrees of freedom: 2 independent
1706 eigenvalues and 2 rotation angles orienting the the symmetry axis.
1708 In this parameterization, the isotropic component is controlled by
1709 ``volume_change``. To define the moment tensor, it must be converted to the
1710 scalar moment of the the MT's isotropic component. For the conversion, the
1711 shear modulus at the source's position must be known. This value is
1712 extracted from the earth model defined in the GF store in use.
1714 The CLVD part by controlled by its scalar moment :math:`M_0`:
1715 ``clvd_moment``. The sign of ``clvd_moment`` is used to switch between a
1716 positiv or negativ CLVD (the sign of the largest eigenvalue).
1717 '''
1719 discretized_source_class = meta.DiscretizedMTSource
1721 azimuth = Float.T(
1722 default=0.0,
1723 help='azimuth direction of symmetry axis, clockwise from north [deg].')
1725 dip = Float.T(
1726 default=90.,
1727 help='dip direction of symmetry axis, downward from horizontal [deg].')
1729 volume_change = Float.T(
1730 default=0.,
1731 help='volume change of the inflation/deflation [m^3].')
1733 clvd_moment = Float.T(
1734 default=0.,
1735 help='scalar moment :math:`M_0` of the CLVD component [Nm]. The sign '
1736 'controls the sign of the CLVD (the sign of its largest '
1737 'eigenvalue).')
1739 def get_moment_to_volume_change_ratio(self, store, target):
1740 if store is None or target is None:
1741 raise DerivedMagnitudeError(
1742 'Need earth model to convert between volume change and '
1743 'magnitude.')
1745 points = num.array(
1746 [[self.north_shift, self.east_shift, self.depth]], dtype=float)
1748 try:
1749 shear_moduli = store.config.get_shear_moduli(
1750 self.lat, self.lon,
1751 points=points,
1752 interpolation=target.interpolation)[0]
1753 except meta.OutOfBounds:
1754 raise DerivedMagnitudeError(
1755 'Could not get shear modulus at source position.')
1757 return float(3. * shear_moduli)
1759 def base_key(self):
1760 return Source.base_key(self) + \
1761 (self.azimuth, self.dip, self.volume_change, self.clvd_moment)
1763 def get_magnitude(self, store=None, target=None):
1764 mt = self.pyrocko_moment_tensor(store, target)
1765 return float(pmt.moment_to_magnitude(mt.moment))
1767 def get_m6(self, store, target):
1768 a = math.sqrt(4. / 3.) * self.clvd_moment
1769 m_clvd = pmt.symmat6(-0.5 * a, -0.5 * a, a, 0., 0., 0.)
1771 rotmat1 = pmt.euler_to_matrix(
1772 d2r * (self.dip - 90.),
1773 d2r * (self.azimuth - 90.),
1774 0.)
1775 m_clvd = rotmat1.T * m_clvd * rotmat1
1777 m_iso = self.volume_change * \
1778 self.get_moment_to_volume_change_ratio(store, target)
1780 m_iso = pmt.symmat6(m_iso, m_iso, m_iso, 0., 0., 0.,) * math.sqrt(2./3)
1782 m = pmt.to6(m_clvd) + pmt.to6(m_iso)
1783 return m
1785 def get_moment(self, store=None, target=None):
1786 return float(pmt.magnitude_to_moment(
1787 self.get_magnitude(store, target)))
1789 def get_m6_astuple(self, store, target):
1790 m6 = self.get_m6(store, target)
1791 return tuple(m6.tolist())
1793 def discretize_basesource(self, store, target=None):
1794 times, amplitudes = self.effective_stf_pre().discretize_t(
1795 store.config.deltat, self.time)
1797 m6 = self.get_m6(store, target)
1798 m6 *= amplitudes / self.get_factor()
1800 return meta.DiscretizedMTSource(
1801 m6s=m6[num.newaxis, :],
1802 **self._dparams_base_repeated(times))
1804 def pyrocko_moment_tensor(self, store=None, target=None):
1805 m6_astuple = self.get_m6_astuple(store, target)
1806 return pmt.MomentTensor(m=pmt.symmat6(*m6_astuple))
1809class MTSource(Source):
1810 '''
1811 A moment tensor point source.
1812 '''
1814 discretized_source_class = meta.DiscretizedMTSource
1816 mnn = Float.T(
1817 default=1.,
1818 help='north-north component of moment tensor in [Nm]')
1820 mee = Float.T(
1821 default=1.,
1822 help='east-east component of moment tensor in [Nm]')
1824 mdd = Float.T(
1825 default=1.,
1826 help='down-down component of moment tensor in [Nm]')
1828 mne = Float.T(
1829 default=0.,
1830 help='north-east component of moment tensor in [Nm]')
1832 mnd = Float.T(
1833 default=0.,
1834 help='north-down component of moment tensor in [Nm]')
1836 med = Float.T(
1837 default=0.,
1838 help='east-down component of moment tensor in [Nm]')
1840 def __init__(self, **kwargs):
1841 if 'm6' in kwargs:
1842 for (k, v) in zip('mnn mee mdd mne mnd med'.split(),
1843 kwargs.pop('m6')):
1844 kwargs[k] = float(v)
1846 Source.__init__(self, **kwargs)
1848 @property
1849 def m6(self):
1850 return num.array(self.m6_astuple)
1852 @property
1853 def m6_astuple(self):
1854 return (self.mnn, self.mee, self.mdd, self.mne, self.mnd, self.med)
1856 @m6.setter
1857 def m6(self, value):
1858 self.mnn, self.mee, self.mdd, self.mne, self.mnd, self.med = value
1860 def base_key(self):
1861 return Source.base_key(self) + self.m6_astuple
1863 def discretize_basesource(self, store, target=None):
1864 times, amplitudes = self.effective_stf_pre().discretize_t(
1865 store.config.deltat, self.time)
1866 return meta.DiscretizedMTSource(
1867 m6s=self.m6[num.newaxis, :] * amplitudes[:, num.newaxis],
1868 **self._dparams_base_repeated(times))
1870 def get_magnitude(self, store=None, target=None):
1871 m6 = self.m6
1872 return pmt.moment_to_magnitude(
1873 math.sqrt(num.sum(m6[0:3]**2) + 2.0 * num.sum(m6[3:6]**2)) /
1874 math.sqrt(2.))
1876 def pyrocko_moment_tensor(self, store=None, target=None):
1877 return pmt.MomentTensor(m=pmt.symmat6(*self.m6_astuple))
1879 def pyrocko_event(self, store=None, target=None, **kwargs):
1880 mt = self.pyrocko_moment_tensor(store, target)
1881 return Source.pyrocko_event(
1882 self, store, target,
1883 moment_tensor=self.pyrocko_moment_tensor(store, target),
1884 magnitude=float(mt.moment_magnitude()),
1885 **kwargs)
1887 @classmethod
1888 def from_pyrocko_event(cls, ev, **kwargs):
1889 d = {}
1890 mt = ev.moment_tensor
1891 if mt:
1892 d.update(m6=tuple(map(float, mt.m6())))
1893 else:
1894 if ev.magnitude is not None:
1895 mom = pmt.magnitude_to_moment(ev.magnitude)
1896 v = math.sqrt(2./3.) * mom
1897 d.update(m6=(v, v, v, 0., 0., 0.))
1899 d.update(kwargs)
1900 return super(MTSource, cls).from_pyrocko_event(ev, **d)
1903map_anchor = {
1904 'center': (0.0, 0.0),
1905 'center_left': (-1.0, 0.0),
1906 'center_right': (1.0, 0.0),
1907 'top': (0.0, -1.0),
1908 'top_left': (-1.0, -1.0),
1909 'top_right': (1.0, -1.0),
1910 'bottom': (0.0, 1.0),
1911 'bottom_left': (-1.0, 1.0),
1912 'bottom_right': (1.0, 1.0)}
1915class RectangularSource(SourceWithDerivedMagnitude):
1916 '''
1917 Classical Haskell source model modified for bilateral rupture.
1918 '''
1920 discretized_source_class = meta.DiscretizedMTSource
1922 magnitude = Float.T(
1923 optional=True,
1924 help='moment magnitude Mw as in [Hanks and Kanamori, 1979]')
1926 strike = Float.T(
1927 default=0.0,
1928 help='strike direction in [deg], measured clockwise from north')
1930 dip = Float.T(
1931 default=90.0,
1932 help='dip angle in [deg], measured downward from horizontal')
1934 rake = Float.T(
1935 default=0.0,
1936 help='rake angle in [deg], '
1937 'measured counter-clockwise from right-horizontal '
1938 'in on-plane view')
1940 length = Float.T(
1941 default=0.,
1942 help='length of rectangular source area [m]')
1944 width = Float.T(
1945 default=0.,
1946 help='width of rectangular source area [m]')
1948 anchor = StringChoice.T(
1949 choices=['top', 'top_left', 'top_right', 'center', 'bottom',
1950 'bottom_left', 'bottom_right'],
1951 default='center',
1952 optional=True,
1953 help='Anchor point for positioning the plane, can be: top, center or'
1954 'bottom and also top_left, top_right,bottom_left,'
1955 'bottom_right, center_left and center right')
1957 nucleation_x = Float.T(
1958 optional=True,
1959 help='horizontal position of rupture nucleation in normalized fault '
1960 'plane coordinates (-1 = left edge, +1 = right edge)')
1962 nucleation_y = Float.T(
1963 optional=True,
1964 help='down-dip position of rupture nucleation in normalized fault '
1965 'plane coordinates (-1 = upper edge, +1 = lower edge)')
1967 velocity = Float.T(
1968 default=3500.,
1969 help='speed of rupture front [m/s]')
1971 slip = Float.T(
1972 optional=True,
1973 help='Slip on the rectangular source area [m]')
1975 opening_fraction = Float.T(
1976 default=0.,
1977 help='Determines fraction of slip related to opening. '
1978 '(``-1``: pure tensile closing, '
1979 '``0``: pure shear, '
1980 '``1``: pure tensile opening)')
1982 decimation_factor = Int.T(
1983 optional=True,
1984 default=1,
1985 help='Sub-source decimation factor, a larger decimation will'
1986 ' make the result inaccurate but shorten the necessary'
1987 ' computation time (use for testing puposes only).')
1989 def __init__(self, **kwargs):
1990 if 'moment' in kwargs:
1991 mom = kwargs.pop('moment')
1992 if 'magnitude' not in kwargs:
1993 kwargs['magnitude'] = float(pmt.moment_to_magnitude(mom))
1995 SourceWithDerivedMagnitude.__init__(self, **kwargs)
1997 def base_key(self):
1998 return SourceWithDerivedMagnitude.base_key(self) + (
1999 self.magnitude,
2000 self.slip,
2001 self.strike,
2002 self.dip,
2003 self.rake,
2004 self.length,
2005 self.width,
2006 self.nucleation_x,
2007 self.nucleation_y,
2008 self.velocity,
2009 self.decimation_factor,
2010 self.anchor)
2012 def check_conflicts(self):
2013 if self.magnitude is not None and self.slip is not None:
2014 raise DerivedMagnitudeError(
2015 'Magnitude and slip are both defined.')
2017 def get_magnitude(self, store=None, target=None):
2018 self.check_conflicts()
2019 if self.magnitude is not None:
2020 return self.magnitude
2022 elif self.slip is not None:
2023 if None in (store, target):
2024 raise DerivedMagnitudeError(
2025 'Magnitude for a rectangular source with slip defined '
2026 'can only be derived when earth model and target '
2027 'interpolation method are available.')
2029 amplitudes = self._discretize(store, target)[2]
2030 if amplitudes.ndim == 2:
2031 # CLVD component has no net moment, leave out
2032 return float(pmt.moment_to_magnitude(
2033 num.sum(num.abs(amplitudes[0:2, :]).sum())))
2034 else:
2035 return float(pmt.moment_to_magnitude(num.sum(amplitudes)))
2037 else:
2038 return float(pmt.moment_to_magnitude(1.0))
2040 def get_factor(self):
2041 return 1.0
2043 def get_slip_tensile(self):
2044 return self.slip * self.opening_fraction
2046 def get_slip_shear(self):
2047 return self.slip - abs(self.get_slip_tensile)
2049 def _discretize(self, store, target):
2050 if self.nucleation_x is not None:
2051 nucx = self.nucleation_x * 0.5 * self.length
2052 else:
2053 nucx = None
2055 if self.nucleation_y is not None:
2056 nucy = self.nucleation_y * 0.5 * self.width
2057 else:
2058 nucy = None
2060 stf = self.effective_stf_pre()
2062 points, times, amplitudes, dl, dw, nl, nw = discretize_rect_source(
2063 store.config.deltas, store.config.deltat,
2064 self.time, self.north_shift, self.east_shift, self.depth,
2065 self.strike, self.dip, self.length, self.width, self.anchor,
2066 self.velocity, stf=stf, nucleation_x=nucx, nucleation_y=nucy,
2067 decimation_factor=self.decimation_factor)
2069 if self.slip is not None:
2070 if target is not None:
2071 interpolation = target.interpolation
2072 else:
2073 interpolation = 'nearest_neighbor'
2074 logger.warn(
2075 'no target information available, will use '
2076 '"nearest_neighbor" interpolation when extracting shear '
2077 'modulus from earth model')
2079 shear_moduli = store.config.get_shear_moduli(
2080 self.lat, self.lon,
2081 points=points,
2082 interpolation=interpolation)
2084 tensile_slip = self.get_slip_tensile()
2085 shear_slip = self.slip - abs(tensile_slip)
2087 amplitudes_total = [shear_moduli * shear_slip]
2088 if tensile_slip != 0:
2089 bulk_moduli = store.config.get_bulk_moduli(
2090 self.lat, self.lon,
2091 points=points,
2092 interpolation=interpolation)
2094 tensile_iso = bulk_moduli * tensile_slip
2095 tensile_clvd = (2. / 3.) * shear_moduli * tensile_slip
2097 amplitudes_total.extend([tensile_iso, tensile_clvd])
2099 amplitudes_total = num.vstack(amplitudes_total).squeeze() * \
2100 amplitudes * dl * dw
2102 else:
2103 # normalization to retain total moment
2104 amplitudes_norm = amplitudes / num.sum(amplitudes)
2105 moment = self.get_moment(store, target)
2107 amplitudes_total = [
2108 amplitudes_norm * moment * (1 - abs(self.opening_fraction))]
2109 if self.opening_fraction != 0.:
2110 amplitudes_total.append(
2111 amplitudes_norm * self.opening_fraction * moment)
2113 amplitudes_total = num.vstack(amplitudes_total).squeeze()
2115 return points, times, num.atleast_1d(amplitudes_total), dl, dw
2117 def discretize_basesource(self, store, target=None):
2119 points, times, amplitudes, dl, dw = self._discretize(store, target)
2121 mot = pmt.MomentTensor(
2122 strike=self.strike, dip=self.dip, rake=self.rake)
2123 m6s = num.repeat(mot.m6()[num.newaxis, :], times.size, axis=0)
2125 if amplitudes.ndim == 1:
2126 m6s[:, :] *= amplitudes[:, num.newaxis]
2127 elif amplitudes.ndim == 2:
2128 # shear MT components
2129 rotmat1 = pmt.euler_to_matrix(
2130 d2r * self.dip, d2r * self.strike, d2r * -self.rake)
2131 m6s[:, :] *= amplitudes[0, :][:, num.newaxis]
2133 if amplitudes.shape[0] == 2:
2134 # tensile MT components - moment/magnitude input
2135 tensile = pmt.symmat6(1., 1., 3., 0., 0., 0.)
2136 rot_tensile = pmt.to6(rotmat1.T * tensile * rotmat1)
2138 m6s_tensile = rot_tensile[
2139 num.newaxis, :] * amplitudes[1, :][:, num.newaxis]
2140 m6s += m6s_tensile
2142 elif amplitudes.shape[0] == 3:
2143 # tensile MT components - slip input
2144 iso = pmt.symmat6(1., 1., 1., 0., 0., 0.)
2145 clvd = pmt.symmat6(-1., -1., 2., 0., 0., 0.)
2147 rot_iso = pmt.to6(rotmat1.T * iso * rotmat1)
2148 rot_clvd = pmt.to6(rotmat1.T * clvd * rotmat1)
2150 m6s_iso = rot_iso[
2151 num.newaxis, :] * amplitudes[1, :][:, num.newaxis]
2152 m6s_clvd = rot_clvd[
2153 num.newaxis, :] * amplitudes[2, :][:, num.newaxis]
2154 m6s += m6s_iso + m6s_clvd
2155 else:
2156 raise ValueError('Unknwown amplitudes shape!')
2157 else:
2158 raise ValueError(
2159 'Unexpected dimension of {}'.format(amplitudes.ndim))
2161 ds = meta.DiscretizedMTSource(
2162 lat=self.lat,
2163 lon=self.lon,
2164 times=times,
2165 north_shifts=points[:, 0],
2166 east_shifts=points[:, 1],
2167 depths=points[:, 2],
2168 m6s=m6s)
2170 return ds
2172 def outline(self, cs='xyz'):
2173 points = outline_rect_source(self.strike, self.dip, self.length,
2174 self.width, self.anchor)
2176 points[:, 0] += self.north_shift
2177 points[:, 1] += self.east_shift
2178 points[:, 2] += self.depth
2179 if cs == 'xyz':
2180 return points
2181 elif cs == 'xy':
2182 return points[:, :2]
2183 elif cs in ('latlon', 'lonlat'):
2184 latlon = ne_to_latlon(
2185 self.lat, self.lon, points[:, 0], points[:, 1])
2187 latlon = num.array(latlon).T
2188 if cs == 'latlon':
2189 return latlon
2190 else:
2191 return latlon[:, ::-1]
2193 def get_nucleation_abs_coord(self, cs='xy'):
2195 if self.nucleation_x is None:
2196 return None, None
2198 coords = from_plane_coords(self.strike, self.dip, self.length,
2199 self.width, self.depth, self.nucleation_x,
2200 self.nucleation_y, lat=self.lat,
2201 lon=self.lon, north_shift=self.north_shift,
2202 east_shift=self.east_shift, cs=cs)
2203 return coords
2205 def pyrocko_moment_tensor(self, store=None, target=None):
2206 return pmt.MomentTensor(
2207 strike=self.strike,
2208 dip=self.dip,
2209 rake=self.rake,
2210 scalar_moment=self.get_moment(store, target))
2212 def pyrocko_event(self, store=None, target=None, **kwargs):
2213 return SourceWithDerivedMagnitude.pyrocko_event(
2214 self, store, target,
2215 **kwargs)
2217 @classmethod
2218 def from_pyrocko_event(cls, ev, **kwargs):
2219 d = {}
2220 mt = ev.moment_tensor
2221 if mt:
2222 (strike, dip, rake), _ = mt.both_strike_dip_rake()
2223 d.update(
2224 strike=float(strike),
2225 dip=float(dip),
2226 rake=float(rake),
2227 magnitude=float(mt.moment_magnitude()))
2229 d.update(kwargs)
2230 return super(RectangularSource, cls).from_pyrocko_event(ev, **d)
2233class DoubleDCSource(SourceWithMagnitude):
2234 '''
2235 Two double-couple point sources separated in space and time.
2236 Moment share between the sub-sources is controlled by the
2237 parameter mix.
2238 The position of the subsources is dependent on the moment
2239 distribution between the two sources. Depth, east and north
2240 shift are given for the centroid between the two double-couples.
2241 The subsources will positioned according to their moment shares
2242 around this centroid position.
2243 This is done according to their delta parameters, which are
2244 therefore in relation to that centroid.
2245 Note that depth of the subsources therefore can be
2246 depth+/-delta_depth. For shallow earthquakes therefore
2247 the depth has to be chosen deeper to avoid sampling
2248 above surface.
2249 '''
2251 strike1 = Float.T(
2252 default=0.0,
2253 help='strike direction in [deg], measured clockwise from north')
2255 dip1 = Float.T(
2256 default=90.0,
2257 help='dip angle in [deg], measured downward from horizontal')
2259 azimuth = Float.T(
2260 default=0.0,
2261 help='azimuth to second double-couple [deg], '
2262 'measured at first, clockwise from north')
2264 rake1 = Float.T(
2265 default=0.0,
2266 help='rake angle in [deg], '
2267 'measured counter-clockwise from right-horizontal '
2268 'in on-plane view')
2270 strike2 = Float.T(
2271 default=0.0,
2272 help='strike direction in [deg], measured clockwise from north')
2274 dip2 = Float.T(
2275 default=90.0,
2276 help='dip angle in [deg], measured downward from horizontal')
2278 rake2 = Float.T(
2279 default=0.0,
2280 help='rake angle in [deg], '
2281 'measured counter-clockwise from right-horizontal '
2282 'in on-plane view')
2284 delta_time = Float.T(
2285 default=0.0,
2286 help='separation of double-couples in time (t2-t1) [s]')
2288 delta_depth = Float.T(
2289 default=0.0,
2290 help='difference in depth (z2-z1) [m]')
2292 distance = Float.T(
2293 default=0.0,
2294 help='distance between the two double-couples [m]')
2296 mix = Float.T(
2297 default=0.5,
2298 help='how to distribute the moment to the two doublecouples '
2299 'mix=0 -> m1=1 and m2=0; mix=1 -> m1=0, m2=1')
2301 stf1 = STF.T(
2302 optional=True,
2303 help='Source time function of subsource 1 '
2304 '(if given, overrides STF from attribute :py:gattr:`Source.stf`)')
2306 stf2 = STF.T(
2307 optional=True,
2308 help='Source time function of subsource 2 '
2309 '(if given, overrides STF from attribute :py:gattr:`Source.stf`)')
2311 discretized_source_class = meta.DiscretizedMTSource
2313 def base_key(self):
2314 return (
2315 self.time, self.depth, self.lat, self.north_shift,
2316 self.lon, self.east_shift, type(self).__name__) + \
2317 self.effective_stf1_pre().base_key() + \
2318 self.effective_stf2_pre().base_key() + (
2319 self.strike1, self.dip1, self.rake1,
2320 self.strike2, self.dip2, self.rake2,
2321 self.delta_time, self.delta_depth,
2322 self.azimuth, self.distance, self.mix)
2324 def get_factor(self):
2325 return self.moment
2327 def effective_stf1_pre(self):
2328 return self.stf1 or self.stf or g_unit_pulse
2330 def effective_stf2_pre(self):
2331 return self.stf2 or self.stf or g_unit_pulse
2333 def effective_stf_post(self):
2334 return g_unit_pulse
2336 def split(self):
2337 a1 = 1.0 - self.mix
2338 a2 = self.mix
2339 delta_north = math.cos(self.azimuth * d2r) * self.distance
2340 delta_east = math.sin(self.azimuth * d2r) * self.distance
2342 dc1 = DCSource(
2343 lat=self.lat,
2344 lon=self.lon,
2345 time=self.time - self.delta_time * a2,
2346 north_shift=self.north_shift - delta_north * a2,
2347 east_shift=self.east_shift - delta_east * a2,
2348 depth=self.depth - self.delta_depth * a2,
2349 moment=self.moment * a1,
2350 strike=self.strike1,
2351 dip=self.dip1,
2352 rake=self.rake1,
2353 stf=self.stf1 or self.stf)
2355 dc2 = DCSource(
2356 lat=self.lat,
2357 lon=self.lon,
2358 time=self.time + self.delta_time * a1,
2359 north_shift=self.north_shift + delta_north * a1,
2360 east_shift=self.east_shift + delta_east * a1,
2361 depth=self.depth + self.delta_depth * a1,
2362 moment=self.moment * a2,
2363 strike=self.strike2,
2364 dip=self.dip2,
2365 rake=self.rake2,
2366 stf=self.stf2 or self.stf)
2368 return [dc1, dc2]
2370 def discretize_basesource(self, store, target=None):
2371 a1 = 1.0 - self.mix
2372 a2 = self.mix
2373 mot1 = pmt.MomentTensor(strike=self.strike1, dip=self.dip1,
2374 rake=self.rake1, scalar_moment=a1)
2375 mot2 = pmt.MomentTensor(strike=self.strike2, dip=self.dip2,
2376 rake=self.rake2, scalar_moment=a2)
2378 delta_north = math.cos(self.azimuth * d2r) * self.distance
2379 delta_east = math.sin(self.azimuth * d2r) * self.distance
2381 times1, amplitudes1 = self.effective_stf1_pre().discretize_t(
2382 store.config.deltat, self.time - self.delta_time * a1)
2384 times2, amplitudes2 = self.effective_stf2_pre().discretize_t(
2385 store.config.deltat, self.time + self.delta_time * a2)
2387 nt1 = times1.size
2388 nt2 = times2.size
2390 ds = meta.DiscretizedMTSource(
2391 lat=self.lat,
2392 lon=self.lon,
2393 times=num.concatenate((times1, times2)),
2394 north_shifts=num.concatenate((
2395 num.repeat(self.north_shift - delta_north * a1, nt1),
2396 num.repeat(self.north_shift + delta_north * a2, nt2))),
2397 east_shifts=num.concatenate((
2398 num.repeat(self.east_shift - delta_east * a1, nt1),
2399 num.repeat(self.east_shift + delta_east * a2, nt2))),
2400 depths=num.concatenate((
2401 num.repeat(self.depth - self.delta_depth * a1, nt1),
2402 num.repeat(self.depth + self.delta_depth * a2, nt2))),
2403 m6s=num.vstack((
2404 mot1.m6()[num.newaxis, :] * amplitudes1[:, num.newaxis],
2405 mot2.m6()[num.newaxis, :] * amplitudes2[:, num.newaxis])))
2407 return ds
2409 def pyrocko_moment_tensor(self, store=None, target=None):
2410 a1 = 1.0 - self.mix
2411 a2 = self.mix
2412 mot1 = pmt.MomentTensor(strike=self.strike1, dip=self.dip1,
2413 rake=self.rake1,
2414 scalar_moment=a1 * self.moment)
2415 mot2 = pmt.MomentTensor(strike=self.strike2, dip=self.dip2,
2416 rake=self.rake2,
2417 scalar_moment=a2 * self.moment)
2418 return pmt.MomentTensor(m=mot1.m() + mot2.m())
2420 def pyrocko_event(self, store=None, target=None, **kwargs):
2421 return SourceWithMagnitude.pyrocko_event(
2422 self, store, target,
2423 moment_tensor=self.pyrocko_moment_tensor(store, target),
2424 **kwargs)
2426 @classmethod
2427 def from_pyrocko_event(cls, ev, **kwargs):
2428 d = {}
2429 mt = ev.moment_tensor
2430 if mt:
2431 (strike, dip, rake), _ = mt.both_strike_dip_rake()
2432 d.update(
2433 strike1=float(strike),
2434 dip1=float(dip),
2435 rake1=float(rake),
2436 strike2=float(strike),
2437 dip2=float(dip),
2438 rake2=float(rake),
2439 mix=0.0,
2440 magnitude=float(mt.moment_magnitude()))
2442 d.update(kwargs)
2443 source = super(DoubleDCSource, cls).from_pyrocko_event(ev, **d)
2444 source.stf1 = source.stf
2445 source.stf2 = HalfSinusoidSTF(effective_duration=0.)
2446 source.stf = None
2447 return source
2450class RingfaultSource(SourceWithMagnitude):
2451 '''
2452 A ring fault with vertical doublecouples.
2453 '''
2455 diameter = Float.T(
2456 default=1.0,
2457 help='diameter of the ring in [m]')
2459 sign = Float.T(
2460 default=1.0,
2461 help='inside of the ring moves up (+1) or down (-1)')
2463 strike = Float.T(
2464 default=0.0,
2465 help='strike direction of the ring plane, clockwise from north,'
2466 ' in [deg]')
2468 dip = Float.T(
2469 default=0.0,
2470 help='dip angle of the ring plane from horizontal in [deg]')
2472 npointsources = Int.T(
2473 default=360,
2474 help='number of point sources to use')
2476 discretized_source_class = meta.DiscretizedMTSource
2478 def base_key(self):
2479 return Source.base_key(self) + (
2480 self.strike, self.dip, self.diameter, self.npointsources)
2482 def get_factor(self):
2483 return self.sign * self.moment
2485 def discretize_basesource(self, store=None, target=None):
2486 n = self.npointsources
2487 phi = num.linspace(0, 2.0 * num.pi, n, endpoint=False)
2489 points = num.zeros((n, 3))
2490 points[:, 0] = num.cos(phi) * 0.5 * self.diameter
2491 points[:, 1] = num.sin(phi) * 0.5 * self.diameter
2493 rotmat = num.array(pmt.euler_to_matrix(
2494 self.dip * d2r, self.strike * d2r, 0.0))
2495 points = num.dot(rotmat.T, points.T).T # !!! ?
2497 points[:, 0] += self.north_shift
2498 points[:, 1] += self.east_shift
2499 points[:, 2] += self.depth
2501 m = num.array(pmt.MomentTensor(strike=90., dip=90., rake=-90.,
2502 scalar_moment=1.0 / n).m())
2504 rotmats = num.transpose(
2505 [[num.cos(phi), num.sin(phi), num.zeros(n)],
2506 [-num.sin(phi), num.cos(phi), num.zeros(n)],
2507 [num.zeros(n), num.zeros(n), num.ones(n)]], (2, 0, 1))
2509 ms = num.zeros((n, 3, 3))
2510 for i in range(n):
2511 mtemp = num.dot(rotmats[i].T, num.dot(m, rotmats[i]))
2512 ms[i, :, :] = num.dot(rotmat.T, num.dot(mtemp, rotmat))
2514 m6s = num.vstack((ms[:, 0, 0], ms[:, 1, 1], ms[:, 2, 2],
2515 ms[:, 0, 1], ms[:, 0, 2], ms[:, 1, 2])).T
2517 times, amplitudes = self.effective_stf_pre().discretize_t(
2518 store.config.deltat, self.time)
2520 nt = times.size
2522 return meta.DiscretizedMTSource(
2523 times=num.tile(times, n),
2524 lat=self.lat,
2525 lon=self.lon,
2526 north_shifts=num.repeat(points[:, 0], nt),
2527 east_shifts=num.repeat(points[:, 1], nt),
2528 depths=num.repeat(points[:, 2], nt),
2529 m6s=num.repeat(m6s, nt, axis=0) * num.tile(
2530 amplitudes, n)[:, num.newaxis])
2533class CombiSource(Source):
2534 '''
2535 Composite source model.
2536 '''
2538 discretized_source_class = meta.DiscretizedMTSource
2540 subsources = List.T(Source.T())
2542 def __init__(self, subsources=[], **kwargs):
2543 if not subsources:
2544 raise BadRequest(
2545 'Need at least one sub-source to create a CombiSource object.')
2547 lats = num.array(
2548 [subsource.lat for subsource in subsources], dtype=float)
2549 lons = num.array(
2550 [subsource.lon for subsource in subsources], dtype=float)
2552 lat, lon = lats[0], lons[0]
2553 if not num.all(lats == lat) and num.all(lons == lon):
2554 subsources = [s.clone() for s in subsources]
2555 for subsource in subsources[1:]:
2556 subsource.set_origin(lat, lon)
2558 depth = float(num.mean([p.depth for p in subsources]))
2559 time = float(num.mean([p.time for p in subsources]))
2560 north_shift = float(num.mean([p.north_shift for p in subsources]))
2561 east_shift = float(num.mean([p.east_shift for p in subsources]))
2562 kwargs.update(
2563 time=time,
2564 lat=float(lat),
2565 lon=float(lon),
2566 north_shift=north_shift,
2567 east_shift=east_shift,
2568 depth=depth)
2570 Source.__init__(self, subsources=subsources, **kwargs)
2572 def get_factor(self):
2573 return 1.0
2575 def discretize_basesource(self, store, target=None):
2576 dsources = []
2577 for sf in self.subsources:
2578 ds = sf.discretize_basesource(store, target)
2579 ds.m6s *= sf.get_factor()
2580 dsources.append(ds)
2582 return meta.DiscretizedMTSource.combine(dsources)
2585class SFSource(Source):
2586 '''
2587 A single force point source.
2589 Supported GF schemes: `'elastic5'`.
2590 '''
2592 discretized_source_class = meta.DiscretizedSFSource
2594 fn = Float.T(
2595 default=0.,
2596 help='northward component of single force [N]')
2598 fe = Float.T(
2599 default=0.,
2600 help='eastward component of single force [N]')
2602 fd = Float.T(
2603 default=0.,
2604 help='downward component of single force [N]')
2606 def __init__(self, **kwargs):
2607 Source.__init__(self, **kwargs)
2609 def base_key(self):
2610 return Source.base_key(self) + (self.fn, self.fe, self.fd)
2612 def get_factor(self):
2613 return 1.0
2615 def discretize_basesource(self, store, target=None):
2616 times, amplitudes = self.effective_stf_pre().discretize_t(
2617 store.config.deltat, self.time)
2618 forces = amplitudes[:, num.newaxis] * num.array(
2619 [[self.fn, self.fe, self.fd]], dtype=float)
2621 return meta.DiscretizedSFSource(forces=forces,
2622 **self._dparams_base_repeated(times))
2624 def pyrocko_event(self, store=None, target=None, **kwargs):
2625 return Source.pyrocko_event(
2626 self, store, target,
2627 **kwargs)
2629 @classmethod
2630 def from_pyrocko_event(cls, ev, **kwargs):
2631 d = {}
2632 d.update(kwargs)
2633 return super(SFSource, cls).from_pyrocko_event(ev, **d)
2636class PorePressurePointSource(Source):
2637 '''
2638 Excess pore pressure point source.
2640 For poro-elastic initial value problem where an excess pore pressure is
2641 brought into a small source volume.
2642 '''
2644 discretized_source_class = meta.DiscretizedPorePressureSource
2646 pp = Float.T(
2647 default=1.0,
2648 help='initial excess pore pressure in [Pa]')
2650 def base_key(self):
2651 return Source.base_key(self)
2653 def get_factor(self):
2654 return self.pp
2656 def discretize_basesource(self, store, target=None):
2657 return meta.DiscretizedPorePressureSource(pp=arr(1.0),
2658 **self._dparams_base())
2661class PorePressureLineSource(Source):
2662 '''
2663 Excess pore pressure line source.
2665 The line source is centered at (north_shift, east_shift, depth).
2666 '''
2668 discretized_source_class = meta.DiscretizedPorePressureSource
2670 pp = Float.T(
2671 default=1.0,
2672 help='initial excess pore pressure in [Pa]')
2674 length = Float.T(
2675 default=0.0,
2676 help='length of the line source [m]')
2678 azimuth = Float.T(
2679 default=0.0,
2680 help='azimuth direction, clockwise from north [deg]')
2682 dip = Float.T(
2683 default=90.,
2684 help='dip direction, downward from horizontal [deg]')
2686 def base_key(self):
2687 return Source.base_key(self) + (self.azimuth, self.dip, self.length)
2689 def get_factor(self):
2690 return self.pp
2692 def discretize_basesource(self, store, target=None):
2694 n = 2 * int(math.ceil(self.length / num.min(store.config.deltas))) + 1
2696 a = num.linspace(-0.5 * self.length, 0.5 * self.length, n)
2698 sa = math.sin(self.azimuth * d2r)
2699 ca = math.cos(self.azimuth * d2r)
2700 sd = math.sin(self.dip * d2r)
2701 cd = math.cos(self.dip * d2r)
2703 points = num.zeros((n, 3))
2704 points[:, 0] = self.north_shift + a * ca * cd
2705 points[:, 1] = self.east_shift + a * sa * cd
2706 points[:, 2] = self.depth + a * sd
2708 return meta.DiscretizedPorePressureSource(
2709 times=util.num_full(n, self.time),
2710 lat=self.lat,
2711 lon=self.lon,
2712 north_shifts=points[:, 0],
2713 east_shifts=points[:, 1],
2714 depths=points[:, 2],
2715 pp=num.ones(n) / n)
2718class Request(Object):
2719 '''
2720 Synthetic seismogram computation request.
2722 ::
2724 Request(**kwargs)
2725 Request(sources, targets, **kwargs)
2726 '''
2728 sources = List.T(
2729 Source.T(),
2730 help='list of sources for which to produce synthetics.')
2732 targets = List.T(
2733 Target.T(),
2734 help='list of targets for which to produce synthetics.')
2736 @classmethod
2737 def args2kwargs(cls, args):
2738 if len(args) not in (0, 2, 3):
2739 raise BadRequest('Invalid arguments.')
2741 if len(args) == 2:
2742 return dict(sources=args[0], targets=args[1])
2743 else:
2744 return {}
2746 def __init__(self, *args, **kwargs):
2747 kwargs.update(self.args2kwargs(args))
2748 sources = kwargs.pop('sources', [])
2749 targets = kwargs.pop('targets', [])
2751 if isinstance(sources, Source):
2752 sources = [sources]
2754 if isinstance(targets, Target) or isinstance(targets, StaticTarget):
2755 targets = [targets]
2757 Object.__init__(self, sources=sources, targets=targets, **kwargs)
2759 @property
2760 def targets_dynamic(self):
2761 return [t for t in self.targets if isinstance(t, Target)]
2763 @property
2764 def targets_static(self):
2765 return [t for t in self.targets if isinstance(t, StaticTarget)]
2767 @property
2768 def has_dynamic(self):
2769 return True if len(self.targets_dynamic) > 0 else False
2771 @property
2772 def has_statics(self):
2773 return True if len(self.targets_static) > 0 else False
2775 def subsources_map(self):
2776 m = defaultdict(list)
2777 for source in self.sources:
2778 m[source.base_key()].append(source)
2780 return m
2782 def subtargets_map(self):
2783 m = defaultdict(list)
2784 for target in self.targets:
2785 m[target.base_key()].append(target)
2787 return m
2789 def subrequest_map(self):
2790 ms = self.subsources_map()
2791 mt = self.subtargets_map()
2792 m = {}
2793 for (ks, ls) in ms.items():
2794 for (kt, lt) in mt.items():
2795 m[ks, kt] = (ls, lt)
2797 return m
2800class ProcessingStats(Object):
2801 t_perc_get_store_and_receiver = Float.T(default=0.)
2802 t_perc_discretize_source = Float.T(default=0.)
2803 t_perc_make_base_seismogram = Float.T(default=0.)
2804 t_perc_make_same_span = Float.T(default=0.)
2805 t_perc_post_process = Float.T(default=0.)
2806 t_perc_optimize = Float.T(default=0.)
2807 t_perc_stack = Float.T(default=0.)
2808 t_perc_static_get_store = Float.T(default=0.)
2809 t_perc_static_discretize_basesource = Float.T(default=0.)
2810 t_perc_static_sum_statics = Float.T(default=0.)
2811 t_perc_static_post_process = Float.T(default=0.)
2812 t_wallclock = Float.T(default=0.)
2813 t_cpu = Float.T(default=0.)
2814 n_read_blocks = Int.T(default=0)
2815 n_results = Int.T(default=0)
2816 n_subrequests = Int.T(default=0)
2817 n_stores = Int.T(default=0)
2818 n_records_stacked = Int.T(default=0)
2821class Response(Object):
2822 '''
2823 Resonse object to a synthetic seismogram computation request.
2824 '''
2826 request = Request.T()
2827 results_list = List.T(List.T(meta.SeismosizerResult.T()))
2828 stats = ProcessingStats.T()
2830 def pyrocko_traces(self):
2831 '''
2832 Return a list of requested
2833 :class:`~pyrocko.trace.Trace` instances.
2834 '''
2836 traces = []
2837 for results in self.results_list:
2838 for result in results:
2839 if not isinstance(result, meta.Result):
2840 continue
2841 traces.append(result.trace.pyrocko_trace())
2843 return traces
2845 def kite_scenes(self):
2846 '''
2847 Return a list of requested
2848 :class:`~kite.scenes` instances.
2849 '''
2850 kite_scenes = []
2851 for results in self.results_list:
2852 for result in results:
2853 if isinstance(result, meta.KiteSceneResult):
2854 sc = result.get_scene()
2855 kite_scenes.append(sc)
2857 return kite_scenes
2859 def static_results(self):
2860 '''
2861 Return a list of requested
2862 :class:`~pyrocko.gf.meta.StaticResult` instances.
2863 '''
2864 statics = []
2865 for results in self.results_list:
2866 for result in results:
2867 if not isinstance(result, meta.StaticResult):
2868 continue
2869 statics.append(result)
2871 return statics
2873 def iter_results(self, get='pyrocko_traces'):
2874 '''
2875 Generator function to iterate over results of request.
2877 Yields associated :py:class:`Source`,
2878 :class:`~pyrocko.gf.targets.Target`,
2879 :class:`~pyrocko.trace.Trace` instances in each iteration.
2880 '''
2882 for isource, source in enumerate(self.request.sources):
2883 for itarget, target in enumerate(self.request.targets):
2884 result = self.results_list[isource][itarget]
2885 if get == 'pyrocko_traces':
2886 yield source, target, result.trace.pyrocko_trace()
2887 elif get == 'results':
2888 yield source, target, result
2890 def snuffle(self, **kwargs):
2891 '''
2892 Open *snuffler* with requested traces.
2893 '''
2895 trace.snuffle(self.pyrocko_traces(), **kwargs)
2898class Engine(Object):
2899 '''
2900 Base class for synthetic seismogram calculators.
2901 '''
2903 def get_store_ids(self):
2904 '''
2905 Get list of available GF store IDs
2906 '''
2908 return []
2911class Rule(object):
2912 pass
2915class VectorRule(Rule):
2917 def __init__(self, quantity, differentiate=0, integrate=0):
2918 self.components = [quantity + '.' + c for c in 'ned']
2919 self.differentiate = differentiate
2920 self.integrate = integrate
2922 def required_components(self, target):
2923 n, e, d = self.components
2924 sa, ca, sd, cd = target.get_sin_cos_factors()
2926 comps = []
2927 if nonzero(ca * cd):
2928 comps.append(n)
2930 if nonzero(sa * cd):
2931 comps.append(e)
2933 if nonzero(sd):
2934 comps.append(d)
2936 return tuple(comps)
2938 def apply_(self, target, base_seismogram):
2939 n, e, d = self.components
2940 sa, ca, sd, cd = target.get_sin_cos_factors()
2942 if nonzero(ca * cd):
2943 data = base_seismogram[n].data * (ca * cd)
2944 deltat = base_seismogram[n].deltat
2945 else:
2946 data = 0.0
2948 if nonzero(sa * cd):
2949 data = data + base_seismogram[e].data * (sa * cd)
2950 deltat = base_seismogram[e].deltat
2952 if nonzero(sd):
2953 data = data + base_seismogram[d].data * sd
2954 deltat = base_seismogram[d].deltat
2956 if self.differentiate:
2957 data = util.diff_fd(self.differentiate, 4, deltat, data)
2959 if self.integrate:
2960 raise NotImplementedError('Integration is not implemented yet.')
2962 return data
2965class HorizontalVectorRule(Rule):
2967 def __init__(self, quantity, differentiate=0, integrate=0):
2968 self.components = [quantity + '.' + c for c in 'ne']
2969 self.differentiate = differentiate
2970 self.integrate = integrate
2972 def required_components(self, target):
2973 n, e = self.components
2974 sa, ca, _, _ = target.get_sin_cos_factors()
2976 comps = []
2977 if nonzero(ca):
2978 comps.append(n)
2980 if nonzero(sa):
2981 comps.append(e)
2983 return tuple(comps)
2985 def apply_(self, target, base_seismogram):
2986 n, e = self.components
2987 sa, ca, _, _ = target.get_sin_cos_factors()
2989 if nonzero(ca):
2990 data = base_seismogram[n].data * ca
2991 else:
2992 data = 0.0
2994 if nonzero(sa):
2995 data = data + base_seismogram[e].data * sa
2997 if self.differentiate:
2998 deltat = base_seismogram[e].deltat
2999 data = util.diff_fd(self.differentiate, 4, deltat, data)
3001 if self.integrate:
3002 raise NotImplementedError('Integration is not implemented yet.')
3004 return data
3007class ScalarRule(Rule):
3009 def __init__(self, quantity, differentiate=0):
3010 self.c = quantity
3012 def required_components(self, target):
3013 return (self.c, )
3015 def apply_(self, target, base_seismogram):
3016 data = base_seismogram[self.c].data.copy()
3017 deltat = base_seismogram[self.c].deltat
3018 if self.differentiate:
3019 data = util.diff_fd(self.differentiate, 4, deltat, data)
3021 return data
3024class StaticDisplacement(Rule):
3026 def required_components(self, target):
3027 return tuple(['displacement.%s' % c for c in list('ned')])
3029 def apply_(self, target, base_statics):
3030 if isinstance(target, SatelliteTarget):
3031 los_fac = target.get_los_factors()
3032 base_statics['displacement.los'] =\
3033 (los_fac[:, 0] * -base_statics['displacement.d'] +
3034 los_fac[:, 1] * base_statics['displacement.e'] +
3035 los_fac[:, 2] * base_statics['displacement.n'])
3036 return base_statics
3039channel_rules = {
3040 'displacement': [VectorRule('displacement')],
3041 'rotation': [VectorRule('rotation')],
3042 'velocity': [
3043 VectorRule('velocity'),
3044 VectorRule('displacement', differentiate=1)],
3045 'acceleration': [
3046 VectorRule('acceleration'),
3047 VectorRule('velocity', differentiate=1),
3048 VectorRule('displacement', differentiate=2)],
3049 'pore_pressure': [ScalarRule('pore_pressure')],
3050 'vertical_tilt': [HorizontalVectorRule('vertical_tilt')],
3051 'darcy_velocity': [VectorRule('darcy_velocity')],
3052}
3054static_rules = {
3055 'displacement': [StaticDisplacement()]
3056}
3059class OutOfBoundsContext(Object):
3060 source = Source.T()
3061 target = Target.T()
3062 distance = Float.T()
3063 components = List.T(String.T())
3066def process_dynamic_timeseries(work, psources, ptargets, engine, nthreads=0):
3067 dsource_cache = {}
3068 tcounters = list(range(6))
3070 store_ids = set()
3071 sources = set()
3072 targets = set()
3074 for itarget, target in enumerate(ptargets):
3075 target._id = itarget
3077 for w in work:
3078 _, _, isources, itargets = w
3080 sources.update([psources[isource] for isource in isources])
3081 targets.update([ptargets[itarget] for itarget in itargets])
3083 store_ids = set([t.store_id for t in targets])
3085 for isource, source in enumerate(psources):
3087 components = set()
3088 for itarget, target in enumerate(targets):
3089 rule = engine.get_rule(source, target)
3090 components.update(rule.required_components(target))
3092 for store_id in store_ids:
3093 store_targets = [t for t in targets if t.store_id == store_id]
3095 sample_rates = set([t.sample_rate for t in store_targets])
3096 interpolations = set([t.interpolation for t in store_targets])
3098 base_seismograms = []
3099 store_targets_out = []
3101 for samp_rate in sample_rates:
3102 for interp in interpolations:
3103 engine_targets = [
3104 t for t in store_targets if t.sample_rate == samp_rate
3105 and t.interpolation == interp]
3107 if not engine_targets:
3108 continue
3110 store_targets_out += engine_targets
3112 base_seismograms += engine.base_seismograms(
3113 source,
3114 engine_targets,
3115 components,
3116 dsource_cache,
3117 nthreads)
3119 for iseis, seismogram in enumerate(base_seismograms):
3120 for tr in seismogram.values():
3121 if tr.err != store.SeismosizerErrorEnum.SUCCESS:
3122 e = SeismosizerError(
3123 'Seismosizer failed with return code %i\n%s' % (
3124 tr.err, str(
3125 OutOfBoundsContext(
3126 source=source,
3127 target=store_targets[iseis],
3128 distance=source.distance_to(
3129 store_targets[iseis]),
3130 components=components))))
3131 raise e
3133 for seismogram, target in zip(base_seismograms, store_targets_out):
3135 try:
3136 result = engine._post_process_dynamic(
3137 seismogram, source, target)
3138 except SeismosizerError as e:
3139 result = e
3141 yield (isource, target._id, result), tcounters
3144def process_dynamic(work, psources, ptargets, engine, nthreads=0):
3145 dsource_cache = {}
3147 for w in work:
3148 _, _, isources, itargets = w
3150 sources = [psources[isource] for isource in isources]
3151 targets = [ptargets[itarget] for itarget in itargets]
3153 components = set()
3154 for target in targets:
3155 rule = engine.get_rule(sources[0], target)
3156 components.update(rule.required_components(target))
3158 for isource, source in zip(isources, sources):
3159 for itarget, target in zip(itargets, targets):
3161 try:
3162 base_seismogram, tcounters = engine.base_seismogram(
3163 source, target, components, dsource_cache, nthreads)
3164 except meta.OutOfBounds as e:
3165 e.context = OutOfBoundsContext(
3166 source=sources[0],
3167 target=targets[0],
3168 distance=sources[0].distance_to(targets[0]),
3169 components=components)
3170 raise
3172 n_records_stacked = 0
3173 t_optimize = 0.0
3174 t_stack = 0.0
3176 for _, tr in base_seismogram.items():
3177 n_records_stacked += tr.n_records_stacked
3178 t_optimize += tr.t_optimize
3179 t_stack += tr.t_stack
3181 try:
3182 result = engine._post_process_dynamic(
3183 base_seismogram, source, target)
3184 result.n_records_stacked = n_records_stacked
3185 result.n_shared_stacking = len(sources) *\
3186 len(targets)
3187 result.t_optimize = t_optimize
3188 result.t_stack = t_stack
3189 except SeismosizerError as e:
3190 result = e
3192 tcounters.append(xtime())
3193 yield (isource, itarget, result), tcounters
3196def process_static(work, psources, ptargets, engine, nthreads=0):
3197 for w in work:
3198 _, _, isources, itargets = w
3200 sources = [psources[isource] for isource in isources]
3201 targets = [ptargets[itarget] for itarget in itargets]
3203 for isource, source in zip(isources, sources):
3204 for itarget, target in zip(itargets, targets):
3205 components = engine.get_rule(source, target)\
3206 .required_components(target)
3208 try:
3209 base_statics, tcounters = engine.base_statics(
3210 source, target, components, nthreads)
3211 except meta.OutOfBounds as e:
3212 e.context = OutOfBoundsContext(
3213 source=sources[0],
3214 target=targets[0],
3215 distance=float('nan'),
3216 components=components)
3217 raise
3218 result = engine._post_process_statics(
3219 base_statics, source, target)
3220 tcounters.append(xtime())
3222 yield (isource, itarget, result), tcounters
3225class LocalEngine(Engine):
3226 '''
3227 Offline synthetic seismogram calculator.
3229 :param use_env: if ``True``, fill :py:attr:`store_superdirs` and
3230 :py:attr:`store_dirs` with paths set in environment variables
3231 GF_STORE_SUPERDIRS and GF_STORE_DIRS.
3232 :param use_config: if ``True``, fill :py:attr:`store_superdirs` and
3233 :py:attr:`store_dirs` with paths set in the user's config file.
3235 The config file can be found at :file:`~/.pyrocko/config.pf`
3237 .. code-block :: python
3239 gf_store_dirs: ['/home/pyrocko/gf_stores/ak135/']
3240 gf_store_superdirs: ['/home/pyrocko/gf_stores/']
3241 '''
3243 store_superdirs = List.T(
3244 String.T(),
3245 help='directories which are searched for Green\'s function stores')
3247 store_dirs = List.T(
3248 String.T(),
3249 help='additional individual Green\'s function store directories')
3251 default_store_id = String.T(
3252 optional=True,
3253 help='default store ID to be used when a request does not provide '
3254 'one')
3256 def __init__(self, **kwargs):
3257 use_env = kwargs.pop('use_env', False)
3258 use_config = kwargs.pop('use_config', False)
3259 Engine.__init__(self, **kwargs)
3260 if use_env:
3261 env_store_superdirs = os.environ.get('GF_STORE_SUPERDIRS', '')
3262 env_store_dirs = os.environ.get('GF_STORE_DIRS', '')
3263 if env_store_superdirs:
3264 self.store_superdirs.extend(env_store_superdirs.split(':'))
3266 if env_store_dirs:
3267 self.store_dirs.extend(env_store_dirs.split(':'))
3269 if use_config:
3270 c = config.config()
3271 self.store_superdirs.extend(c.gf_store_superdirs)
3272 self.store_dirs.extend(c.gf_store_dirs)
3274 self._check_store_dirs_type()
3275 self._id_to_store_dir = {}
3276 self._open_stores = {}
3277 self._effective_default_store_id = None
3279 def _check_store_dirs_type(self):
3280 for sdir in ['store_dirs', 'store_superdirs']:
3281 if not isinstance(self.__getattribute__(sdir), list):
3282 raise TypeError("{} of {} is not of type list".format(
3283 sdir, self.__class__.__name__))
3285 def _get_store_id(self, store_dir):
3286 store_ = store.Store(store_dir)
3287 store_id = store_.config.id
3288 store_.close()
3289 return store_id
3291 def _looks_like_store_dir(self, store_dir):
3292 return os.path.isdir(store_dir) and \
3293 all(os.path.isfile(pjoin(store_dir, x)) for x in
3294 ('index', 'traces', 'config'))
3296 def iter_store_dirs(self):
3297 store_dirs = set()
3298 for d in self.store_superdirs:
3299 if not os.path.exists(d):
3300 logger.warning('store_superdir not available: %s' % d)
3301 continue
3303 for entry in os.listdir(d):
3304 store_dir = os.path.realpath(pjoin(d, entry))
3305 if self._looks_like_store_dir(store_dir):
3306 store_dirs.add(store_dir)
3308 for store_dir in self.store_dirs:
3309 store_dirs.add(os.path.realpath(store_dir))
3311 return store_dirs
3313 def _scan_stores(self):
3314 for store_dir in self.iter_store_dirs():
3315 store_id = self._get_store_id(store_dir)
3316 if store_id not in self._id_to_store_dir:
3317 self._id_to_store_dir[store_id] = store_dir
3318 else:
3319 if store_dir != self._id_to_store_dir[store_id]:
3320 raise DuplicateStoreId(
3321 'GF store ID %s is used in (at least) two '
3322 'different stores. Locations are: %s and %s' %
3323 (store_id, self._id_to_store_dir[store_id], store_dir))
3325 def get_store_dir(self, store_id):
3326 '''
3327 Lookup directory given a GF store ID.
3328 '''
3330 if store_id not in self._id_to_store_dir:
3331 self._scan_stores()
3333 if store_id not in self._id_to_store_dir:
3334 raise NoSuchStore(store_id, self.iter_store_dirs())
3336 return self._id_to_store_dir[store_id]
3338 def get_store_ids(self):
3339 '''
3340 Get list of available store IDs.
3341 '''
3343 self._scan_stores()
3344 return sorted(self._id_to_store_dir.keys())
3346 def effective_default_store_id(self):
3347 if self._effective_default_store_id is None:
3348 if self.default_store_id is None:
3349 store_ids = self.get_store_ids()
3350 if len(store_ids) == 1:
3351 self._effective_default_store_id = self.get_store_ids()[0]
3352 else:
3353 raise NoDefaultStoreSet()
3354 else:
3355 self._effective_default_store_id = self.default_store_id
3357 return self._effective_default_store_id
3359 def get_store(self, store_id=None):
3360 '''
3361 Get a store from the engine.
3363 :param store_id: identifier of the store (optional)
3364 :returns: :py:class:`~pyrocko.gf.store.Store` object
3366 If no ``store_id`` is provided the store
3367 associated with the :py:gattr:`default_store_id` is returned.
3368 Raises :py:exc:`NoDefaultStoreSet` if :py:gattr:`default_store_id` is
3369 undefined.
3370 '''
3372 if store_id is None:
3373 store_id = self.effective_default_store_id()
3375 if store_id not in self._open_stores:
3376 store_dir = self.get_store_dir(store_id)
3377 self._open_stores[store_id] = store.Store(store_dir)
3379 return self._open_stores[store_id]
3381 def get_store_config(self, store_id):
3382 store = self.get_store(store_id)
3383 return store.config
3385 def get_store_extra(self, store_id, key):
3386 store = self.get_store(store_id)
3387 return store.get_extra(key)
3389 def close_cashed_stores(self):
3390 '''
3391 Close and remove ids from cashed stores.
3392 '''
3393 store_ids = []
3394 for store_id, store_ in self._open_stores.items():
3395 store_.close()
3396 store_ids.append(store_id)
3398 for store_id in store_ids:
3399 self._open_stores.pop(store_id)
3401 def get_rule(self, source, target):
3402 cprovided = self.get_store(target.store_id).get_provided_components()
3404 if isinstance(target, StaticTarget):
3405 quantity = target.quantity
3406 available_rules = static_rules
3407 elif isinstance(target, Target):
3408 quantity = target.effective_quantity()
3409 available_rules = channel_rules
3411 try:
3412 for rule in available_rules[quantity]:
3413 cneeded = rule.required_components(target)
3414 if all(c in cprovided for c in cneeded):
3415 return rule
3417 except KeyError:
3418 pass
3420 raise BadRequest(
3421 'No rule to calculate "%s" with GFs from store "%s" '
3422 'for source model "%s".' % (
3423 target.effective_quantity(),
3424 target.store_id,
3425 source.__class__.__name__))
3427 def _cached_discretize_basesource(self, source, store, cache, target):
3428 if (source, store) not in cache:
3429 cache[source, store] = source.discretize_basesource(store, target)
3431 return cache[source, store]
3433 def base_seismograms(self, source, targets, components, dsource_cache,
3434 nthreads=0):
3436 target = targets[0]
3438 interp = set([t.interpolation for t in targets])
3439 if len(interp) > 1:
3440 raise BadRequest('Targets have different interpolation schemes.')
3442 rates = set([t.sample_rate for t in targets])
3443 if len(rates) > 1:
3444 raise BadRequest('Targets have different sample rates.')
3446 store_ = self.get_store(target.store_id)
3447 receivers = [t.receiver(store_) for t in targets]
3449 if target.sample_rate is not None:
3450 deltat = 1. / target.sample_rate
3451 rate = target.sample_rate
3452 else:
3453 deltat = None
3454 rate = store_.config.sample_rate
3456 tmin = num.fromiter(
3457 (t.tmin for t in targets), dtype=float, count=len(targets))
3458 tmax = num.fromiter(
3459 (t.tmax for t in targets), dtype=float, count=len(targets))
3461 itmin = num.floor(tmin * rate).astype(num.int64)
3462 itmax = num.ceil(tmax * rate).astype(num.int64)
3463 nsamples = itmax - itmin + 1
3465 mask = num.isnan(tmin)
3466 itmin[mask] = 0
3467 nsamples[mask] = -1
3469 base_source = self._cached_discretize_basesource(
3470 source, store_, dsource_cache, target)
3472 base_seismograms = store_.calc_seismograms(
3473 base_source, receivers, components,
3474 deltat=deltat,
3475 itmin=itmin, nsamples=nsamples,
3476 interpolation=target.interpolation,
3477 optimization=target.optimization,
3478 nthreads=nthreads)
3480 for i, base_seismogram in enumerate(base_seismograms):
3481 base_seismograms[i] = store.make_same_span(base_seismogram)
3483 return base_seismograms
3485 def base_seismogram(self, source, target, components, dsource_cache,
3486 nthreads):
3488 tcounters = [xtime()]
3490 store_ = self.get_store(target.store_id)
3491 receiver = target.receiver(store_)
3493 if target.tmin and target.tmax is not None:
3494 rate = store_.config.sample_rate
3495 itmin = int(num.floor(target.tmin * rate))
3496 itmax = int(num.ceil(target.tmax * rate))
3497 nsamples = itmax - itmin + 1
3498 else:
3499 itmin = None
3500 nsamples = None
3502 tcounters.append(xtime())
3503 base_source = self._cached_discretize_basesource(
3504 source, store_, dsource_cache, target)
3506 tcounters.append(xtime())
3508 if target.sample_rate is not None:
3509 deltat = 1. / target.sample_rate
3510 else:
3511 deltat = None
3513 base_seismogram = store_.seismogram(
3514 base_source, receiver, components,
3515 deltat=deltat,
3516 itmin=itmin, nsamples=nsamples,
3517 interpolation=target.interpolation,
3518 optimization=target.optimization,
3519 nthreads=nthreads)
3521 tcounters.append(xtime())
3523 base_seismogram = store.make_same_span(base_seismogram)
3525 tcounters.append(xtime())
3527 return base_seismogram, tcounters
3529 def base_statics(self, source, target, components, nthreads):
3530 tcounters = [xtime()]
3531 store_ = self.get_store(target.store_id)
3533 if target.tsnapshot is not None:
3534 rate = store_.config.sample_rate
3535 itsnapshot = int(num.floor(target.tsnapshot * rate))
3536 else:
3537 itsnapshot = None
3538 tcounters.append(xtime())
3540 base_source = source.discretize_basesource(store_, target=target)
3542 tcounters.append(xtime())
3544 base_statics = store_.statics(
3545 base_source,
3546 target,
3547 itsnapshot,
3548 components,
3549 target.interpolation,
3550 nthreads)
3552 tcounters.append(xtime())
3554 return base_statics, tcounters
3556 def _post_process_dynamic(self, base_seismogram, source, target):
3557 base_any = next(iter(base_seismogram.values()))
3558 deltat = base_any.deltat
3559 itmin = base_any.itmin
3561 rule = self.get_rule(source, target)
3562 data = rule.apply_(target, base_seismogram)
3564 factor = source.get_factor() * target.get_factor()
3565 if factor != 1.0:
3566 data = data * factor
3568 stf = source.effective_stf_post()
3570 times, amplitudes = stf.discretize_t(
3571 deltat, 0.0)
3573 # repeat end point to prevent boundary effects
3574 padded_data = num.empty(data.size + amplitudes.size, dtype=float)
3575 padded_data[:data.size] = data
3576 padded_data[data.size:] = data[-1]
3577 data = num.convolve(amplitudes, padded_data)
3579 tmin = itmin * deltat + times[0]
3581 tr = meta.SeismosizerTrace(
3582 codes=target.codes,
3583 data=data[:-amplitudes.size],
3584 deltat=deltat,
3585 tmin=tmin)
3587 return target.post_process(self, source, tr)
3589 def _post_process_statics(self, base_statics, source, starget):
3590 rule = self.get_rule(source, starget)
3591 data = rule.apply_(starget, base_statics)
3593 factor = source.get_factor()
3594 if factor != 1.0:
3595 for v in data.values():
3596 v *= factor
3598 return starget.post_process(self, source, base_statics)
3600 def process(self, *args, **kwargs):
3601 '''
3602 Process a request.
3604 ::
3606 process(**kwargs)
3607 process(request, **kwargs)
3608 process(sources, targets, **kwargs)
3610 The request can be given a a :py:class:`Request` object, or such an
3611 object is created using ``Request(**kwargs)`` for convenience.
3613 :returns: :py:class:`Response` object
3614 '''
3616 if len(args) not in (0, 1, 2):
3617 raise BadRequest('Invalid arguments.')
3619 if len(args) == 1:
3620 kwargs['request'] = args[0]
3622 elif len(args) == 2:
3623 kwargs.update(Request.args2kwargs(args))
3625 request = kwargs.pop('request', None)
3626 status_callback = kwargs.pop('status_callback', None)
3627 calc_timeseries = kwargs.pop('calc_timeseries', True)
3629 nprocs = kwargs.pop('nprocs', None)
3630 nthreads = kwargs.pop('nthreads', 1)
3631 if nprocs is not None:
3632 nthreads = nprocs
3634 if request is None:
3635 request = Request(**kwargs)
3637 if resource:
3638 rs0 = resource.getrusage(resource.RUSAGE_SELF)
3639 rc0 = resource.getrusage(resource.RUSAGE_CHILDREN)
3640 tt0 = xtime()
3642 # make sure stores are open before fork()
3643 store_ids = set(target.store_id for target in request.targets)
3644 for store_id in store_ids:
3645 self.get_store(store_id)
3647 source_index = dict((x, i) for (i, x) in
3648 enumerate(request.sources))
3649 target_index = dict((x, i) for (i, x) in
3650 enumerate(request.targets))
3652 m = request.subrequest_map()
3654 skeys = sorted(m.keys(), key=cmp_to_key(cmp_none_aware))
3655 results_list = []
3657 for i in range(len(request.sources)):
3658 results_list.append([None] * len(request.targets))
3660 tcounters_dyn_list = []
3661 tcounters_static_list = []
3662 nsub = len(skeys)
3663 isub = 0
3665 # Processing dynamic targets through
3666 # parimap(process_subrequest_dynamic)
3668 if calc_timeseries:
3669 _process_dynamic = process_dynamic_timeseries
3670 else:
3671 _process_dynamic = process_dynamic
3673 if request.has_dynamic:
3674 work_dynamic = [
3675 (i, nsub,
3676 [source_index[source] for source in m[k][0]],
3677 [target_index[target] for target in m[k][1]
3678 if not isinstance(target, StaticTarget)])
3679 for (i, k) in enumerate(skeys)]
3681 for ii_results, tcounters_dyn in _process_dynamic(
3682 work_dynamic, request.sources, request.targets, self,
3683 nthreads):
3685 tcounters_dyn_list.append(num.diff(tcounters_dyn))
3686 isource, itarget, result = ii_results
3687 results_list[isource][itarget] = result
3689 if status_callback:
3690 status_callback(isub, nsub)
3692 isub += 1
3694 # Processing static targets through process_static
3695 if request.has_statics:
3696 work_static = [
3697 (i, nsub,
3698 [source_index[source] for source in m[k][0]],
3699 [target_index[target] for target in m[k][1]
3700 if isinstance(target, StaticTarget)])
3701 for (i, k) in enumerate(skeys)]
3703 for ii_results, tcounters_static in process_static(
3704 work_static, request.sources, request.targets, self,
3705 nthreads=nthreads):
3707 tcounters_static_list.append(num.diff(tcounters_static))
3708 isource, itarget, result = ii_results
3709 results_list[isource][itarget] = result
3711 if status_callback:
3712 status_callback(isub, nsub)
3714 isub += 1
3716 if status_callback:
3717 status_callback(nsub, nsub)
3719 tt1 = time.time()
3720 if resource:
3721 rs1 = resource.getrusage(resource.RUSAGE_SELF)
3722 rc1 = resource.getrusage(resource.RUSAGE_CHILDREN)
3724 s = ProcessingStats()
3726 if request.has_dynamic:
3727 tcumu_dyn = num.sum(num.vstack(tcounters_dyn_list), axis=0)
3728 t_dyn = float(num.sum(tcumu_dyn))
3729 perc_dyn = map(float, tcumu_dyn / t_dyn * 100.)
3730 (s.t_perc_get_store_and_receiver,
3731 s.t_perc_discretize_source,
3732 s.t_perc_make_base_seismogram,
3733 s.t_perc_make_same_span,
3734 s.t_perc_post_process) = perc_dyn
3735 else:
3736 t_dyn = 0.
3738 if request.has_statics:
3739 tcumu_static = num.sum(num.vstack(tcounters_static_list), axis=0)
3740 t_static = num.sum(tcumu_static)
3741 perc_static = map(float, tcumu_static / t_static * 100.)
3742 (s.t_perc_static_get_store,
3743 s.t_perc_static_discretize_basesource,
3744 s.t_perc_static_sum_statics,
3745 s.t_perc_static_post_process) = perc_static
3747 s.t_wallclock = tt1 - tt0
3748 if resource:
3749 s.t_cpu = (
3750 (rs1.ru_utime + rs1.ru_stime + rc1.ru_utime + rc1.ru_stime) -
3751 (rs0.ru_utime + rs0.ru_stime + rc0.ru_utime + rc0.ru_stime))
3752 s.n_read_blocks = (
3753 (rs1.ru_inblock + rc1.ru_inblock) -
3754 (rs0.ru_inblock + rc0.ru_inblock))
3756 n_records_stacked = 0.
3757 for results in results_list:
3758 for result in results:
3759 if not isinstance(result, meta.Result):
3760 continue
3761 shr = float(result.n_shared_stacking)
3762 n_records_stacked += result.n_records_stacked / shr
3763 s.t_perc_optimize += result.t_optimize / shr
3764 s.t_perc_stack += result.t_stack / shr
3765 s.n_records_stacked = int(n_records_stacked)
3766 if t_dyn != 0.:
3767 s.t_perc_optimize /= t_dyn * 100
3768 s.t_perc_stack /= t_dyn * 100
3770 return Response(
3771 request=request,
3772 results_list=results_list,
3773 stats=s)
3776class RemoteEngine(Engine):
3777 '''
3778 Client for remote synthetic seismogram calculator.
3779 '''
3781 site = String.T(default=ws.g_default_site, optional=True)
3782 url = String.T(default=ws.g_url, optional=True)
3784 def process(self, request=None, status_callback=None, **kwargs):
3786 if request is None:
3787 request = Request(**kwargs)
3789 return ws.seismosizer(url=self.url, site=self.site, request=request)
3792g_engine = None
3795def get_engine(store_superdirs=[]):
3796 global g_engine
3797 if g_engine is None:
3798 g_engine = LocalEngine(use_env=True, use_config=True)
3800 for d in store_superdirs:
3801 if d not in g_engine.store_superdirs:
3802 g_engine.store_superdirs.append(d)
3804 return g_engine
3807class SourceGroup(Object):
3809 def __getattr__(self, k):
3810 return num.fromiter((getattr(s, k) for s in self),
3811 dtype=float)
3813 def __iter__(self):
3814 raise NotImplementedError(
3815 'This method should be implemented in subclass.')
3817 def __len__(self):
3818 raise NotImplementedError(
3819 'This method should be implemented in subclass.')
3822class SourceList(SourceGroup):
3823 sources = List.T(Source.T())
3825 def append(self, s):
3826 self.sources.append(s)
3828 def __iter__(self):
3829 return iter(self.sources)
3831 def __len__(self):
3832 return len(self.sources)
3835class SourceGrid(SourceGroup):
3837 base = Source.T()
3838 variables = Dict.T(String.T(), Range.T())
3839 order = List.T(String.T())
3841 def __len__(self):
3842 n = 1
3843 for (k, v) in self.make_coords(self.base):
3844 n *= len(list(v))
3846 return n
3848 def __iter__(self):
3849 for items in permudef(self.make_coords(self.base)):
3850 s = self.base.clone(**{k: v for (k, v) in items})
3851 s.regularize()
3852 yield s
3854 def ordered_params(self):
3855 ks = list(self.variables.keys())
3856 for k in self.order + list(self.base.keys()):
3857 if k in ks:
3858 yield k
3859 ks.remove(k)
3860 if ks:
3861 raise Exception('Invalid parameter "%s" for source type "%s".' %
3862 (ks[0], self.base.__class__.__name__))
3864 def make_coords(self, base):
3865 return [(param, self.variables[param].make(base=base[param]))
3866 for param in self.ordered_params()]
3869source_classes = [
3870 Source,
3871 SourceWithMagnitude,
3872 SourceWithDerivedMagnitude,
3873 ExplosionSource,
3874 RectangularExplosionSource,
3875 DCSource,
3876 CLVDSource,
3877 VLVDSource,
3878 MTSource,
3879 RectangularSource,
3880 DoubleDCSource,
3881 RingfaultSource,
3882 CombiSource,
3883 SFSource,
3884 PorePressurePointSource,
3885 PorePressureLineSource,
3886]
3888stf_classes = [
3889 STF,
3890 BoxcarSTF,
3891 TriangularSTF,
3892 HalfSinusoidSTF,
3893 ResonatorSTF,
3894]
3896__all__ = '''
3897SeismosizerError
3898BadRequest
3899NoSuchStore
3900DerivedMagnitudeError
3901STFMode
3902'''.split() + [S.__name__ for S in source_classes + stf_classes] + '''
3903Request
3904ProcessingStats
3905Response
3906Engine
3907LocalEngine
3908RemoteEngine
3909source_classes
3910get_engine
3911Range
3912SourceGroup
3913SourceList
3914SourceGrid
3915map_anchor
3916'''.split()