1# http://pyrocko.org - GPLv3
2#
3# The Pyrocko Developers, 21st Century
4# ---|P------/S----------~Lg----------
6'''
10.. _coordinate-system-names:
12Coordinate systems
13..................
15Coordinate system names commonly used in source models.
17================= ============================================
18Name Description
19================= ============================================
20``'xyz'`` northing, easting, depth in [m]
21``'xy'`` northing, easting in [m]
22``'latlon'`` latitude, longitude in [deg]
23``'lonlat'`` longitude, latitude in [deg]
24``'latlondepth'`` latitude, longitude in [deg], depth in [m]
25================= ============================================
26'''
28from collections import defaultdict
29from functools import cmp_to_key
30import time
31import math
32import os
33import re
34import logging
35try:
36 import resource
37except ImportError:
38 resource = None
39from hashlib import sha1
41import numpy as num
42from scipy.interpolate import RegularGridInterpolator
44from pyrocko.guts import (Object, Float, String, StringChoice, List,
45 Timestamp, Int, SObject, ArgumentError, Dict,
46 ValidationError, Bool)
47from pyrocko.guts_array import Array
49from pyrocko import moment_tensor as pmt
50from pyrocko import trace, util, config, model, eikonal_ext
51from pyrocko.orthodrome import ne_to_latlon
52from pyrocko.model import Location
53from pyrocko.modelling import OkadaSource, make_okada_coefficient_matrix, \
54 okada_ext, invert_fault_dislocations_bem
56from . import meta, store, ws
57from .tractions import TractionField, DirectedTractions
58from .targets import Target, StaticTarget, SatelliteTarget
60pjoin = os.path.join
62guts_prefix = 'pf'
64d2r = math.pi / 180.
65r2d = 180. / math.pi
66km = 1e3
68logger = logging.getLogger('pyrocko.gf.seismosizer')
def cmp_none_aware(a, b):
    '''
    Three-way comparison which orders ``None`` before any other value.

    Tuples are compared element-wise, recursively. Returns -1, 0 or 1,
    suitable for :py:func:`functools.cmp_to_key`.
    '''
    if isinstance(a, tuple) and isinstance(b, tuple):
        for ea, eb in zip(a, b):
            result = cmp_none_aware(ea, eb)
            if result != 0:
                return result

        return 0

    if a is None or b is None:
        if a is None and b is None:
            return 0

        return -1 if a is None else 1

    # Standard cmp() replacement for orderable values.
    return bool(a > b) - bool(a < b)
def xtime():
    '''
    Get the current system time (thin wrapper around :py:func:`time.time`).
    '''
    now = time.time()
    return now
class SeismosizerError(Exception):
    '''
    Base class for errors raised by the seismosizer engine.
    '''
    pass
class BadRequest(SeismosizerError):
    '''
    Raised when an invalid request is made to the seismosizer.
    '''
    pass
class DuplicateStoreId(Exception):
    '''
    Raised when more than one GF store with the same ID is encountered.
    '''
    pass
class NoDefaultStoreSet(Exception):
    '''
    Raised when a default GF store is requested but none has been set.
    '''
    pass
class ConversionError(Exception):
    '''
    Raised when a conversion between source representations fails.
    '''
    pass
class NoSuchStore(BadRequest):
    '''
    Raised when a requested GF store cannot be found.
    '''

    def __init__(self, store_id=None, dirs=None):
        BadRequest.__init__(self)
        self.store_id = store_id
        self.dirs = dirs

    def __str__(self):
        if self.store_id is None:
            message = 'GF store not found.'
        else:
            message = 'no GF store with id "%s" found.' % self.store_id

        if self.dirs is not None:
            message += ' Searched folders:\n %s' % '\n '.join(
                sorted(self.dirs))

        return message
def ufloat(s):
    '''
    Convert a string to ``float``, supporting metric unit suffixes.

    The suffixes ``'k'`` (1e3) and ``'M'`` (1e6) are recognized, e.g.
    ``'10k'`` maps to ``10000.0``.

    :param s: number string, optionally with a trailing unit character
    :returns: parsed value as ``float``
    :raises ValueError: if the string is a bare unit without a number, or
        not parseable as a number at all
    '''
    units = {
        'k': 1e3,
        'M': 1e6,
    }

    factor = 1.0
    if s and s[-1] in units:
        factor = units[s[-1]]
        # Bug fix: report the original input. Previously the string was
        # truncated first, so the error message always showed ''.
        if len(s) == 1:
            raise ValueError('unit without a number: \'%s\'' % s)

        s = s[:-1]

    return float(s) * factor
def ufloat_or_none(s):
    '''
    Like :py:func:`ufloat`, but map empty or ``None`` input to ``None``.
    '''
    return ufloat(s) if s else None
def int_or_none(s):
    '''
    Convert to ``int``, mapping empty or ``None`` input to ``None``.
    '''
    return int(s) if s else None
def nonzero(x, eps=1e-15):
    '''
    Check whether ``x`` differs from zero by more than ``eps``.
    '''
    magnitude = abs(x)
    return magnitude > eps
def permudef(ln, j=0):
    '''
    Recursively generate all value combinations of a definition list.

    ``ln`` is a list of ``(key, values)`` tuples. For every combination of
    picking one value per key, the list is yielded with each entry
    temporarily replaced by ``(key, value)``.

    NOTE(review): the *same* list object is mutated in place and yielded
    repeatedly; consumers must copy it to retain a yielded state. The
    original ``(key, values)`` entries are restored after iteration.
    '''
    if j < len(ln):
        k, v = ln[j]
        for y in v:
            ln[j] = k, y
            for s in permudef(ln, j + 1):
                yield s

        # Restore the original (key, values) entry before returning.
        ln[j] = k, v
        return
    else:
        yield ln
def arr(x):
    '''
    Coerce ``x`` into a numpy array of at least one dimension (scalars
    become one-element arrays).
    '''
    a = num.asarray(x)
    return num.atleast_1d(a)
def discretize_rect_source(deltas, deltat, time, north, east, depth,
                           strike, dip, length, width,
                           anchor, velocity=None, stf=None,
                           nucleation_x=None, nucleation_y=None,
                           decimation_factor=1, pointsonly=False,
                           plane_coords=False,
                           aggressive_oversampling=False):
    '''
    Discretize a rectangular source into a grid of sub-sources.

    :param deltas: GF store grid spacings, used to choose the sub-source
        spacing
    :param velocity: rupture velocity [m/s]; required unless
        ``pointsonly`` is ``True``
    :param anchor: anchor name, looked up in ``map_anchor``
    :param pointsonly: if ``True``, return only the point geometry,
        without rupture times and STF amplitudes
    :param plane_coords: if ``True``, return in-plane coordinates without
        rotation and translation
    :returns: ``(points, dl, dw, nl, nw)`` for ``pointsonly`` or
        ``plane_coords``, otherwise
        ``(points2, times2, amplitudes2, dl, dw, nl, nw)``
    '''

    if stf is None:
        stf = STF()

    if not velocity and not pointsonly:
        raise AttributeError('velocity is required in time mode')

    mindeltagf = float(num.min(deltas))
    if velocity:
        # Also resolve the rupture propagation in time.
        mindeltagf = min(mindeltagf, deltat * velocity)

    ln = length
    wd = width

    if aggressive_oversampling:
        nl = int((2. / decimation_factor) * num.ceil(ln / mindeltagf)) + 1
        nw = int((2. / decimation_factor) * num.ceil(wd / mindeltagf)) + 1
    else:
        nl = int((1. / decimation_factor) * num.ceil(ln / mindeltagf)) + 1
        nw = int((1. / decimation_factor) * num.ceil(wd / mindeltagf)) + 1

    n = int(nl * nw)

    dl = ln / nl
    dw = wd / nw

    # Sub-source centers, regularly spaced on the (unshifted) plane.
    xl = num.linspace(-0.5 * (ln - dl), 0.5 * (ln - dl), nl)
    xw = num.linspace(-0.5 * (wd - dw), 0.5 * (wd - dw), nw)

    points = num.zeros((n, 3))
    points[:, 0] = num.tile(xl, nw)
    points[:, 1] = num.repeat(xw, nl)

    # Distances from the nucleation point, measured before the anchor
    # shift is applied (nucleation coordinates refer to the plane center).
    if nucleation_x is not None:
        dist_x = num.abs(nucleation_x - points[:, 0])
    else:
        dist_x = num.zeros(n)

    if nucleation_y is not None:
        dist_y = num.abs(nucleation_y - points[:, 1])
    else:
        dist_y = num.zeros(n)

    dist = num.sqrt(dist_x**2 + dist_y**2)

    # Bug fix: only compute rupture times when a velocity is available.
    # Previously ``dist / velocity`` was evaluated unconditionally,
    # raising TypeError for ``pointsonly=True`` without velocity.
    if velocity:
        times = dist / velocity

    anch_x, anch_y = map_anchor[anchor]

    points[:, 0] -= anch_x * 0.5 * length
    points[:, 1] -= anch_y * 0.5 * width

    if plane_coords:
        return points, dl, dw, nl, nw

    rotmat = pmt.euler_to_matrix(dip * d2r, strike * d2r, 0.0)
    points = num.dot(rotmat.T, points.T).T

    points[:, 0] += north
    points[:, 1] += east
    points[:, 2] += depth

    if pointsonly:
        return points, dl, dw, nl, nw

    xtau, amplitudes = stf.discretize_t(deltat, time)
    nt = xtau.size

    # Repeat every sub-source for every STF sample; rupture-onset times
    # and STF sample offsets combine additively.
    points2 = num.repeat(points, nt, axis=0)
    times2 = (times[:, num.newaxis] + xtau[num.newaxis, :]).ravel()
    amplitudes2 = num.tile(amplitudes, n)

    return points2, times2, amplitudes2, dl, dw, nl, nw
def check_rect_source_discretisation(points2, nl, nw, store):
    '''
    Check that a rectangular source discretisation is fine enough.

    Assumes a non-rotated fault plane. Returns ``True`` if the sub-source
    spacing is small compared to the shortest wavelength resolvable by the
    GF store (vs at the sub-source depths times twice the store's sampling
    interval), ``False`` otherwise. Very coarse discretisations (only a
    few sub-sources) are accepted with a warning.
    '''
    N_CRITICAL = 8

    points = points2.T.reshape((3, nl, nw))
    if points.size <= N_CRITICAL:
        logger.warning('RectangularSource is defined by only %d sub-sources!'
                       % points.size)
        return True

    # Spacing between neighbouring rows of sub-sources.
    distances = num.linalg.norm(
        points[:, 0, :] - points[:, 1, :], axis=0)

    depths = points[2, 0, :]
    vs_profile = store.config.get_vs(
        lat=0., lon=0.,
        points=num.repeat(depths[:, num.newaxis], 3, axis=1),
        interpolation='multilinear')

    min_wavelength = vs_profile * (store.config.deltat * 2)
    return bool(num.all(min_wavelength > distances / 2))
def outline_rect_source(strike, dip, length, width, anchor):
    '''
    Get the outline of a rectangular source as a closed polygon.

    Returns a ``(5, 3)`` array of corner points (first point repeated at
    the end), shifted according to the anchor and rotated by strike and
    dip.
    '''
    half_l = 0.5 * length
    half_w = 0.5 * width

    points = num.array(
        [[-half_l, -half_w, 0.],
         [half_l, -half_w, 0.],
         [half_l, half_w, 0.],
         [-half_l, half_w, 0.],
         [-half_l, -half_w, 0.]])

    anch_x, anch_y = map_anchor[anchor]
    points[:, 0] -= anch_x * half_l
    points[:, 1] -= anch_y * half_w

    rotmat = pmt.euler_to_matrix(dip * d2r, strike * d2r, 0.0)

    return num.dot(rotmat.T, points.T).T
def from_plane_coords(
        strike, dip, length, width, depth, x_plane_coords, y_plane_coords,
        lat=0., lon=0.,
        north_shift=0, east_shift=0,
        anchor='top', cs='xy'):
    '''
    Convert relative plane coordinates to absolute coordinates.

    ``x_plane_coords`` and ``y_plane_coords`` (scalars or lists) are
    fractional positions on the fault plane. They are scaled by half the
    fault length/width, shifted by the anchor, rotated by strike and dip
    and offset by ``north_shift``/``east_shift``/``depth``.

    :param cs: output coordinate system, one of ``'xy'``, ``'latlon'``,
        ``'lonlat'``
    :returns: ``(x_abs, y_abs)`` lists (swapped for ``cs='lonlat'``)

    NOTE(review): only rows 1 and 2 of the 5-point array are picked below
    (``points[1:2, 1]`` / ``points[2:3, 0]`` and the analogous ``latlon``
    slices) — verify this index choice against the callers.
    '''
    ln = length
    wd = width
    x_abs = []
    y_abs = []
    # Scalars are promoted to single-element lists; both inputs are
    # assumed to have the same structure.
    if not isinstance(x_plane_coords, list):
        x_plane_coords = [x_plane_coords]
        y_plane_coords = [y_plane_coords]

    for x_plane, y_plane in zip(x_plane_coords, y_plane_coords):
        points = num.array(
            [[-0.5 * ln * x_plane, -0.5 * wd * y_plane, 0.],
             [0.5 * ln * x_plane, -0.5 * wd * y_plane, 0.],
             [0.5 * ln * x_plane, 0.5 * wd * y_plane, 0.],
             [-0.5 * ln * x_plane, 0.5 * wd * y_plane, 0.],
             [-0.5 * ln * x_plane, -0.5 * wd * y_plane, 0.]])

        anch_x, anch_y = map_anchor[anchor]
        points[:, 0] -= anch_x * 0.5 * length
        points[:, 1] -= anch_y * 0.5 * width

        rotmat = pmt.euler_to_matrix(dip * d2r, strike * d2r, 0.0)

        points = num.dot(rotmat.T, points.T).T
        points[:, 0] += north_shift
        points[:, 1] += east_shift
        points[:, 2] += depth
        if cs in ('latlon', 'lonlat'):
            latlon = ne_to_latlon(lat, lon,
                                  points[:, 0], points[:, 1])
            latlon = num.array(latlon).T
            x_abs.append(latlon[1:2, 1])
            y_abs.append(latlon[2:3, 0])
        if cs == 'xy':
            x_abs.append(points[1:2, 1])
            y_abs.append(points[2:3, 0])

    if cs == 'lonlat':
        return y_abs, x_abs
    else:
        return x_abs, y_abs
def points_on_rect_source(
        strike, dip, length, width, anchor,
        discretized_basesource=None, points_x=None, points_y=None):
    '''
    Convert relative plane coordinates to rotated fault-plane points.

    Either the patch-corner grid of ``discretized_basesource`` or explicit
    relative coordinates ``points_x``/``points_y`` (in ``[-1., 1.]``) are
    scaled to the fault dimensions, shifted by the anchor and rotated by
    strike and dip.

    :returns: ``(n, 3)`` array of points in the rotated coordinate system
    '''

    ln = length
    wd = width

    if isinstance(points_x, list) or isinstance(points_x, float):
        points_x = num.array([points_x])
    if isinstance(points_y, list) or isinstance(points_y, float):
        points_y = num.array([points_y])

    if discretized_basesource:
        ds = discretized_basesource

        nl_patches = ds.nl + 1
        nw_patches = ds.nw + 1

        npoints = nl_patches * nw_patches
        points = num.zeros((npoints, 3))
        # Idiom: use num.arange instead of list comprehensions over
        # range().
        ln_patches = num.arange(nl_patches)
        wd_patches = num.arange(nw_patches)

        # Normalize patch indices to the range [-1., 1.].
        points_ln =\
            2 * ((ln_patches - num.min(ln_patches)) / num.ptp(ln_patches)) - 1
        points_wd =\
            2 * ((wd_patches - num.min(wd_patches)) / num.ptp(wd_patches)) - 1

        for il in range(nl_patches):
            for iw in range(nw_patches):
                points[il * nw_patches + iw, :] = num.array([
                    points_ln[il] * ln * 0.5,
                    points_wd[iw] * wd * 0.5, 0.0])

    elif points_x.shape[0] > 0 and points_y.shape[0] > 0:
        points = num.zeros(shape=((len(points_x), 3)))
        for i, (x, y) in enumerate(zip(points_x, points_y)):
            points[i, :] = num.array(
                [x * 0.5 * ln, y * 0.5 * wd, 0.0])

    anch_x, anch_y = map_anchor[anchor]

    points[:, 0] -= anch_x * 0.5 * ln
    points[:, 1] -= anch_y * 0.5 * wd

    rotmat = pmt.euler_to_matrix(dip * d2r, strike * d2r, 0.0)

    return num.dot(rotmat.T, points.T).T
class InvalidGridDef(Exception):
    '''
    Raised when a grid or range specification cannot be parsed or used.
    '''
    pass
class Range(SObject):
    '''
    Convenient range specification.

    Equivalent ways to specify the range [ 0., 1000., ... 10000. ]::

      Range('0 .. 10k : 1k')
      Range(start=0., stop=10e3, step=1e3)
      Range(0, 10e3, 1e3)
      Range('0 .. 10k @ 11')
      Range(start=0., stop=10*km, n=11)

      Range(0, 10e3, n=11)
      Range(values=[x*1e3 for x in range(11)])

    Depending on the use context, it can be possible to omit any part of the
    specification. E.g. in the context of extracting a subset of an already
    existing range, the existing range's specification values would be filled
    in where missing.

    The values are distributed with equal spacing, unless the ``spacing``
    argument is modified. The values can be created offset or relative to an
    external base value with the ``relative`` argument if the use context
    supports this.

    The range specification can be expressed with a short string
    representation::

        'start .. stop @ num | spacing, relative'
        'start .. stop : step | spacing, relative'

    most parts of the expression can be omitted if not needed. Whitespace is
    allowed for readability but can also be omitted.
    '''

    start = Float.T(optional=True)
    stop = Float.T(optional=True)
    step = Float.T(optional=True)
    n = Int.T(optional=True)
    values = Array.T(optional=True, dtype=float, shape=(None,))

    spacing = StringChoice.T(
        choices=['lin', 'log', 'symlog'],
        default='lin',
        optional=True)

    relative = StringChoice.T(
        choices=['', 'add', 'mult'],
        default='',
        optional=True)

    # Matches 'start .. stop', optionally followed by '@ n' or ': step',
    # optionally followed by '| spacing,relative'.
    pattern = re.compile(r'^((?P<start>.*)\.\.(?P<stop>[^@|:]*))?'
                         r'(@(?P<n>[^|]+)|:(?P<step>[^|]+))?'
                         r'(\|(?P<stuff>.+))?$')

    def __init__(self, *args, **kwargs):
        # Accept either a single shorthand string, or two/three positional
        # numbers (start, stop[, step]), plus keyword arguments.
        d = {}
        if len(args) == 1:
            d = self.parse(args[0])
        elif len(args) in (2, 3):
            d['start'], d['stop'] = [float(x) for x in args[:2]]
            if len(args) == 3:
                d['step'] = float(args[2])

        for k, v in kwargs.items():
            if k in d:
                raise ArgumentError('%s specified more than once' % k)

            d[k] = v

        SObject.__init__(self, **d)

    def __str__(self):
        # Reconstruct the shorthand string representation.
        def sfloat(x):
            if x is not None:
                return '%g' % x
            else:
                return ''

        # NOTE(review): truthiness of a multi-element numpy array raises
        # ValueError -- this seems to rely on ``values`` being None when
        # unset; confirm for the explicit-values case.
        if self.values:
            return ','.join('%g' % x for x in self.values)

        if self.start is None and self.stop is None:
            s0 = ''
        else:
            s0 = '%s .. %s' % (sfloat(self.start), sfloat(self.stop))

        s1 = ''
        if self.step is not None:
            s1 = [' : %g', ':%g'][s0 == ''] % self.step
        elif self.n is not None:
            s1 = [' @ %i', '@%i'][s0 == ''] % self.n

        if self.spacing == 'lin' and self.relative == '':
            s2 = ''
        else:
            x = []
            if self.spacing != 'lin':
                x.append(self.spacing)

            if self.relative != '':
                x.append(self.relative)

            s2 = ' | %s' % ','.join(x)

        return s0 + s1 + s2

    @classmethod
    def parse(cls, s):
        '''
        Parse a shorthand range string into a dict of constructor args.

        Falls back to a plain comma-separated list of values if the
        string does not match the range pattern.
        '''
        s = re.sub(r'\s+', '', s)
        m = cls.pattern.match(s)
        if not m:
            try:
                vals = [ufloat(x) for x in s.split(',')]
            except Exception:
                raise InvalidGridDef(
                    '"%s" is not a valid range specification' % s)

            return dict(values=num.array(vals, dtype=float))

        d = m.groupdict()
        try:
            start = ufloat_or_none(d['start'])
            stop = ufloat_or_none(d['stop'])
            step = ufloat_or_none(d['step'])
            n = int_or_none(d['n'])
        except Exception:
            raise InvalidGridDef(
                '"%s" is not a valid range specification' % s)

        spacing = 'lin'
        relative = ''

        if d['stuff'] is not None:
            t = d['stuff'].split(',')
            for x in t:
                if x in cls.spacing.choices:
                    spacing = x
                elif x and x in cls.relative.choices:
                    relative = x
                else:
                    raise InvalidGridDef(
                        '"%s" is not a valid range specification' % s)

        return dict(start=start, stop=stop, step=step, n=n, spacing=spacing,
                    relative=relative)

    def make(self, mi=None, ma=None, inc=None, base=None, eps=1e-5):
        '''
        Create the list of values described by this range.

        Missing specification parts are filled in from the context
        arguments ``mi``, ``ma``, ``inc`` and ``base``.

        :raises InvalidGridDef: if the specification cannot be completed
            in the given context
        '''
        if self.values:
            return self.values

        start = self.start
        stop = self.stop
        step = self.step
        n = self.n

        # A negative step means counting downwards; swap the context
        # bounds accordingly.
        swap = step is not None and step < 0.
        if start is None:
            start = [mi, ma][swap]
        if stop is None:
            stop = [ma, mi][swap]
        if step is None and inc is not None:
            step = [inc, -inc][ma < mi]

        if start is None or stop is None:
            raise InvalidGridDef(
                'Cannot use range specification "%s" without start '
                'and stop in this context' % self)

        if step is None and n is None:
            step = stop - start

        if n is None:
            if (step < 0) != (stop - start < 0):
                raise InvalidGridDef(
                    'Range specification "%s" has inconsistent ordering '
                    '(step < 0 => stop > start)' % self)

            # Snap stop to a whole number of steps; if the rounding error
            # is too large, truncate to the last step inside the range.
            n = int(round((stop - start) / step)) + 1
            stop2 = start + (n - 1) * step
            if abs(stop - stop2) > eps:
                n = int(math.floor((stop - start) / step)) + 1
                stop = start + (n - 1) * step
            else:
                stop = stop2

        if start == stop:
            n = 1

        if self.spacing == 'lin':
            vals = num.linspace(start, stop, n)

        elif self.spacing in ('log', 'symlog'):
            if start > 0. and stop > 0.:
                vals = num.exp(num.linspace(num.log(start),
                                            num.log(stop), n))
            elif start < 0. and stop < 0.:
                vals = -num.exp(num.linspace(num.log(-start),
                                             num.log(-stop), n))
            else:
                raise InvalidGridDef(
                    'Log ranges should not include or cross zero '
                    '(in range specification "%s").' % self)

            if self.spacing == 'symlog':
                # Mirror the log-spaced values around zero.
                nvals = - vals
                vals = num.concatenate((nvals[::-1], vals))

        if self.relative in ('add', 'mult') and base is None:
            raise InvalidGridDef(
                'Cannot use relative range specification in this context.')

        vals = self.make_relative(base, vals)

        return list(map(float, vals))

    def make_relative(self, base, vals):
        # Apply the relative mode (offset or scale by ``base``) in place.
        if self.relative == 'add':
            vals += base

        if self.relative == 'mult':
            vals *= base

        return vals
class GridDefElement(Object):
    '''
    Single parameter/range pair of a grid definition.

    Can be created from a shorthand string of the form
    ``'param = range'``.
    '''

    param = meta.StringID.T()
    rs = Range.T()

    def __init__(self, shorthand=None, **kwargs):
        if shorthand is not None:
            parts = shorthand.split('=')
            if len(parts) != 2:
                raise InvalidGridDef(
                    'Invalid grid specification element: %s' % shorthand)

            kwargs['param'] = parts[0].strip()
            kwargs['rs'] = Range(parts[1].strip())

        Object.__init__(self, **kwargs)

    def shorthand(self):
        return self.param + ' = ' + str(self.rs)
class GridDef(Object):
    '''
    Collection of parameter ranges forming a grid definition.

    Can be created from a shorthand string with elements separated by
    ``';'`` or newlines, each of the form ``'param = range'``.
    '''

    elements = List.T(GridDefElement.T())

    def __init__(self, shorthand=None, **kwargs):
        if shorthand is not None:
            t = shorthand.splitlines()
            tt = []
            for x in t:
                x = x.strip()
                if x:
                    tt.extend(x.split(';'))

            elements = []
            for se in tt:
                # Bug fix: each entry is a single 'param = range' pair and
                # must be parsed as GridDefElement. Previously GridDef(se)
                # was appended, recursing per element and violating the
                # GridDefElement typing of ``elements``.
                elements.append(GridDefElement(se))

            kwargs['elements'] = elements

        Object.__init__(self, **kwargs)

    def shorthand(self):
        return '; '.join(str(x) for x in self.elements)
class Cloneable(object):
    '''
    Mix-in providing dict-like access to guts properties and cloning.
    '''

    def __iter__(self):
        return iter(self.T.propnames)

    def __getitem__(self, k):
        if k in self.keys():
            return getattr(self, k)

        raise KeyError(k)

    def __setitem__(self, k, v):
        if k in self.keys():
            return setattr(self, k, v)

        raise KeyError(k)

    def clone(self, **kwargs):
        '''
        Make a copy of the object.

        A new object of the same class is created and initialized with the
        parameters of the object on which this method is called on. If
        ``kwargs`` are given, these are used to override any of the
        initialization parameters.
        '''
        params = {}
        for name in self:
            value = getattr(self, name)
            # Nested cloneables are copied recursively.
            params[name] = \
                value.clone() if isinstance(value, Cloneable) else value

        params.update(kwargs)
        return self.__class__(**params)

    @classmethod
    def keys(cls):
        '''
        Get list of the source model's parameter names.
        '''
        return cls.T.propnames
class STF(Object, Cloneable):

    '''
    Base class for source time functions.

    The base class itself behaves like a unit pulse: all energy release
    is concentrated at a single instant in time.
    '''

    def __init__(self, effective_duration=None, **kwargs):
        # Allow initialization via ``effective_duration``; it is converted
        # to the subclass' native ``duration`` parameter.
        if effective_duration is not None:
            kwargs['duration'] = effective_duration / \
                self.factor_duration_to_effective()

        Object.__init__(self, **kwargs)

    @classmethod
    def factor_duration_to_effective(cls):
        # Conversion factor between nominal and effective duration;
        # overridden by subclasses.
        return 1.0

    def centroid_time(self, tref):
        # For a unit pulse the centroid coincides with the reference time.
        return tref

    @property
    def effective_duration(self):
        # NOTE(review): relies on the subclass defining ``duration``; the
        # base class declares no such property itself.
        return self.duration * self.factor_duration_to_effective()

    def discretize_t(self, deltat, tref):
        '''
        Discretize the STF onto the sampling raster ``deltat``.

        :returns: ``(times, amplitudes)``; a reference time falling
            between two samples is split linearly onto the two
            neighbouring samples.
        '''
        tl = math.floor(tref / deltat) * deltat
        th = math.ceil(tref / deltat) * deltat
        if tl == th:
            return num.array([tl], dtype=float), num.ones(1)
        else:
            return (
                num.array([tl, th], dtype=float),
                num.array([th - tref, tref - tl], dtype=float) / deltat)

    def base_key(self):
        # Key used to decide whether GF stacking results can be shared.
        return (type(self).__name__,)
# Shared unit-pulse STF instance, used when no explicit STF is configured.
g_unit_pulse = STF()
def sshift(times, amplitudes, tshift, deltat):
    '''
    Shift sampled amplitudes by ``tshift``, distributing sub-sample
    fractions linearly onto the neighbouring samples.
    '''
    t0 = math.floor(tshift / deltat) * deltat
    t1 = math.ceil(tshift / deltat) * deltat
    if t0 == t1:
        # Shift falls exactly on the raster, nothing to redistribute.
        return times, amplitudes

    weight_left = (t1 - tshift) / deltat
    weight_right = (tshift - t0) / deltat

    shifted = num.zeros(amplitudes.size + 1, dtype=float)
    shifted[:-1] += weight_left * amplitudes
    shifted[1:] += weight_right * amplitudes

    shifted_times = num.arange(times.size + 1, dtype=float) * \
        deltat + times[0] + t0

    return shifted_times, shifted
class BoxcarSTF(STF):

    '''
    Boxcar type source time function.

    .. figure :: /static/stf-BoxcarSTF.svg
        :width: 40%
        :align: center
        :alt: boxcar source time function
    '''

    duration = Float.T(
        default=0.0,
        help='duration of the boxcar')

    anchor = Float.T(
        default=0.0,
        help='anchor point with respect to source.time: ('
             '-1.0: left -> source duration [0, T] ~ hypocenter time, '
             ' 0.0: center -> source duration [-T/2, T/2] ~ centroid time, '
             '+1.0: right -> source duration [-T, 0] ~ rupture end time)')

    @classmethod
    def factor_duration_to_effective(cls):
        # For a boxcar, nominal and effective duration coincide.
        return 1.0

    def centroid_time(self, tref):
        # Centroid offset from the reference time depends on the anchor.
        return tref - 0.5 * self.duration * self.anchor

    def discretize_t(self, deltat, tref):
        '''
        Discretize the boxcar onto the sampling raster ``deltat``.

        The boxcar is integrated piecewise over the sample bins so the
        amplitudes sum to one, then shifted to restore the exact centroid
        time after snapping to the raster.
        '''
        tmin_stf = tref - self.duration * (self.anchor + 1.) * 0.5
        tmax_stf = tref + self.duration * (1. - self.anchor) * 0.5
        tmin = round(tmin_stf / deltat) * deltat
        tmax = round(tmax_stf / deltat) * deltat
        nt = int(round((tmax - tmin) / deltat)) + 1
        times = num.linspace(tmin, tmax, nt)
        amplitudes = num.ones_like(times)
        if times.size > 1:
            t_edges = num.linspace(
                tmin - 0.5 * deltat, tmax + 0.5 * deltat, nt + 1)
            # Piecewise-linear description of the boxcar edges.
            t = tmin_stf + self.duration * num.array(
                [0.0, 0.0, 1.0, 1.0], dtype=float)
            f = num.array([0., 1., 1., 0.], dtype=float)
            amplitudes = util.plf_integrate_piecewise(t_edges, t, f)
            amplitudes /= num.sum(amplitudes)

        # Correct the centroid displacement introduced by snapping.
        tshift = (num.sum(amplitudes * times) - self.centroid_time(tref))

        return sshift(times, amplitudes, -tshift, deltat)

    def base_key(self):
        # Duration and anchor control GF stack sharing.
        return (type(self).__name__, self.duration, self.anchor)
class TriangularSTF(STF):

    '''
    Triangular type source time function.

    .. figure :: /static/stf-TriangularSTF.svg
        :width: 40%
        :align: center
        :alt: triangular source time function
    '''

    duration = Float.T(
        default=0.0,
        help='baseline of the triangle')

    peak_ratio = Float.T(
        default=0.5,
        help='fraction of time compared to duration, '
             'when the maximum amplitude is reached')

    anchor = Float.T(
        default=0.0,
        help='anchor point with respect to source.time: ('
             '-1.0: left -> source duration [0, T] ~ hypocenter time, '
             ' 0.0: center -> source duration [-T/2, T/2] ~ centroid time, '
             '+1.0: right -> source duration [-T, 0] ~ rupture end time)')

    @classmethod
    def factor_duration_to_effective(cls, peak_ratio=None):
        # Conversion factor between baseline and effective duration,
        # depending on the triangle's asymmetry.
        if peak_ratio is None:
            peak_ratio = cls.peak_ratio.default()

        return math.sqrt((peak_ratio**2 - peak_ratio + 1.0) * 2.0 / 3.0)

    def __init__(self, effective_duration=None, **kwargs):
        # The effective-duration conversion depends on peak_ratio, so the
        # base-class __init__ cannot be reused directly.
        if effective_duration is not None:
            kwargs['duration'] = effective_duration / \
                self.factor_duration_to_effective(
                    kwargs.get('peak_ratio', None))

        STF.__init__(self, **kwargs)

    @property
    def centroid_ratio(self):
        # Fractional position of the triangle's centroid on the baseline.
        ra = self.peak_ratio
        rb = 1.0 - ra
        return self.peak_ratio + (rb**2 / 3. - ra**2 / 3.) / (ra + rb)

    def centroid_time(self, tref):
        ca = self.centroid_ratio
        cb = 1.0 - ca
        if self.anchor <= 0.:
            return tref - ca * self.duration * self.anchor
        else:
            return tref - cb * self.duration * self.anchor

    @property
    def effective_duration(self):
        return self.duration * self.factor_duration_to_effective(
            self.peak_ratio)

    def tminmax_stf(self, tref):
        # Time window covered by the triangle, depending on the anchor.
        ca = self.centroid_ratio
        cb = 1.0 - ca
        if self.anchor <= 0.:
            tmin_stf = tref - ca * self.duration * (self.anchor + 1.)
            tmax_stf = tmin_stf + self.duration
        else:
            tmax_stf = tref + cb * self.duration * (1. - self.anchor)
            tmin_stf = tmax_stf - self.duration

        return tmin_stf, tmax_stf

    def discretize_t(self, deltat, tref):
        '''
        Discretize the triangle onto the sampling raster ``deltat``.

        The triangle is integrated piecewise over the sample bins so the
        resulting amplitudes sum to one.
        '''
        tmin_stf, tmax_stf = self.tminmax_stf(tref)

        tmin = round(tmin_stf / deltat) * deltat
        tmax = round(tmax_stf / deltat) * deltat
        nt = int(round((tmax - tmin) / deltat)) + 1
        if nt > 1:
            t_edges = num.linspace(
                tmin - 0.5 * deltat, tmax + 0.5 * deltat, nt + 1)
            # Piecewise-linear description of the triangle.
            t = tmin_stf + self.duration * num.array(
                [0.0, self.peak_ratio, 1.0], dtype=float)
            f = num.array([0., 1., 0.], dtype=float)
            amplitudes = util.plf_integrate_piecewise(t_edges, t, f)
            amplitudes /= num.sum(amplitudes)
        else:
            amplitudes = num.ones(1)

        times = num.linspace(tmin, tmax, nt)
        return times, amplitudes

    def base_key(self):
        return (
            type(self).__name__, self.duration, self.peak_ratio, self.anchor)
class HalfSinusoidSTF(STF):

    '''
    Half sinusoid type source time function.

    .. figure :: /static/stf-HalfSinusoidSTF.svg
        :width: 40%
        :align: center
        :alt: half-sinusouid source time function
    '''

    duration = Float.T(
        default=0.0,
        help='duration of the half-sinusoid (baseline)')

    anchor = Float.T(
        default=0.0,
        help='anchor point with respect to source.time: ('
             '-1.0: left -> source duration [0, T] ~ hypocenter time, '
             ' 0.0: center -> source duration [-T/2, T/2] ~ centroid time, '
             '+1.0: right -> source duration [-T, 0] ~ rupture end time)')

    exponent = Int.T(
        default=1,
        help='set to 2 to use square of the half-period sinusoidal function.')

    def __init__(self, effective_duration=None, **kwargs):
        # The effective-duration conversion depends on the exponent, so
        # the base-class __init__ cannot be reused directly.
        if effective_duration is not None:
            kwargs['duration'] = effective_duration / \
                self.factor_duration_to_effective(
                    kwargs.get('exponent', 1))

        STF.__init__(self, **kwargs)

    @classmethod
    def factor_duration_to_effective(cls, exponent):
        # Conversion factor between baseline and effective duration for
        # the plain (exponent 1) and squared (exponent 2) half-sinusoid.
        if exponent == 1:
            return math.sqrt(3.0 * math.pi**2 - 24.0) / math.pi
        elif exponent == 2:
            return math.sqrt(math.pi**2 - 6) / math.pi
        else:
            raise ValueError('Exponent for HalfSinusoidSTF must be 1 or 2.')

    @property
    def effective_duration(self):
        return self.duration * self.factor_duration_to_effective(self.exponent)

    def centroid_time(self, tref):
        return tref - 0.5 * self.duration * self.anchor

    def discretize_t(self, deltat, tref):
        '''
        Discretize the half-sinusoid onto the sampling raster ``deltat``.

        Uses the analytic antiderivative evaluated at the sample-bin
        edges, so the amplitudes sum to one.
        '''
        tmin_stf = tref - self.duration * (self.anchor + 1.) * 0.5
        tmax_stf = tref + self.duration * (1. - self.anchor) * 0.5
        tmin = round(tmin_stf / deltat) * deltat
        tmax = round(tmax_stf / deltat) * deltat
        nt = int(round((tmax - tmin) / deltat)) + 1
        if nt > 1:
            # Clamp bin edges to the support of the STF.
            t_edges = num.maximum(tmin_stf, num.minimum(tmax_stf, num.linspace(
                tmin - 0.5 * deltat, tmax + 0.5 * deltat, nt + 1)))

            if self.exponent == 1:
                fint = -num.cos(
                    (t_edges - tmin_stf) * (math.pi / self.duration))

            elif self.exponent == 2:
                fint = (t_edges - tmin_stf) / self.duration \
                    - 1.0 / (2.0 * math.pi) * num.sin(
                        (t_edges - tmin_stf) * (2.0 * math.pi / self.duration))
            else:
                raise ValueError(
                    'Exponent for HalfSinusoidSTF must be 1 or 2.')

            # Per-bin integral from differences of the antiderivative.
            amplitudes = fint[1:] - fint[:-1]
            amplitudes /= num.sum(amplitudes)
        else:
            amplitudes = num.ones(1)

        times = num.linspace(tmin, tmax, nt)
        return times, amplitudes

    def base_key(self):
        return (type(self).__name__, self.duration, self.anchor)
class SmoothRampSTF(STF):
    '''
    Smooth-ramp type source time function for near-field displacement.
    Based on moment function of double-couple point source proposed by Bruestle
    and Mueller (PEPI, 1983).

    .. [1] W. Bruestle, G. Mueller (1983), Moment and duration of shallow
        earthquakes from Love-wave modelling for regional distances, PEPI 32,
        312-324.

    .. figure :: /static/stf-SmoothRampSTF.svg
        :width: 40%
        :alt: smooth ramp source time function
    '''
    duration = Float.T(
        default=0.0,
        help='duration of the ramp (baseline)')

    rise_ratio = Float.T(
        default=0.5,
        help='fraction of time compared to duration, '
             'when the maximum amplitude is reached')

    anchor = Float.T(
        default=0.0,
        help='anchor point with respect to source.time: ('
             '-1.0: left -> source duration ``[0, T]`` ~ hypocenter time, '
             '0.0: center -> source duration ``[-T/2, T/2]`` ~ centroid time, '
             '+1.0: right -> source duration ``[-T, 0]`` ~ rupture end time)')

    def discretize_t(self, deltat, tref):
        '''
        Discretize the smooth ramp onto the sampling raster ``deltat``.

        The rise portion follows the cosine-based shape of [1]_; samples
        after the rise stay at the constant plateau level. Amplitudes are
        normalized to sum to one.
        '''
        tmin_stf = tref - self.duration * (self.anchor + 1.) * 0.5
        tmax_stf = tref + self.duration * (1. - self.anchor) * 0.5
        tmin = round(tmin_stf / deltat) * deltat
        tmax = round(tmax_stf / deltat) * deltat
        D = round((tmax - tmin) / deltat) * deltat
        nt = int(round(D / deltat)) + 1
        times = num.linspace(tmin, tmax, nt)
        if nt > 1:
            rise_time = self.rise_ratio * self.duration
            amplitudes = num.ones_like(times)
            tp = tmin + rise_time
            # Samples within the rise portion of the ramp.
            ii = num.where(times <= tp)
            t_inc = times[ii]
            a = num.cos(num.pi * (t_inc - tmin_stf) / rise_time)
            b = num.cos(3 * num.pi * (t_inc - tmin_stf) / rise_time) - 1.0
            amplitudes[ii] = (9. / 16.) * (1 - a + (1. / 9.) * b)

            amplitudes /= num.sum(amplitudes)
        else:
            amplitudes = num.ones(1)

        return times, amplitudes

    def base_key(self):
        return (type(self).__name__,
                self.duration, self.rise_ratio, self.anchor)
class ResonatorSTF(STF):
    '''
    Simple resonator like source time function.

    .. math ::

        f(t) = 0 for t < 0
        f(t) = e^{-t/tau} * sin(2 * pi * f * t)

    .. figure :: /static/stf-SmoothRampSTF.svg
        :width: 40%
        :alt: smooth ramp source time function
    '''

    # NOTE(review): the figure above reuses the SmoothRampSTF graphic --
    # confirm whether a dedicated ResonatorSTF figure exists.

    duration = Float.T(
        default=0.0,
        help='decay time')

    frequency = Float.T(
        default=1.0,
        help='resonance frequency')

    def discretize_t(self, deltat, tref):
        # Sample the damped sinusoid over three decay times, starting at
        # the reference time. Amplitudes are NOT normalized here.
        tmin_stf = tref
        tmax_stf = tref + self.duration * 3
        tmin = math.floor(tmin_stf / deltat) * deltat
        tmax = math.ceil(tmax_stf / deltat) * deltat
        times = util.arange2(tmin, tmax, deltat)
        amplitudes = num.exp(-(times - tref) / self.duration) \
            * num.sin(2.0 * num.pi * self.frequency * (times - tref))

        return times, amplitudes

    def base_key(self):
        return (type(self).__name__,
                self.duration, self.frequency)
class STFMode(StringChoice):
    '''
    Whether to apply the source time function ``'pre'`` or ``'post'``
    GF stacking.
    '''
    choices = ['pre', 'post']
class Source(Location, Cloneable):
    '''
    Base class for all source models.

    All sources share a location (via
    :py:class:`pyrocko.model.Location`), an origin time and an optional
    source time function applied either before or after GF stacking.
    '''

    name = String.T(optional=True, default='')

    time = Timestamp.T(
        default=Timestamp.D('1970-01-01 00:00:00'),
        help='source origin time.')

    stf = STF.T(
        optional=True,
        help='source time function.')

    stf_mode = STFMode.T(
        default='post',
        help='whether to apply source time function in pre or '
             'post-processing.')
    def __init__(self, **kwargs):
        # Delegate initialization of position attributes to Location.
        Location.__init__(self, **kwargs)
    def update(self, **kwargs):
        '''
        Change some of the source model's parameters.

        Example::

          >>> from pyrocko import gf
          >>> s = gf.DCSource()
          >>> s.update(strike=66., dip=33.)
          >>> print(s)
          --- !pf.DCSource
          depth: 0.0
          time: 1970-01-01 00:00:00
          magnitude: 6.0
          strike: 66.0
          dip: 33.0
          rake: 0.0

        '''

        for (k, v) in kwargs.items():
            self[k] = v
    def grid(self, **variables):
        '''
        Create grid of source model variations.

        :returns: :py:class:`SourceGrid` instance.

        Example::

          >>> from pyrocko import gf
          >>> base = DCSource()
          >>> R = gf.Range
          >>> for s in base.grid(depth=R('1k .. 10k : 1k')):
          ...     print(s.depth)

        '''
        return SourceGrid(base=self, variables=variables)
    def base_key(self):
        '''
        Get key to decide about source discretization / GF stack sharing.

        When two source models differ only in amplitude and origin time, the
        discretization and the GF stacking can be done only once for a unit
        amplitude and a zero origin time and the amplitude and origin times of
        the seismograms can be applied during post-processing of the synthetic
        seismogram.

        For any derived parameterized source model, this method is called to
        decide if discretization and stacking of the source should be shared.
        When two source models return an equal vector of values discretization
        is shared.
        '''
        # The pre-stacking STF contributes to the key, since it changes
        # the discretization itself.
        return (self.depth, self.lat, self.north_shift,
                self.lon, self.east_shift, self.time, type(self).__name__) + \
            self.effective_stf_pre().base_key()
    def get_factor(self):
        '''
        Get the scaling factor to be applied during post-processing.

        Discretization of the base seismogram is usually done for a unit
        amplitude, because a common factor can be efficiently multiplied to
        final seismograms. This eliminates the need to repeat the stacking
        when creating seismograms for a series of source models only
        differing in amplitude.

        This method should return the scaling factor to apply in the
        post-processing (often this is simply the scalar moment of the
        source).
        '''

        return 1.0
1231 def effective_stf_pre(self):
1232 '''
1233 Return the STF applied before stacking of the Green's functions.
1235 This STF is used during discretization of the parameterized source
1236 models, i.e. to produce a temporal distribution of point sources.
1238 Handling of the STF before stacking of the GFs is less efficient but
1239 allows to use different source time functions for different parts of
1240 the source.
1241 '''
1243 if self.stf is not None and self.stf_mode == 'pre':
1244 return self.stf
1245 else:
1246 return g_unit_pulse
1248 def effective_stf_post(self):
1249 '''
1250 Return the STF applied after stacking of the Green's fuctions.
1252 This STF is used in the post-processing of the synthetic seismograms.
1254 Handling of the STF after stacking of the GFs is usually more efficient
1255 but is only possible when a common STF is used for all subsources.
1256 '''
1258 if self.stf is not None and self.stf_mode == 'post':
1259 return self.stf
1260 else:
1261 return g_unit_pulse
1263 def _dparams_base(self):
1264 return dict(times=arr(self.time),
1265 lat=self.lat, lon=self.lon,
1266 north_shifts=arr(self.north_shift),
1267 east_shifts=arr(self.east_shift),
1268 depths=arr(self.depth))
1270 def _hash(self):
1271 sha = sha1()
1272 for k in self.base_key():
1273 sha.update(str(k).encode())
1274 return sha.hexdigest()
1276 def _dparams_base_repeated(self, times):
1277 if times is None:
1278 return self._dparams_base()
1280 nt = times.size
1281 north_shifts = num.repeat(self.north_shift, nt)
1282 east_shifts = num.repeat(self.east_shift, nt)
1283 depths = num.repeat(self.depth, nt)
1284 return dict(times=times,
1285 lat=self.lat, lon=self.lon,
1286 north_shifts=north_shifts,
1287 east_shifts=east_shifts,
1288 depths=depths)
1290 def pyrocko_event(self, store=None, target=None, **kwargs):
1291 duration = None
1292 if self.stf:
1293 duration = self.stf.effective_duration
1295 return model.Event(
1296 lat=self.lat,
1297 lon=self.lon,
1298 north_shift=self.north_shift,
1299 east_shift=self.east_shift,
1300 time=self.time,
1301 name=self.name,
1302 depth=self.depth,
1303 duration=duration,
1304 **kwargs)
1306 def outline(self, cs='xyz'):
1307 points = num.atleast_2d(num.zeros([1, 3]))
1309 points[:, 0] += self.north_shift
1310 points[:, 1] += self.east_shift
1311 points[:, 2] += self.depth
1312 if cs == 'xyz':
1313 return points
1314 elif cs == 'xy':
1315 return points[:, :2]
1316 elif cs in ('latlon', 'lonlat'):
1317 latlon = ne_to_latlon(
1318 self.lat, self.lon, points[:, 0], points[:, 1])
1320 latlon = num.array(latlon).T
1321 if cs == 'latlon':
1322 return latlon
1323 else:
1324 return latlon[:, ::-1]
1326 @classmethod
1327 def from_pyrocko_event(cls, ev, **kwargs):
1328 if ev.depth is None:
1329 raise ConversionError(
1330 'Cannot convert event object to source object: '
1331 'no depth information available')
1333 stf = None
1334 if ev.duration is not None:
1335 stf = HalfSinusoidSTF(effective_duration=ev.duration)
1337 d = dict(
1338 name=ev.name,
1339 time=ev.time,
1340 lat=ev.lat,
1341 lon=ev.lon,
1342 north_shift=ev.north_shift,
1343 east_shift=ev.east_shift,
1344 depth=ev.depth,
1345 stf=stf)
1346 d.update(kwargs)
1347 return cls(**d)
1349 def get_magnitude(self):
1350 raise NotImplementedError(
1351 '%s does not implement get_magnitude()'
1352 % self.__class__.__name__)
class SourceWithMagnitude(Source):
    '''
    Base class for sources containing a moment magnitude.
    '''

    magnitude = Float.T(
        default=6.0,
        help='Moment magnitude Mw as in [Hanks and Kanamori, 1979]')

    def __init__(self, **kwargs):
        # Accept scalar moment [Nm] as an alternative to magnitude; an
        # explicitly given 'magnitude' takes precedence over 'moment'.
        if 'moment' in kwargs:
            mom = kwargs.pop('moment')
            if 'magnitude' not in kwargs:
                kwargs['magnitude'] = float(pmt.moment_to_magnitude(mom))

        Source.__init__(self, **kwargs)

    @property
    def moment(self):
        # Scalar seismic moment [Nm], derived from the stored magnitude.
        return float(pmt.magnitude_to_moment(self.magnitude))

    @moment.setter
    def moment(self, value):
        # Setting the moment stores the equivalent magnitude.
        self.magnitude = float(pmt.moment_to_magnitude(value))

    def pyrocko_event(self, store=None, target=None, **kwargs):
        '''
        Convert to a :py:class:`pyrocko.model.Event`, including magnitude.
        '''
        return Source.pyrocko_event(
            self, store, target,
            magnitude=self.magnitude,
            **kwargs)

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        '''
        Create a source from an event, taking over its magnitude when set.
        '''
        d = {}
        if ev.magnitude:
            d.update(magnitude=ev.magnitude)

        d.update(kwargs)
        return super(SourceWithMagnitude, cls).from_pyrocko_event(ev, **d)

    def get_magnitude(self):
        return self.magnitude
class DerivedMagnitudeError(ValidationError):
    '''
    Raised when a derived magnitude cannot be computed, e.g. because of
    conflicting parameters or missing earth model information.
    '''
    pass
class SourceWithDerivedMagnitude(Source):
    '''
    Base class for sources whose magnitude is derived from other
    parameters.
    '''

    class __T(Source.T):

        def validate_extra(self, val):
            # Run the source's parameter-conflict checks as part of guts
            # validation.
            Source.T.validate_extra(self, val)
            val.check_conflicts()

    def check_conflicts(self):
        '''
        Check for parameter conflicts.

        To be overloaded in subclasses. Raises :py:exc:`DerivedMagnitudeError`
        on conflicts.
        '''
        pass

    def get_magnitude(self, store=None, target=None):
        # Subclasses override this; the base class cannot derive a
        # magnitude.
        raise DerivedMagnitudeError('No magnitude set.')

    def get_moment(self, store=None, target=None):
        # Scalar moment [Nm] derived from the (possibly store-dependent)
        # magnitude.
        return float(pmt.magnitude_to_moment(
            self.get_magnitude(store, target)))

    def pyrocko_moment_tensor(self, store=None, target=None):
        raise NotImplementedError(
            '%s does not implement pyrocko_moment_tensor()'
            % self.__class__.__name__)

    def pyrocko_event(self, store=None, target=None, **kwargs):
        '''
        Convert to an event; moment tensor and magnitude are included only
        when they can be derived.
        '''
        try:
            mt = self.pyrocko_moment_tensor(store, target)
            magnitude = self.get_magnitude()
        except (DerivedMagnitudeError, NotImplementedError):
            mt = None
            magnitude = None

        return Source.pyrocko_event(
            self, store, target,
            moment_tensor=mt,
            magnitude=magnitude,
            **kwargs)
class ExplosionSource(SourceWithDerivedMagnitude):
    '''
    An isotropic explosion point source.
    '''

    magnitude = Float.T(
        optional=True,
        help='moment magnitude Mw as in [Hanks and Kanamori, 1979]')

    volume_change = Float.T(
        optional=True,
        help='volume change of the explosion/implosion or '
             'the contracting/extending magmatic source. [m^3]')

    discretized_source_class = meta.DiscretizedExplosionSource

    def __init__(self, **kwargs):
        # Accept scalar moment [Nm] as an alternative to magnitude; an
        # explicitly given 'magnitude' takes precedence over 'moment'.
        if 'moment' in kwargs:
            mom = kwargs.pop('moment')
            if 'magnitude' not in kwargs:
                kwargs['magnitude'] = float(pmt.moment_to_magnitude(mom))

        SourceWithDerivedMagnitude.__init__(self, **kwargs)

    def base_key(self):
        return SourceWithDerivedMagnitude.base_key(self) + \
            (self.volume_change,)

    def check_conflicts(self):
        # magnitude and volume_change both determine the source strength;
        # setting both would be ambiguous.
        if self.magnitude is not None and self.volume_change is not None:
            raise DerivedMagnitudeError(
                'Magnitude and volume_change are both defined.')

    def get_magnitude(self, store=None, target=None):
        '''
        Get the moment magnitude, derived from volume change when needed.

        Falls back to the magnitude of a unit moment (1 Nm) when neither
        magnitude nor volume change is set.
        '''
        self.check_conflicts()

        if self.magnitude is not None:
            return self.magnitude

        elif self.volume_change is not None:
            moment = self.volume_change * \
                self.get_moment_to_volume_change_ratio(store, target)

            return float(pmt.moment_to_magnitude(abs(moment)))
        else:
            return float(pmt.moment_to_magnitude(1.0))

    def get_volume_change(self, store=None, target=None):
        '''
        Get the volume change [m^3], derived from magnitude when needed.
        '''
        self.check_conflicts()

        if self.volume_change is not None:
            return self.volume_change

        elif self.magnitude is not None:
            moment = float(pmt.magnitude_to_moment(self.magnitude))
            return moment / self.get_moment_to_volume_change_ratio(
                store, target)

        else:
            # Neither set: volume change equivalent to a unit moment.
            return 1.0 / self.get_moment_to_volume_change_ratio(store)

    def get_moment_to_volume_change_ratio(self, store, target=None):
        '''
        Get the conversion factor between scalar moment and volume change.

        Uses the shear and bulk moduli from the GF store's earth model at
        the source position.

        :raises DerivedMagnitudeError: if no store is given or the source
            position is outside the model bounds.
        '''
        if store is None:
            raise DerivedMagnitudeError(
                'Need earth model to convert between volume change and '
                'magnitude.')

        points = num.array(
            [[self.north_shift, self.east_shift, self.depth]], dtype=float)

        # Default to 'multilinear' interpolation when no target is given.
        interpolation = target.interpolation if target else 'multilinear'
        try:
            shear_moduli = store.config.get_shear_moduli(
                self.lat, self.lon,
                points=points,
                interpolation=interpolation)[0]

            bulk_moduli = store.config.get_bulk_moduli(
                self.lat, self.lon,
                points=points,
                interpolation=interpolation)[0]
        except meta.OutOfBounds:
            raise DerivedMagnitudeError(
                'Could not get shear modulus at source position.')

        return float(2. * shear_moduli + bulk_moduli)

    def get_factor(self):
        return 1.0

    def discretize_basesource(self, store, target=None):
        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)

        amplitudes *= self.get_moment(store, target) * math.sqrt(2. / 3.)

        # A negative volume change flips the sign of the isotropic moment.
        if self.volume_change is not None:
            if self.volume_change < 0.:
                amplitudes *= -1

        return meta.DiscretizedExplosionSource(
            m0s=amplitudes,
            **self._dparams_base_repeated(times))

    def pyrocko_moment_tensor(self, store=None, target=None):
        a = self.get_moment(store, target) * math.sqrt(2. / 3.)
        return pmt.MomentTensor(m=pmt.symmat6(a, a, a, 0., 0., 0.))
class RectangularExplosionSource(ExplosionSource):
    '''
    Rectangular or line explosion source.
    '''

    discretized_source_class = meta.DiscretizedExplosionSource

    strike = Float.T(
        default=0.0,
        help='strike direction in [deg], measured clockwise from north')

    dip = Float.T(
        default=90.0,
        help='dip angle in [deg], measured downward from horizontal')

    length = Float.T(
        default=0.,
        help='length of rectangular source area [m]')

    width = Float.T(
        default=0.,
        help='width of rectangular source area [m]')

    anchor = StringChoice.T(
        choices=['top', 'top_left', 'top_right', 'center', 'bottom',
                 'bottom_left', 'bottom_right'],
        default='center',
        optional=True,
        help='Anchor point for positioning the plane, can be: top, center or'
             'bottom and also top_left, top_right,bottom_left,'
             'bottom_right, center_left and center right')

    nucleation_x = Float.T(
        optional=True,
        help='horizontal position of rupture nucleation in normalized fault '
             'plane coordinates (-1 = left edge, +1 = right edge)')

    nucleation_y = Float.T(
        optional=True,
        help='down-dip position of rupture nucleation in normalized fault '
             'plane coordinates (-1 = upper edge, +1 = lower edge)')

    velocity = Float.T(
        default=3500.,
        help='speed of explosion front [m/s]')

    aggressive_oversampling = Bool.T(
        default=False,
        help='Aggressive oversampling for basesource discretization. '
             'When using \'multilinear\' interpolation oversampling has'
             ' practically no effect.')

    def base_key(self):
        return Source.base_key(self) + (self.strike, self.dip, self.length,
                                        self.width, self.nucleation_x,
                                        self.nucleation_y, self.velocity,
                                        self.anchor)

    def discretize_basesource(self, store, target=None):
        '''
        Discretize the plane into a cloud of point explosion sources.
        '''
        # Convert normalized nucleation coordinates (-1..+1) into offsets
        # in [m] from the plane's center.
        if self.nucleation_x is not None:
            nucx = self.nucleation_x * 0.5 * self.length
        else:
            nucx = None

        if self.nucleation_y is not None:
            nucy = self.nucleation_y * 0.5 * self.width
        else:
            nucy = None

        stf = self.effective_stf_pre()

        points, times, amplitudes, dl, dw, nl, nw = discretize_rect_source(
            store.config.deltas, store.config.deltat,
            self.time, self.north_shift, self.east_shift, self.depth,
            self.strike, self.dip, self.length, self.width, self.anchor,
            self.velocity, stf=stf, nucleation_x=nucx, nucleation_y=nucy)

        # Normalize so that the sub-source amplitudes sum to the total
        # moment of the source.
        amplitudes /= num.sum(amplitudes)
        amplitudes *= self.get_moment(store, target)

        return meta.DiscretizedExplosionSource(
            lat=self.lat,
            lon=self.lon,
            times=times,
            north_shifts=points[:, 0],
            east_shifts=points[:, 1],
            depths=points[:, 2],
            m0s=amplitudes)

    def outline(self, cs='xyz'):
        '''
        Get the outline of the rectangular source in the requested
        coordinate system (see :ref:`coordinate-system-names`).
        '''
        points = outline_rect_source(self.strike, self.dip, self.length,
                                     self.width, self.anchor)

        points[:, 0] += self.north_shift
        points[:, 1] += self.east_shift
        points[:, 2] += self.depth
        if cs == 'xyz':
            return points
        elif cs == 'xy':
            return points[:, :2]
        elif cs in ('latlon', 'lonlat'):
            latlon = ne_to_latlon(
                self.lat, self.lon, points[:, 0], points[:, 1])

            latlon = num.array(latlon).T
            if cs == 'latlon':
                return latlon
            else:
                return latlon[:, ::-1]

    def get_nucleation_abs_coord(self, cs='xy'):
        '''
        Get the nucleation point in absolute coordinates, or
        ``(None, None)`` when no nucleation point is set.
        '''
        if self.nucleation_x is None:
            return None, None

        coords = from_plane_coords(self.strike, self.dip, self.length,
                                   self.width, self.depth, self.nucleation_x,
                                   self.nucleation_y, lat=self.lat,
                                   lon=self.lon, north_shift=self.north_shift,
                                   east_shift=self.east_shift, cs=cs)
        return coords
class DCSource(SourceWithMagnitude):
    '''
    A double-couple point source.
    '''

    strike = Float.T(
        default=0.0,
        help='strike direction in [deg], measured clockwise from north')

    dip = Float.T(
        default=90.0,
        help='dip angle in [deg], measured downward from horizontal')

    rake = Float.T(
        default=0.0,
        help='rake angle in [deg], '
             'measured counter-clockwise from right-horizontal '
             'in on-plane view')

    discretized_source_class = meta.DiscretizedMTSource

    def base_key(self):
        return Source.base_key(self) + (self.strike, self.dip, self.rake)

    def get_factor(self):
        # Discretization is done for unit moment; the scalar moment is
        # applied during post-processing.
        return float(pmt.magnitude_to_moment(self.magnitude))

    def discretize_basesource(self, store, target=None):
        # Unit double-couple moment tensor from strike/dip/rake; the
        # amplitude history comes from the pre-stack STF.
        mot = pmt.MomentTensor(
            strike=self.strike, dip=self.dip, rake=self.rake)

        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)
        return meta.DiscretizedMTSource(
            m6s=mot.m6()[num.newaxis, :] * amplitudes[:, num.newaxis],
            **self._dparams_base_repeated(times))

    def pyrocko_moment_tensor(self, store=None, target=None):
        return pmt.MomentTensor(
            strike=self.strike,
            dip=self.dip,
            rake=self.rake,
            scalar_moment=self.moment)

    def pyrocko_event(self, store=None, target=None, **kwargs):
        return SourceWithMagnitude.pyrocko_event(
            self, store, target,
            moment_tensor=self.pyrocko_moment_tensor(store, target),
            **kwargs)

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        '''
        Create a DC source from an event; strike, dip, rake and magnitude
        are taken from the event's moment tensor when available.
        '''
        d = {}
        mt = ev.moment_tensor
        if mt:
            (strike, dip, rake), _ = mt.both_strike_dip_rake()
            d.update(
                strike=float(strike),
                dip=float(dip),
                rake=float(rake),
                magnitude=float(mt.moment_magnitude()))

        d.update(kwargs)
        return super(DCSource, cls).from_pyrocko_event(ev, **d)
class CLVDSource(SourceWithMagnitude):
    '''
    A pure CLVD point source.
    '''

    discretized_source_class = meta.DiscretizedMTSource

    azimuth = Float.T(
        default=0.0,
        help='azimuth direction of largest dipole, clockwise from north [deg]')

    dip = Float.T(
        default=90.,
        help='dip direction of largest dipole, downward from horizontal [deg]')

    def base_key(self):
        return Source.base_key(self) + (self.azimuth, self.dip)

    def get_factor(self):
        return float(pmt.magnitude_to_moment(self.magnitude))

    @property
    def m6(self):
        # CLVD moment tensor with eigenvalues (-a/2, -a/2, a), rotated so
        # that the largest dipole points in the (azimuth, dip) direction.
        a = math.sqrt(4. / 3.) * self.get_factor()
        m = pmt.symmat6(-0.5 * a, -0.5 * a, a, 0., 0., 0.)
        rotmat1 = pmt.euler_to_matrix(
            d2r * (self.dip - 90.),
            d2r * (self.azimuth - 90.),
            0.)
        m = num.dot(rotmat1.T, num.dot(m, rotmat1))
        return pmt.to6(m)

    @property
    def m6_astuple(self):
        return tuple(self.m6.tolist())

    def discretize_basesource(self, store, target=None):
        # Divide out the factor here; it is reapplied in post-processing
        # (see get_factor).
        factor = self.get_factor()
        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)
        return meta.DiscretizedMTSource(
            m6s=self.m6[num.newaxis, :] * amplitudes[:, num.newaxis] / factor,
            **self._dparams_base_repeated(times))

    def pyrocko_moment_tensor(self, store=None, target=None):
        return pmt.MomentTensor(m=pmt.symmat6(*self.m6_astuple))

    def pyrocko_event(self, store=None, target=None, **kwargs):
        mt = self.pyrocko_moment_tensor(store, target)
        return Source.pyrocko_event(
            self, store, target,
            moment_tensor=self.pyrocko_moment_tensor(store, target),
            magnitude=float(mt.moment_magnitude()),
            **kwargs)
class VLVDSource(SourceWithDerivedMagnitude):
    '''
    Volumetric linear vector dipole source.

    This source is a parameterization for a restricted moment tensor point
    source, useful to represent dyke or sill like inflation or deflation
    sources. The restriction is such that the moment tensor is rotational
    symmetric. It can be represented by a superposition of a linear vector
    dipole (here we use a CLVD for convenience) and an isotropic component.
    The restricted moment tensor has 4 degrees of freedom: 2 independent
    eigenvalues and 2 rotation angles orienting the symmetry axis.

    In this parameterization, the isotropic component is controlled by
    ``volume_change``. To define the moment tensor, it must be converted to
    the scalar moment of the MT's isotropic component. For the conversion,
    the shear modulus at the source's position must be known. This value is
    extracted from the earth model defined in the GF store in use.

    The CLVD part is controlled by its scalar moment :math:`M_0`:
    ``clvd_moment``. The sign of ``clvd_moment`` is used to switch between a
    positive or negative CLVD (the sign of the largest eigenvalue).
    '''

    discretized_source_class = meta.DiscretizedMTSource

    azimuth = Float.T(
        default=0.0,
        help='azimuth direction of symmetry axis, clockwise from north [deg].')

    dip = Float.T(
        default=90.,
        help='dip direction of symmetry axis, downward from horizontal [deg].')

    volume_change = Float.T(
        default=0.,
        help='volume change of the inflation/deflation [m^3].')

    clvd_moment = Float.T(
        default=0.,
        help='scalar moment :math:`M_0` of the CLVD component [Nm]. The sign '
             'controls the sign of the CLVD (the sign of its largest '
             'eigenvalue).')

    def get_moment_to_volume_change_ratio(self, store, target):
        '''
        Get the conversion factor between scalar moment and volume change,
        from the elastic moduli at the source position.

        :raises DerivedMagnitudeError: if store or target is missing, or
            the source position is outside the model bounds.
        '''
        if store is None or target is None:
            raise DerivedMagnitudeError(
                'Need earth model to convert between volume change and '
                'magnitude.')

        points = num.array(
            [[self.north_shift, self.east_shift, self.depth]], dtype=float)

        try:
            shear_moduli = store.config.get_shear_moduli(
                self.lat, self.lon,
                points=points,
                interpolation=target.interpolation)[0]

            bulk_moduli = store.config.get_bulk_moduli(
                self.lat, self.lon,
                points=points,
                interpolation=target.interpolation)[0]
        except meta.OutOfBounds:
            raise DerivedMagnitudeError(
                'Could not get shear modulus at source position.')

        return float(2. * shear_moduli + bulk_moduli)

    def base_key(self):
        return Source.base_key(self) + \
            (self.azimuth, self.dip, self.volume_change, self.clvd_moment)

    def get_magnitude(self, store=None, target=None):
        mt = self.pyrocko_moment_tensor(store, target)
        return float(pmt.moment_to_magnitude(mt.moment))

    def get_m6(self, store, target):
        '''
        Get the six MT components: rotated CLVD part plus isotropic part
        derived from the volume change.
        '''
        a = math.sqrt(4. / 3.) * self.clvd_moment
        m_clvd = pmt.symmat6(-0.5 * a, -0.5 * a, a, 0., 0., 0.)

        # Orient the CLVD's symmetry axis along (azimuth, dip).
        rotmat1 = pmt.euler_to_matrix(
            d2r * (self.dip - 90.),
            d2r * (self.azimuth - 90.),
            0.)
        m_clvd = num.dot(rotmat1.T, num.dot(m_clvd, rotmat1))

        m_iso = self.volume_change * \
            self.get_moment_to_volume_change_ratio(store, target)

        m_iso = pmt.symmat6(m_iso, m_iso, m_iso, 0.,
                            0., 0.,) * math.sqrt(2. / 3)

        m = pmt.to6(m_clvd) + pmt.to6(m_iso)
        return m

    def get_moment(self, store=None, target=None):
        return float(pmt.magnitude_to_moment(
            self.get_magnitude(store, target)))

    def get_m6_astuple(self, store, target):
        m6 = self.get_m6(store, target)
        return tuple(m6.tolist())

    def discretize_basesource(self, store, target=None):
        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)

        m6 = self.get_m6(store, target)
        m6 *= amplitudes / self.get_factor()

        return meta.DiscretizedMTSource(
            m6s=m6[num.newaxis, :],
            **self._dparams_base_repeated(times))

    def pyrocko_moment_tensor(self, store=None, target=None):
        m6_astuple = self.get_m6_astuple(store, target)
        return pmt.MomentTensor(m=pmt.symmat6(*m6_astuple))
class MTSource(Source):
    '''
    A moment tensor point source.
    '''

    discretized_source_class = meta.DiscretizedMTSource

    mnn = Float.T(
        default=1.,
        help='north-north component of moment tensor in [Nm]')

    mee = Float.T(
        default=1.,
        help='east-east component of moment tensor in [Nm]')

    mdd = Float.T(
        default=1.,
        help='down-down component of moment tensor in [Nm]')

    mne = Float.T(
        default=0.,
        help='north-east component of moment tensor in [Nm]')

    mnd = Float.T(
        default=0.,
        help='north-down component of moment tensor in [Nm]')

    med = Float.T(
        default=0.,
        help='east-down component of moment tensor in [Nm]')

    def __init__(self, **kwargs):
        # Accept the six MT components as a single 'm6' sequence in the
        # order (mnn, mee, mdd, mne, mnd, med).
        if 'm6' in kwargs:
            for (k, v) in zip('mnn mee mdd mne mnd med'.split(),
                              kwargs.pop('m6')):
                kwargs[k] = float(v)

        Source.__init__(self, **kwargs)

    @property
    def m6(self):
        # The six MT components as a NumPy array.
        return num.array(self.m6_astuple)

    @property
    def m6_astuple(self):
        return (self.mnn, self.mee, self.mdd, self.mne, self.mnd, self.med)

    @m6.setter
    def m6(self, value):
        self.mnn, self.mee, self.mdd, self.mne, self.mnd, self.med = value

    def base_key(self):
        return Source.base_key(self) + self.m6_astuple

    def discretize_basesource(self, store, target=None):
        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)
        return meta.DiscretizedMTSource(
            m6s=self.m6[num.newaxis, :] * amplitudes[:, num.newaxis],
            **self._dparams_base_repeated(times))

    def get_magnitude(self, store=None, target=None):
        # Scalar moment from the tensor norm (sqrt(2) convention),
        # converted to moment magnitude.
        m6 = self.m6
        return pmt.moment_to_magnitude(
            math.sqrt(num.sum(m6[0:3]**2) + 2.0 * num.sum(m6[3:6]**2)) /
            math.sqrt(2.))

    def pyrocko_moment_tensor(self, store=None, target=None):
        return pmt.MomentTensor(m=pmt.symmat6(*self.m6_astuple))

    def pyrocko_event(self, store=None, target=None, **kwargs):
        mt = self.pyrocko_moment_tensor(store, target)
        return Source.pyrocko_event(
            self, store, target,
            moment_tensor=self.pyrocko_moment_tensor(store, target),
            magnitude=float(mt.moment_magnitude()),
            **kwargs)

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        d = {}
        mt = ev.moment_tensor
        if mt:
            d.update(m6=tuple(map(float, mt.m6())))
        else:
            # No MT available: fall back to an isotropic tensor matching
            # the event's magnitude, when one is set.
            if ev.magnitude is not None:
                mom = pmt.magnitude_to_moment(ev.magnitude)
                v = math.sqrt(2. / 3.) * mom
                d.update(m6=(v, v, v, 0., 0., 0.))

        d.update(kwargs)
        return super(MTSource, cls).from_pyrocko_event(ev, **d)
# Mapping of anchor point names to normalized in-plane coordinates
# (x, y): x is along strike (-1. = left, +1. = right) and y is down-dip
# (-1. = top, +1. = bottom).
map_anchor = {
    'center': (0.0, 0.0),
    'center_left': (-1.0, 0.0),
    'center_right': (1.0, 0.0),
    'top': (0.0, -1.0),
    'top_left': (-1.0, -1.0),
    'top_right': (1.0, -1.0),
    'bottom': (0.0, 1.0),
    'bottom_left': (-1.0, 1.0),
    'bottom_right': (1.0, 1.0)}
class RectangularSource(SourceWithDerivedMagnitude):
    '''
    Classical Haskell source model modified for bilateral rupture.

    The source strength may be given either as moment magnitude or as slip
    (the two are mutually exclusive, see ``check_conflicts``); an
    ``opening_fraction`` adds a tensile component to the otherwise pure
    shear dislocation.
    '''

    discretized_source_class = meta.DiscretizedMTSource

    magnitude = Float.T(
        optional=True,
        help='moment magnitude Mw as in [Hanks and Kanamori, 1979]')

    strike = Float.T(
        default=0.0,
        help='strike direction in [deg], measured clockwise from north')

    dip = Float.T(
        default=90.0,
        help='dip angle in [deg], measured downward from horizontal')

    rake = Float.T(
        default=0.0,
        help='rake angle in [deg], '
             'measured counter-clockwise from right-horizontal '
             'in on-plane view')

    length = Float.T(
        default=0.,
        help='length of rectangular source area [m]')

    width = Float.T(
        default=0.,
        help='width of rectangular source area [m]')

    anchor = StringChoice.T(
        choices=['top', 'top_left', 'top_right', 'center', 'bottom',
                 'bottom_left', 'bottom_right'],
        default='center',
        optional=True,
        help='Anchor point for positioning the plane, can be: ``top, center '
             'bottom, top_left, top_right,bottom_left,'
             'bottom_right, center_left, center right``.')

    nucleation_x = Float.T(
        optional=True,
        help='horizontal position of rupture nucleation in normalized fault '
             'plane coordinates (``-1.`` = left edge, ``+1.`` = right edge)')

    nucleation_y = Float.T(
        optional=True,
        help='down-dip position of rupture nucleation in normalized fault '
             'plane coordinates (``-1.`` = upper edge, ``+1.`` = lower edge)')

    velocity = Float.T(
        default=3500.,
        help='speed of rupture front [m/s]')

    slip = Float.T(
        optional=True,
        help='Slip on the rectangular source area [m]')

    opening_fraction = Float.T(
        default=0.,
        help='Determines fraction of slip related to opening. '
             '(``-1``: pure tensile closing, '
             '``0``: pure shear, '
             '``1``: pure tensile opening)')

    decimation_factor = Int.T(
        optional=True,
        default=1,
        help='Sub-source decimation factor, a larger decimation will'
             ' make the result inaccurate but shorten the necessary'
             ' computation time (use for testing puposes only).')

    aggressive_oversampling = Bool.T(
        default=False,
        help='Aggressive oversampling for basesource discretization. '
             'When using \'multilinear\' interpolation oversampling has'
             ' practically no effect.')
2107 def __init__(self, **kwargs):
2108 if 'moment' in kwargs:
2109 mom = kwargs.pop('moment')
2110 if 'magnitude' not in kwargs:
2111 kwargs['magnitude'] = float(pmt.moment_to_magnitude(mom))
2113 SourceWithDerivedMagnitude.__init__(self, **kwargs)
2115 def base_key(self):
2116 return SourceWithDerivedMagnitude.base_key(self) + (
2117 self.magnitude,
2118 self.slip,
2119 self.strike,
2120 self.dip,
2121 self.rake,
2122 self.length,
2123 self.width,
2124 self.nucleation_x,
2125 self.nucleation_y,
2126 self.velocity,
2127 self.decimation_factor,
2128 self.anchor)
2130 def check_conflicts(self):
2131 if self.magnitude is not None and self.slip is not None:
2132 raise DerivedMagnitudeError(
2133 'Magnitude and slip are both defined.')
    def get_magnitude(self, store=None, target=None):
        '''
        Get the moment magnitude, derived from slip when necessary.

        Deriving the magnitude from slip requires a GF store (earth model)
        and a target (interpolation method). Falls back to the magnitude
        of a unit moment (1 Nm) when neither magnitude nor slip is set.

        :raises DerivedMagnitudeError: on parameter conflicts, or when
            slip is set but store/target are missing.
        '''
        self.check_conflicts()
        if self.magnitude is not None:
            return self.magnitude

        elif self.slip is not None:
            if None in (store, target):
                raise DerivedMagnitudeError(
                    'Magnitude for a rectangular source with slip defined '
                    'can only be derived when earth model and target '
                    'interpolation method are available.')

            # Sum the discretized sub-source moments (third element of the
            # _discretize return value).
            amplitudes = self._discretize(store, target)[2]
            if amplitudes.ndim == 2:
                # CLVD component has no net moment, leave out
                return float(pmt.moment_to_magnitude(
                    num.sum(num.abs(amplitudes[0:2, :]).sum())))
            else:
                return float(pmt.moment_to_magnitude(num.sum(amplitudes)))

        else:
            return float(pmt.moment_to_magnitude(1.0))
2158 def get_factor(self):
2159 return 1.0
2161 def get_slip_tensile(self):
2162 return self.slip * self.opening_fraction
2164 def get_slip_shear(self):
2165 return self.slip - abs(self.get_slip_tensile)
    def _discretize(self, store, target):
        # Discretize the fault plane into sub-sources. Returns
        # (points, times, amplitudes, dl, dw, nl, nw); amplitudes is 1-d
        # (moment per sub-source) or 2-d with one row per MT component
        # family (shear[, iso, clvd]) when tensile slip is involved.

        # Convert normalized nucleation coordinates (-1..+1) into offsets
        # in [m] from the plane's center.
        if self.nucleation_x is not None:
            nucx = self.nucleation_x * 0.5 * self.length
        else:
            nucx = None

        if self.nucleation_y is not None:
            nucy = self.nucleation_y * 0.5 * self.width
        else:
            nucy = None

        stf = self.effective_stf_pre()

        points, times, amplitudes, dl, dw, nl, nw = discretize_rect_source(
            store.config.deltas, store.config.deltat,
            self.time, self.north_shift, self.east_shift, self.depth,
            self.strike, self.dip, self.length, self.width, self.anchor,
            self.velocity, stf=stf, nucleation_x=nucx, nucleation_y=nucy,
            decimation_factor=self.decimation_factor,
            aggressive_oversampling=self.aggressive_oversampling)

        if self.slip is not None:
            # Slip given: moments follow from elastic moduli at the
            # sub-source positions.
            if target is not None:
                interpolation = target.interpolation
            else:
                interpolation = 'nearest_neighbor'
                logger.warning(
                    'no target information available, will use '
                    '"nearest_neighbor" interpolation when extracting shear '
                    'modulus from earth model')

            shear_moduli = store.config.get_shear_moduli(
                self.lat, self.lon,
                points=points,
                interpolation=interpolation)

            tensile_slip = self.get_slip_tensile()
            shear_slip = self.slip - abs(tensile_slip)

            amplitudes_total = [shear_moduli * shear_slip]
            if tensile_slip != 0:
                # Tensile slip contributes isotropic and CLVD rows.
                bulk_moduli = store.config.get_bulk_moduli(
                    self.lat, self.lon,
                    points=points,
                    interpolation=interpolation)

                tensile_iso = bulk_moduli * tensile_slip
                tensile_clvd = (2. / 3.) * shear_moduli * tensile_slip

                amplitudes_total.extend([tensile_iso, tensile_clvd])

            # Scale by the per-sub-source STF amplitudes and patch area.
            amplitudes_total = num.vstack(amplitudes_total).squeeze() * \
                amplitudes * dl * dw

        else:
            # normalization to retain total moment
            amplitudes_norm = amplitudes / num.sum(amplitudes)
            moment = self.get_moment(store, target)

            amplitudes_total = [
                amplitudes_norm * moment * (1 - abs(self.opening_fraction))]
            if self.opening_fraction != 0.:
                amplitudes_total.append(
                    amplitudes_norm * self.opening_fraction * moment)

            amplitudes_total = num.vstack(amplitudes_total).squeeze()

        return points, times, num.atleast_1d(amplitudes_total), dl, dw, nl, nw
2236 def discretize_basesource(self, store, target=None):
2238 points, times, amplitudes, dl, dw, nl, nw = self._discretize(
2239 store, target)
2241 mot = pmt.MomentTensor(
2242 strike=self.strike, dip=self.dip, rake=self.rake)
2243 m6s = num.repeat(mot.m6()[num.newaxis, :], times.size, axis=0)
2245 if amplitudes.ndim == 1:
2246 m6s[:, :] *= amplitudes[:, num.newaxis]
2247 elif amplitudes.ndim == 2:
2248 # shear MT components
2249 rotmat1 = pmt.euler_to_matrix(
2250 d2r * self.dip, d2r * self.strike, d2r * -self.rake)
2251 m6s[:, :] *= amplitudes[0, :][:, num.newaxis]
2253 if amplitudes.shape[0] == 2:
2254 # tensile MT components - moment/magnitude input
2255 tensile = pmt.symmat6(1., 1., 3., 0., 0., 0.)
2256 rot_tensile = pmt.to6(
2257 num.dot(rotmat1.T, num.dot(tensile, rotmat1)))
2259 m6s_tensile = rot_tensile[
2260 num.newaxis, :] * amplitudes[1, :][:, num.newaxis]
2261 m6s += m6s_tensile
2263 elif amplitudes.shape[0] == 3:
2264 # tensile MT components - slip input
2265 iso = pmt.symmat6(1., 1., 1., 0., 0., 0.)
2266 clvd = pmt.symmat6(-1., -1., 2., 0., 0., 0.)
2268 rot_iso = pmt.to6(
2269 num.dot(rotmat1.T, num.dot(iso, rotmat1)))
2270 rot_clvd = pmt.to6(
2271 num.dot(rotmat1.T, num.dot(clvd, rotmat1)))
2273 m6s_iso = rot_iso[
2274 num.newaxis, :] * amplitudes[1, :][:, num.newaxis]
2275 m6s_clvd = rot_clvd[
2276 num.newaxis, :] * amplitudes[2, :][:, num.newaxis]
2277 m6s += m6s_iso + m6s_clvd
2278 else:
2279 raise ValueError('Unknwown amplitudes shape!')
2280 else:
2281 raise ValueError(
2282 'Unexpected dimension of {}'.format(amplitudes.ndim))
2284 ds = meta.DiscretizedMTSource(
2285 lat=self.lat,
2286 lon=self.lon,
2287 times=times,
2288 north_shifts=points[:, 0],
2289 east_shifts=points[:, 1],
2290 depths=points[:, 2],
2291 m6s=m6s,
2292 dl=dl,
2293 dw=dw,
2294 nl=nl,
2295 nw=nw)
2297 return ds
2299 def outline(self, cs='xyz'):
2300 points = outline_rect_source(self.strike, self.dip, self.length,
2301 self.width, self.anchor)
2303 points[:, 0] += self.north_shift
2304 points[:, 1] += self.east_shift
2305 points[:, 2] += self.depth
2306 if cs == 'xyz':
2307 return points
2308 elif cs == 'xy':
2309 return points[:, :2]
2310 elif cs in ('latlon', 'lonlat', 'latlondepth'):
2311 latlon = ne_to_latlon(
2312 self.lat, self.lon, points[:, 0], points[:, 1])
2314 latlon = num.array(latlon).T
2315 if cs == 'latlon':
2316 return latlon
2317 elif cs == 'lonlat':
2318 return latlon[:, ::-1]
2319 else:
2320 return num.concatenate(
2321 (latlon, points[:, 2].reshape((len(points), 1))),
2322 axis=1)
2324 def points_on_source(self, cs='xyz', **kwargs):
2326 points = points_on_rect_source(
2327 self.strike, self.dip, self.length, self.width,
2328 self.anchor, **kwargs)
2330 points[:, 0] += self.north_shift
2331 points[:, 1] += self.east_shift
2332 points[:, 2] += self.depth
2333 if cs == 'xyz':
2334 return points
2335 elif cs == 'xy':
2336 return points[:, :2]
2337 elif cs in ('latlon', 'lonlat', 'latlondepth'):
2338 latlon = ne_to_latlon(
2339 self.lat, self.lon, points[:, 0], points[:, 1])
2341 latlon = num.array(latlon).T
2342 if cs == 'latlon':
2343 return latlon
2344 elif cs == 'lonlat':
2345 return latlon[:, ::-1]
2346 else:
2347 return num.concatenate(
2348 (latlon, points[:, 2].reshape((len(points), 1))),
2349 axis=1)
2351 def get_nucleation_abs_coord(self, cs='xy'):
2353 if self.nucleation_x is None:
2354 return None, None
2356 coords = from_plane_coords(self.strike, self.dip, self.length,
2357 self.width, self.depth, self.nucleation_x,
2358 self.nucleation_y, lat=self.lat,
2359 lon=self.lon, north_shift=self.north_shift,
2360 east_shift=self.east_shift, cs=cs)
2361 return coords
2363 def pyrocko_moment_tensor(self, store=None, target=None):
2364 return pmt.MomentTensor(
2365 strike=self.strike,
2366 dip=self.dip,
2367 rake=self.rake,
2368 scalar_moment=self.get_moment(store, target))
    def pyrocko_event(self, store=None, target=None, **kwargs):
        '''
        Get an event representation of this source.

        Delegates to the parent class implementation; ``store`` and
        ``target`` are needed there to derive the magnitude.
        '''
        return SourceWithDerivedMagnitude.pyrocko_event(
            self, store, target,
            **kwargs)
2375 @classmethod
2376 def from_pyrocko_event(cls, ev, **kwargs):
2377 d = {}
2378 mt = ev.moment_tensor
2379 if mt:
2380 (strike, dip, rake), _ = mt.both_strike_dip_rake()
2381 d.update(
2382 strike=float(strike),
2383 dip=float(dip),
2384 rake=float(rake),
2385 magnitude=float(mt.moment_magnitude()))
2387 d.update(kwargs)
2388 return super(RectangularSource, cls).from_pyrocko_event(ev, **d)
class PseudoDynamicRupture(SourceWithDerivedMagnitude):
    '''
    Combined Eikonal and Okada quasi-dynamic rupture model.

    Details are described in :doc:`/topics/pseudo-dynamic-rupture`.
    Note: attribute `stf` is not used so far, but kept for future applications.
    '''

    discretized_source_class = meta.DiscretizedMTSource

    # -- Fault plane geometry ------------------------------------------------

    strike = Float.T(
        default=0.0,
        help='Strike direction in [deg], measured clockwise from north.')

    dip = Float.T(
        default=0.0,
        help='Dip angle in [deg], measured downward from horizontal.')

    length = Float.T(
        default=10. * km,
        help='Length of rectangular source area in [m].')

    width = Float.T(
        default=5. * km,
        help='Width of rectangular source area in [m].')

    # NOTE(review): the help text below mentions ``center_left`` and
    # ``center_right``, but those values are not in ``choices`` -- confirm
    # against ``map_anchor`` and align the choices or the help text.
    anchor = StringChoice.T(
        choices=['top', 'top_left', 'top_right', 'center', 'bottom',
                 'bottom_left', 'bottom_right'],
        default='center',
        optional=True,
        help='Anchor point for positioning the plane, can be: ``top, center, '
             'bottom, top_left, top_right, bottom_left, '
             'bottom_right, center_left, center_right``.')

    # -- Rupture nucleation; exposed through the nucleation_x/y/time
    # properties which normalize scalars and lists to arrays ----------------

    nucleation_x__ = Array.T(
        default=num.array([0.]),
        dtype=num.float64,
        serialize_as='list',
        help='Horizontal position of rupture nucleation in normalized fault '
             'plane coordinates (``-1.`` = left edge, ``+1.`` = right edge).')

    nucleation_y__ = Array.T(
        default=num.array([0.]),
        dtype=num.float64,
        serialize_as='list',
        help='Down-dip position of rupture nucleation in normalized fault '
             'plane coordinates (``-1.`` = upper edge, ``+1.`` = lower edge).')

    nucleation_time__ = Array.T(
        optional=True,
        help='Time in [s] after origin, when nucleation points defined by '
             '``nucleation_x`` and ``nucleation_y`` rupture.',
        dtype=num.float64,
        serialize_as='list')

    gamma = Float.T(
        default=0.8,
        help='Scaling factor between rupture velocity and S-wave velocity: '
             r':math:`v_r = \gamma * v_s`.')

    # -- Patch discretization ------------------------------------------------

    nx = Int.T(
        default=2,
        help='Number of discrete source patches in x direction (along '
             'strike).')

    ny = Int.T(
        default=2,
        help='Number of discrete source patches in y direction (down dip).')

    # -- Slip / stress parameterization; ``slip``+``tractions`` and ``rake``
    # interplay is enforced in check_conflicts() -----------------------------

    slip = Float.T(
        optional=True,
        help='Maximum slip of the rectangular source [m]. '
             'Setting the slip the tractions/stress field '
             'will be normalized to accomodate the desired maximum slip.')

    rake = Float.T(
        optional=True,
        help='Rake angle in [deg], '
             'measured counter-clockwise from right-horizontal '
             'in on-plane view. Rake is translated into homogenous tractions '
             'in strike and up-dip direction. ``rake`` is mutually exclusive '
             'with tractions parameter.')

    patches = List.T(
        OkadaSource.T(),
        optional=True,
        help='List of all boundary elements/sub faults/fault patches.')

    patch_mask__ = Array.T(
        dtype=bool,
        serialize_as='list',
        shape=(None,),
        optional=True,
        help='Mask for all boundary elements/sub faults/fault patches. True '
             'leaves the patch in the calculation, False excludes the patch.')

    tractions = TractionField.T(
        optional=True,
        help='Traction field the rupture plane is exposed to. See the'
             ':py:mod:`pyrocko.gf.tractions` module for more details. '
             'If ``tractions=None`` and ``rake`` is given'
             ' :py:class:`~pyrocko.gf.tractions.DirectedTractions` will'
             ' be used.')

    coef_mat = Array.T(
        optional=True,
        help='Coefficient matrix linking traction and dislocation field.',
        dtype=num.float64,
        shape=(None, None))

    # -- Numerical tuning ----------------------------------------------------

    eikonal_decimation = Int.T(
        optional=True,
        default=1,
        help='Sub-source eikonal factor, a smaller eikonal factor will'
             ' increase the accuracy of rupture front calculation but'
             ' increases also the computation time.')

    decimation_factor = Int.T(
        optional=True,
        default=1,
        help='Sub-source decimation factor, a larger decimation will'
             ' make the result inaccurate but shorten the necessary'
             ' computation time (use for testing puposes only).')

    nthreads = Int.T(
        optional=True,
        default=1,
        help='Number of threads for Okada forward modelling, '
             'matrix inversion and calculation of point subsources. '
             'Note: for small/medium matrices 1 thread is most efficient.')

    pure_shear = Bool.T(
        optional=True,
        default=False,
        help='Calculate only shear tractions and omit tensile tractions.')

    smooth_rupture = Bool.T(
        default=True,
        help='Smooth the tractions by weighting partially ruptured'
             ' fault patches.')

    aggressive_oversampling = Bool.T(
        default=False,
        help='Aggressive oversampling for basesource discretization. '
             'When using \'multilinear\' interpolation oversampling has'
             ' practically no effect.')
    def __init__(self, **kwargs):
        '''
        Initialize the rupture source and validate parameter exclusivity.
        '''
        SourceWithDerivedMagnitude.__init__(self, **kwargs)
        # Cache for rupture velocity/time interpolators, keyed by the
        # interpolation method name.
        self._interpolators = {}
        # Fail early when tractions and rake are both given.
        self.check_conflicts()
2544 @property
2545 def nucleation_x(self):
2546 return self.nucleation_x__
2548 @nucleation_x.setter
2549 def nucleation_x(self, nucleation_x):
2550 if isinstance(nucleation_x, list):
2551 nucleation_x = num.array(nucleation_x)
2553 elif not isinstance(
2554 nucleation_x, num.ndarray) and nucleation_x is not None:
2556 nucleation_x = num.array([nucleation_x])
2557 self.nucleation_x__ = nucleation_x
2559 @property
2560 def nucleation_y(self):
2561 return self.nucleation_y__
2563 @nucleation_y.setter
2564 def nucleation_y(self, nucleation_y):
2565 if isinstance(nucleation_y, list):
2566 nucleation_y = num.array(nucleation_y)
2568 elif not isinstance(nucleation_y, num.ndarray) \
2569 and nucleation_y is not None:
2570 nucleation_y = num.array([nucleation_y])
2572 self.nucleation_y__ = nucleation_y
2574 @property
2575 def nucleation(self):
2576 nucl_x, nucl_y = self.nucleation_x, self.nucleation_y
2578 if (nucl_x is None) or (nucl_y is None):
2579 return None
2581 assert nucl_x.shape[0] == nucl_y.shape[0]
2583 return num.concatenate(
2584 (nucl_x[:, num.newaxis], nucl_y[:, num.newaxis]), axis=1)
2586 @nucleation.setter
2587 def nucleation(self, nucleation):
2588 if isinstance(nucleation, list):
2589 nucleation = num.array(nucleation)
2591 assert nucleation.shape[1] == 2
2593 self.nucleation_x = nucleation[:, 0]
2594 self.nucleation_y = nucleation[:, 1]
2596 @property
2597 def nucleation_time(self):
2598 return self.nucleation_time__
2600 @nucleation_time.setter
2601 def nucleation_time(self, nucleation_time):
2602 if not isinstance(nucleation_time, num.ndarray) \
2603 and nucleation_time is not None:
2604 nucleation_time = num.array([nucleation_time])
2606 self.nucleation_time__ = nucleation_time
2608 @property
2609 def patch_mask(self):
2610 if (self.patch_mask__ is not None and
2611 self.patch_mask__.shape == (self.nx * self.ny,)):
2613 return self.patch_mask__
2614 else:
2615 return num.ones(self.nx * self.ny, dtype=bool)
2617 @patch_mask.setter
2618 def patch_mask(self, patch_mask):
2619 if isinstance(patch_mask, list):
2620 patch_mask = num.array(patch_mask)
2622 self.patch_mask__ = patch_mask
2624 def get_tractions(self):
2625 '''
2626 Get source traction vectors.
2628 If :py:attr:`rake` is given, unit length directed traction vectors
2629 (:py:class:`~pyrocko.gf.tractions.DirectedTractions`) are returned,
2630 else the given :py:attr:`tractions` are used.
2632 :returns:
2633 Traction vectors per patch.
2634 :rtype:
2635 :py:class:`~numpy.ndarray`: ``(n_patches, 3)``.
2636 '''
2638 if self.rake is not None:
2639 if num.isnan(self.rake):
2640 raise ValueError('Rake must be a real number, not NaN.')
2642 logger.warning(
2643 'Tractions are derived based on the given source rake.')
2644 tractions = DirectedTractions(rake=self.rake)
2645 else:
2646 tractions = self.tractions
2647 return tractions.get_tractions(self.nx, self.ny, self.patches)
2649 def base_key(self):
2650 return SourceWithDerivedMagnitude.base_key(self) + (
2651 self.slip,
2652 self.strike,
2653 self.dip,
2654 self.rake,
2655 self.length,
2656 self.width,
2657 float(self.nucleation_x.mean()),
2658 float(self.nucleation_y.mean()),
2659 self.decimation_factor,
2660 self.anchor,
2661 self.pure_shear,
2662 self.gamma,
2663 tuple(self.patch_mask))
2665 def check_conflicts(self):
2666 if self.tractions and self.rake:
2667 raise AttributeError(
2668 'Tractions and rake are mutually exclusive.')
2669 if self.tractions is None and self.rake is None:
2670 self.rake = 0.
2672 def get_magnitude(self, store=None, target=None):
2673 self.check_conflicts()
2674 if self.slip is not None or self.tractions is not None:
2675 if store is None:
2676 raise DerivedMagnitudeError(
2677 'Magnitude for a rectangular source with slip or '
2678 'tractions defined can only be derived when earth model '
2679 'is set.')
2681 moment_rate, calc_times = self.discretize_basesource(
2682 store, target=target).get_moment_rate(store.config.deltat)
2684 deltat = num.concatenate((
2685 (num.diff(calc_times)[0],),
2686 num.diff(calc_times)))
2688 return float(pmt.moment_to_magnitude(
2689 num.sum(moment_rate * deltat)))
2691 else:
2692 return float(pmt.moment_to_magnitude(1.0))
    def get_factor(self):
        '''
        Get the scaling factor applied to the discretized source (always 1).
        '''
        return 1.0
2697 def outline(self, cs='xyz'):
2698 '''
2699 Get source outline corner coordinates.
2701 :param cs:
2702 :ref:`Output coordinate system <coordinate-system-names>`.
2703 :type cs:
2704 optional, str
2706 :returns:
2707 Corner points in desired coordinate system.
2708 :rtype:
2709 :py:class:`~numpy.ndarray`: ``(5, [2, 3])``.
2710 '''
2711 points = outline_rect_source(self.strike, self.dip, self.length,
2712 self.width, self.anchor)
2714 points[:, 0] += self.north_shift
2715 points[:, 1] += self.east_shift
2716 points[:, 2] += self.depth
2717 if cs == 'xyz':
2718 return points
2719 elif cs == 'xy':
2720 return points[:, :2]
2721 elif cs in ('latlon', 'lonlat', 'latlondepth'):
2722 latlon = ne_to_latlon(
2723 self.lat, self.lon, points[:, 0], points[:, 1])
2725 latlon = num.array(latlon).T
2726 if cs == 'latlon':
2727 return latlon
2728 elif cs == 'lonlat':
2729 return latlon[:, ::-1]
2730 else:
2731 return num.concatenate(
2732 (latlon, points[:, 2].reshape((len(points), 1))),
2733 axis=1)
2735 def points_on_source(self, cs='xyz', **kwargs):
2736 '''
2737 Convert relative plane coordinates to geographical coordinates.
2739 Given x and y coordinates (relative source coordinates between -1.
2740 and 1.) are converted to desired geographical coordinates. Coordinates
2741 need to be given as :py:class:`~numpy.ndarray` arguments ``points_x``
2742 and ``points_y``.
2744 :param cs:
2745 :ref:`Output coordinate system <coordinate-system-names>`.
2746 :type cs:
2747 optional, str
2749 :returns:
2750 Point coordinates in desired coordinate system.
2751 :rtype:
2752 :py:class:`~numpy.ndarray`: ``(n_points, [2, 3])``.
2753 '''
2754 points = points_on_rect_source(
2755 self.strike, self.dip, self.length, self.width,
2756 self.anchor, **kwargs)
2758 points[:, 0] += self.north_shift
2759 points[:, 1] += self.east_shift
2760 points[:, 2] += self.depth
2761 if cs == 'xyz':
2762 return points
2763 elif cs == 'xy':
2764 return points[:, :2]
2765 elif cs in ('latlon', 'lonlat', 'latlondepth'):
2766 latlon = ne_to_latlon(
2767 self.lat, self.lon, points[:, 0], points[:, 1])
2769 latlon = num.array(latlon).T
2770 if cs == 'latlon':
2771 return latlon
2772 elif cs == 'lonlat':
2773 return latlon[:, ::-1]
2774 else:
2775 return num.concatenate(
2776 (latlon, points[:, 2].reshape((len(points), 1))),
2777 axis=1)
2779 def pyrocko_moment_tensor(self, store=None, target=None):
2780 if store is not None:
2781 if not self.patches:
2782 self.discretize_patches(store)
2784 data = self.get_slip()
2785 else:
2786 data = self.get_tractions()
2788 weights = num.linalg.norm(data, axis=1)
2789 weights /= weights.sum()
2791 rakes = num.arctan2(data[:, 1], data[:, 0]) * r2d
2792 rake = num.average(rakes, weights=weights)
2794 return pmt.MomentTensor(
2795 strike=self.strike,
2796 dip=self.dip,
2797 rake=rake,
2798 scalar_moment=self.get_moment(store, target))
    def pyrocko_event(self, store=None, target=None, **kwargs):
        '''
        Get an event representation of this source.

        Delegates to the parent class implementation; ``store`` and
        ``target`` are needed there to derive the magnitude.
        '''
        return SourceWithDerivedMagnitude.pyrocko_event(
            self, store, target,
            **kwargs)
2805 @classmethod
2806 def from_pyrocko_event(cls, ev, **kwargs):
2807 d = {}
2808 mt = ev.moment_tensor
2809 if mt:
2810 (strike, dip, rake), _ = mt.both_strike_dip_rake()
2811 d.update(
2812 strike=float(strike),
2813 dip=float(dip),
2814 rake=float(rake))
2816 d.update(kwargs)
2817 return super(PseudoDynamicRupture, cls).from_pyrocko_event(ev, **d)
    def _discretize_points(self, store, *args, **kwargs):
        '''
        Discretize source plane with equal vertical and horizontal spacing.

        Additional ``*args`` and ``**kwargs`` are passed to
        :py:meth:`points_on_source`.

        :param store:
            Green's function database (needs to cover whole region of the
            source).
        :type store:
            :py:class:`~pyrocko.gf.store.Store`

        :returns:
            Number of points in strike and dip direction, distance
            between adjacent points, coordinates (latlondepth) and coordinates
            (xy on fault) for discrete points.
        :rtype:
            (int, int, float, :py:class:`~numpy.ndarray`,
            :py:class:`~numpy.ndarray`).
        '''
        anch_x, anch_y = map_anchor[self.anchor]

        # Sample a down-dip profile of the plane roughly every 1 km to probe
        # the S-wave velocity over the source's depth extent.
        npoints = int(self.width // km) + 1
        points = num.zeros((npoints, 3))
        points[:, 1] = num.linspace(-1., 1., npoints)
        points[:, 1] = (points[:, 1] - anch_y) * self.width/2

        # Rotate the profile into the dipping plane and shift to depth.
        rotmat = pmt.euler_to_matrix(self.dip*d2r, self.strike*d2r, 0.0)
        points = num.dot(rotmat.T, points.T).T
        points[:, 2] += self.depth

        # Minimum rupture velocity on the profile, floored at 0.5 km/s * vs
        # scaling, to keep the grid spacing bounded.
        vs_min = store.config.get_vs(
            self.lat, self.lon, points,
            interpolation='nearest_neighbor')
        vr_min = max(vs_min.min(), .5*km) * self.gamma

        oversampling = 10.
        delta_l = self.length / (self.nx * oversampling)
        delta_w = self.width / (self.ny * oversampling)

        # Grid spacing: finest of GF sampling distance travelled per time
        # step, oversampled patch sizes and the store's spatial deltas.
        delta = self.eikonal_decimation * num.min([
            store.config.deltat * vr_min / oversampling,
            delta_l, delta_w] + [
            deltas for deltas in store.config.deltas])

        # Snap delta so the width is divided into an integer number of cells.
        delta = delta_w / num.ceil(delta_w / delta)

        nx = int(num.ceil(self.length / delta)) + 1
        ny = int(num.ceil(self.width / delta)) + 1

        # Along strike the grid may slightly overshoot the fault length;
        # widen the normalized coordinate range accordingly.
        rem_l = (nx-1)*delta - self.length
        lim_x = rem_l / self.length

        points_xy = num.zeros((nx * ny, 2))
        points_xy[:, 0] = num.repeat(
            num.linspace(-1.-lim_x, 1.+lim_x, nx), ny)
        points_xy[:, 1] = num.tile(
            num.linspace(-1., 1., ny), nx)

        points = self.points_on_source(
            points_x=points_xy[:, 0],
            points_y=points_xy[:, 1],
            **kwargs)

        return nx, ny, delta, points, points_xy
2886 def _discretize_rupture_v(self, store, interpolation='nearest_neighbor',
2887 points=None):
2888 '''
2889 Get rupture velocity for discrete points on source plane.
2891 :param store:
2892 Green's function database (needs to cover the whole region of the
2893 source)
2894 :type store:
2895 optional, :py:class:`~pyrocko.gf.store.Store`
2897 :param interpolation:
2898 Interpolation method to use (choose between ``'nearest_neighbor'``
2899 and ``'multilinear'``).
2900 :type interpolation:
2901 optional, str
2903 :param points:
2904 Coordinates on fault (-1.:1.) of discrete points.
2905 :type points:
2906 optional, :py:class:`~numpy.ndarray`: ``(n_points, 2)``
2908 :returns:
2909 Rupture velocity assumed as :math:`v_s * \\gamma` for discrete
2910 points.
2911 :rtype:
2912 :py:class:`~numpy.ndarray`: ``(n_points, )``.
2913 '''
2915 if points is None:
2916 _, _, _, points, _ = self._discretize_points(store, cs='xyz')
2918 return store.config.get_vs(
2919 self.lat, self.lon,
2920 points=points,
2921 interpolation=interpolation) * self.gamma
2923 def discretize_time(
2924 self, store, interpolation='nearest_neighbor',
2925 vr=None, times=None, *args, **kwargs):
2926 '''
2927 Get rupture start time for discrete points on source plane.
2929 :param store:
2930 Green's function database (needs to cover whole region of the
2931 source)
2932 :type store:
2933 :py:class:`~pyrocko.gf.store.Store`
2935 :param interpolation:
2936 Interpolation method to use (choose between ``'nearest_neighbor'``
2937 and ``'multilinear'``).
2938 :type interpolation:
2939 optional, str
2941 :param vr:
2942 Array, containing rupture user defined rupture velocity values.
2943 :type vr:
2944 optional, :py:class:`~numpy.ndarray`
2946 :param times:
2947 Array, containing zeros, where rupture is starting, real positive
2948 numbers at later secondary nucleation points and -1, where time
2949 will be calculated. If not given, rupture starts at nucleation_x,
2950 nucleation_y. Times are given for discrete points with equal
2951 horizontal and vertical spacing.
2952 :type times:
2953 optional, :py:class:`~numpy.ndarray`
2955 :returns:
2956 Coordinates (latlondepth), coordinates (xy), rupture velocity,
2957 rupture propagation time of discrete points.
2958 :rtype:
2959 :py:class:`~numpy.ndarray`: ``(n_points, 3)``,
2960 :py:class:`~numpy.ndarray`: ``(n_points, 2)``,
2961 :py:class:`~numpy.ndarray`: ``(n_points_dip, n_points_strike)``,
2962 :py:class:`~numpy.ndarray`: ``(n_points_dip, n_points_strike)``.
2963 '''
2964 nx, ny, delta, points, points_xy = self._discretize_points(
2965 store, cs='xyz')
2967 if vr is None or vr.shape != tuple((nx, ny)):
2968 if vr:
2969 logger.warning(
2970 'Given rupture velocities are not in right shape: '
2971 '(%i, %i), but needed is (%i, %i).', *vr.shape + (nx, ny))
2972 vr = self._discretize_rupture_v(store, interpolation, points)\
2973 .reshape(nx, ny)
2975 if vr.shape != tuple((nx, ny)):
2976 logger.warning(
2977 'Given rupture velocities are not in right shape. Therefore'
2978 ' standard rupture velocity array is used.')
2980 def initialize_times():
2981 nucl_x, nucl_y = self.nucleation_x, self.nucleation_y
2983 if nucl_x.shape != nucl_y.shape:
2984 raise ValueError(
2985 'Nucleation coordinates have different shape.')
2987 dist_points = num.array([
2988 num.linalg.norm(points_xy - num.array([x, y]).ravel(), axis=1)
2989 for x, y in zip(nucl_x, nucl_y)])
2990 nucl_indices = num.argmin(dist_points, axis=1)
2992 if self.nucleation_time is None:
2993 nucl_times = num.zeros_like(nucl_indices)
2994 else:
2995 if self.nucleation_time.shape == nucl_x.shape:
2996 nucl_times = self.nucleation_time
2997 else:
2998 raise ValueError(
2999 'Nucleation coordinates and times have different '
3000 'shapes')
3002 t = num.full(nx * ny, -1.)
3003 t[nucl_indices] = nucl_times
3004 return t.reshape(nx, ny)
3006 if times is None:
3007 times = initialize_times()
3008 elif times.shape != tuple((nx, ny)):
3009 times = initialize_times()
3010 logger.warning(
3011 'Given times are not in right shape. Therefore standard time'
3012 ' array is used.')
3014 eikonal_ext.eikonal_solver_fmm_cartesian(
3015 speeds=vr, times=times, delta=delta)
3017 return points, points_xy, vr, times
    def get_vr_time_interpolators(
            self, store, interpolation='nearest_neighbor', force=False,
            **kwargs):
        '''
        Get interpolators for rupture velocity and rupture time.

        Results are cached in ``self._interpolators`` per interpolation
        method; recomputation happens only on cache miss or ``force``.

        Additional ``**kwargs`` are passed to :py:meth:`discretize_time`.

        :param store:
            Green's function database (needs to cover whole region of the
            source).
        :type store:
            :py:class:`~pyrocko.gf.store.Store`

        :param interpolation:
            Interpolation method to use (choose between ``'nearest_neighbor'``
            and ``'multilinear'``).
        :type interpolation:
            optional, str

        :param force:
            Force recalculation of the interpolators (e.g. after change of
            nucleation point locations/times). Default is ``False``.
        :type force:
            optional, bool
        '''
        # Map pyrocko interpolation names to scipy method names.
        interp_map = {'multilinear': 'linear', 'nearest_neighbor': 'nearest'}
        if interpolation not in interp_map:
            raise TypeError(
                'Interpolation method %s not available' % interpolation)

        if not self._interpolators.get(interpolation, False) or force:
            _, points_xy, vr, times = self.discretize_time(
                store, **kwargs)

            if self.length <= 0.:
                raise ValueError(
                    'length must be larger then 0. not %g' % self.length)

            if self.width <= 0.:
                raise ValueError(
                    'width must be larger then 0. not %g' % self.width)

            nx, ny = times.shape
            anch_x, anch_y = map_anchor[self.anchor]

            # Convert normalized fault coordinates to metric plane
            # coordinates relative to the anchor point.
            points_xy[:, 0] = (points_xy[:, 0] - anch_x) * self.length / 2.
            points_xy[:, 1] = (points_xy[:, 1] - anch_y) * self.width / 2.

            ascont = num.ascontiguousarray

            # points_xy is ordered strike-major (x repeated, y tiled), so
            # every ny-th row gives the x axis and the first ny rows the
            # y axis of the regular grid.
            self._interpolators[interpolation] = (
                nx, ny, times, vr,
                RegularGridInterpolator(
                    (ascont(points_xy[::ny, 0]), ascont(points_xy[:ny, 1])),
                    times,
                    method=interp_map[interpolation]),
                RegularGridInterpolator(
                    (ascont(points_xy[::ny, 0]), ascont(points_xy[:ny, 1])),
                    vr,
                    method=interp_map[interpolation]))

        return self._interpolators[interpolation]
    def discretize_patches(
            self, store, interpolation='nearest_neighbor', force=False,
            grid_shape=(),
            **kwargs):
        '''
        Get rupture start time and OkadaSource elements for points on rupture.

        All source elements and their corresponding center points are
        calculated and stored in the :py:attr:`patches` attribute.

        Additional ``**kwargs`` are passed to :py:meth:`discretize_time`.

        :param store:
            Green's function database (needs to cover whole region of the
            source).
        :type store:
            :py:class:`~pyrocko.gf.store.Store`

        :param interpolation:
            Interpolation method to use (choose between ``'nearest_neighbor'``
            and ``'multilinear'``).
        :type interpolation:
            optional, str

        :param force:
            Force recalculation of the vr and time interpolators ( e.g. after
            change of nucleation point locations/times). Default is ``False``.
        :type force:
            optional, bool

        :param grid_shape:
            Desired sub fault patch grid size (nlength, nwidth). Either factor
            or grid_shape should be set.
        :type grid_shape:
            optional, tuple of int
        '''
        nx, ny, times, vr, time_interpolator, vr_interpolator = \
            self.get_vr_time_interpolators(
                store,
                interpolation=interpolation, force=force, **kwargs)
        anch_x, anch_y = map_anchor[self.anchor]

        # Okada source extents relative to the anchor point.
        al = self.length / 2.
        aw = self.width / 2.
        al1 = -(al + anch_x * al)
        al2 = al - anch_x * al
        aw1 = -aw + anch_y * aw
        aw2 = aw + anch_y * aw
        assert num.abs([al1, al2]).sum() == self.length
        assert num.abs([aw1, aw2]).sum() == self.width

        def get_lame(*a, **kw):
            # Shear modulus and Poisson ratio from the store's earth model;
            # lamb here is the first Lame parameter before the ratio is
            # formed.
            shear_mod = store.config.get_shear_moduli(*a, **kw)
            lamb = store.config.get_vp(*a, **kw)**2 \
                * store.config.get_rho(*a, **kw) - 2. * shear_mod
            return shear_mod, lamb / (2. * (lamb + shear_mod))

        shear_mod, poisson = get_lame(
            self.lat, self.lon,
            num.array([[self.north_shift, self.east_shift, self.depth]]),
            interpolation=interpolation)

        # Single Okada source covering the whole plane; gets subdivided
        # into self.nx * self.ny patches below.
        okada_src = OkadaSource(
            lat=self.lat, lon=self.lon,
            strike=self.strike, dip=self.dip,
            north_shift=self.north_shift, east_shift=self.east_shift,
            depth=self.depth,
            al1=al1, al2=al2, aw1=aw1, aw2=aw2,
            poisson=poisson.mean(),
            shearmod=shear_mod.mean(),
            opening=kwargs.get('opening', 0.))

        if not (self.nx and self.ny):
            if grid_shape:
                self.nx, self.ny = grid_shape
            else:
                # Fall back to the eikonal grid resolution.
                self.nx = nx
                self.ny = ny

        source_disc, source_points = okada_src.discretize(self.nx, self.ny)

        # NOTE(review): this second get_lame result is not used below --
        # possibly intended to assign per-patch moduli; confirm.
        shear_mod, poisson = get_lame(
            self.lat, self.lon,
            num.array([src.source_patch()[:3] for src in source_disc]),
            interpolation=interpolation)

        # Rupture times/velocities per patch: interpolate when the patch
        # grid differs from the eikonal grid, otherwise reuse directly.
        if (self.nx, self.ny) != (nx, ny):
            times_interp = time_interpolator(
                num.ascontiguousarray(source_points[:, :2]))
            vr_interp = vr_interpolator(
                num.ascontiguousarray(source_points[:, :2]))
        else:
            times_interp = times.T.ravel()
            vr_interp = vr.T.ravel()

        for isrc, src in enumerate(source_disc):
            src.vr = vr_interp[isrc]
            src.time = times_interp[isrc] + self.time

        self.patches = source_disc
    def discretize_basesource(self, store, target=None):
        '''
        Prepare source for synthetic waveform calculation.

        :param store:
            Green's function database (needs to cover whole region of the
            source).
        :type store:
            :py:class:`~pyrocko.gf.store.Store`

        :param target:
            Target information.
        :type target:
            optional, :py:class:`~pyrocko.gf.targets.Target`

        :returns:
            Source discretized by a set of moment tensors and times.
        :rtype:
            :py:class:`~pyrocko.gf.meta.DiscretizedMTSource`
        '''
        if not target:
            interpolation = 'nearest_neighbor'
        else:
            interpolation = target.interpolation

        if not self.patches:
            self.discretize_patches(store, interpolation)

        if self.coef_mat is None:
            self.calc_coef_mat()

        delta_slip, slip_times = self.get_delta_slip(store)
        npatches = self.nx * self.ny
        ntimes = slip_times.size

        anch_x, anch_y = map_anchor[self.anchor]

        # Patch length and width in [m].
        pln = self.length / self.nx
        pwd = self.width / self.ny

        patch_coords = num.array([
            (p.ix, p.iy)
            for p in self.patches]).reshape(self.nx, self.ny, 2)

        # boundary condition is zero-slip
        # is not valid to avoid unwished interpolation effects
        # Pad the patch grid by one cell on each side and replicate the edge
        # values into the padding cells.
        slip_grid = num.zeros((self.nx + 2, self.ny + 2, ntimes, 3))
        slip_grid[1:-1, 1:-1, :, :] = \
            delta_slip.reshape(self.nx, self.ny, ntimes, 3)

        # Corners of the padded grid.
        slip_grid[0, 0, :, :] = slip_grid[1, 1, :, :]
        slip_grid[0, -1, :, :] = slip_grid[1, -2, :, :]
        slip_grid[-1, 0, :, :] = slip_grid[-2, 1, :, :]
        slip_grid[-1, -1, :, :] = slip_grid[-2, -2, :, :]

        # Edges of the padded grid.
        slip_grid[1:-1, 0, :, :] = slip_grid[1:-1, 1, :, :]
        slip_grid[1:-1, -1, :, :] = slip_grid[1:-1, -2, :, :]
        slip_grid[0, 1:-1, :, :] = slip_grid[1, 1:-1, :, :]
        slip_grid[-1, 1:-1, :, :] = slip_grid[-2, 1:-1, :, :]

        def make_grid(patch_parameter):
            # Same edge-replicating padding for a scalar per-patch value.
            grid = num.zeros((self.nx + 2, self.ny + 2))
            grid[1:-1, 1:-1] = patch_parameter.reshape(self.nx, self.ny)

            grid[0, 0] = grid[1, 1]
            grid[0, -1] = grid[1, -2]
            grid[-1, 0] = grid[-2, 1]
            grid[-1, -1] = grid[-2, -2]

            grid[1:-1, 0] = grid[1:-1, 1]
            grid[1:-1, -1] = grid[1:-1, -2]
            grid[0, 1:-1] = grid[1, 1:-1]
            grid[-1, 1:-1] = grid[-2, 1:-1]

            return grid

        lamb = self.get_patch_attribute('lamb')
        mu = self.get_patch_attribute('shearmod')

        lamb_grid = make_grid(lamb)
        mu_grid = make_grid(mu)

        # Grid axis coordinates, extended by half a patch for the padding.
        coords_x = num.zeros(self.nx + 2)
        coords_x[1:-1] = patch_coords[:, 0, 0]
        coords_x[0] = coords_x[1] - pln / 2
        coords_x[-1] = coords_x[-2] + pln / 2

        coords_y = num.zeros(self.ny + 2)
        coords_y[1:-1] = patch_coords[0, :, 1]
        coords_y[0] = coords_y[1] - pwd / 2
        coords_y[-1] = coords_y[-2] + pwd / 2

        slip_interp = RegularGridInterpolator(
            (coords_x, coords_y, slip_times),
            slip_grid, method='nearest')

        lamb_interp = RegularGridInterpolator(
            (coords_x, coords_y),
            lamb_grid, method='nearest')

        mu_interp = RegularGridInterpolator(
            (coords_x, coords_y),
            mu_grid, method='nearest')

        # discretize basesources
        mindeltagf = min(tuple(
            (self.length / self.nx, self.width / self.ny) +
            tuple(store.config.deltas)))

        # Sub-source counts per patch, reduced by the decimation factor.
        nl = int((1. / self.decimation_factor) *
                 num.ceil(pln / mindeltagf)) + 1
        nw = int((1. / self.decimation_factor) *
                 num.ceil(pwd / mindeltagf)) + 1
        nsrc_patch = int(nl * nw)
        dl = pln / nl
        dw = pwd / nw

        patch_area = dl * dw

        # Sub-source positions within a single patch, centered on the patch.
        xl = num.linspace(-0.5 * (pln - dl), 0.5 * (pln - dl), nl)
        xw = num.linspace(-0.5 * (pwd - dw), 0.5 * (pwd - dw), nw)

        base_coords = num.zeros((nsrc_patch, 3))
        base_coords[:, 0] = num.tile(xl, nw)
        base_coords[:, 1] = num.repeat(xw, nl)
        base_coords = num.tile(base_coords, (npatches, 1))

        # Patch center positions relative to the fault center.
        center_coords = num.zeros((npatches, 3))
        center_coords[:, 0] = num.repeat(
            num.arange(self.nx) * pln + pln / 2, self.ny) - self.length / 2
        center_coords[:, 1] = num.tile(
            num.arange(self.ny) * pwd + pwd / 2, self.nx) - self.width / 2

        base_coords -= center_coords.repeat(nsrc_patch, axis=0)
        nbaselocs = base_coords.shape[0]

        base_interp = base_coords.repeat(ntimes, axis=0)

        # Abuse the third coordinate as time axis for the 3-d slip
        # interpolation; it is reset to depth further below.
        base_times = num.tile(slip_times, nbaselocs)
        base_interp[:, 0] -= anch_x * self.length / 2
        base_interp[:, 1] -= anch_y * self.width / 2
        base_interp[:, 2] = base_times

        _, _, _, _, time_interpolator, _ = self.get_vr_time_interpolators(
            store, interpolation=interpolation)

        time_eikonal_max = time_interpolator.values.max()

        nbasesrcs = base_interp.shape[0]
        delta_slip = slip_interp(base_interp).reshape(nbaselocs, ntimes, 3)
        lamb = lamb_interp(base_interp[:, :2]).ravel()
        mu = mu_interp(base_interp[:, :2]).ravel()

        # NOTE(review): disabled debugging visualization - candidate for
        # removal.
        if False:
            try:
                import matplotlib.pyplot as plt
                coords = base_coords.copy()
                norm = num.sum(num.linalg.norm(delta_slip, axis=2), axis=1)
                plt.scatter(coords[:, 0], coords[:, 1], c=norm)
                plt.show()
            except AttributeError:
                pass

        # Rotate sub-source positions into the dipping plane and shift to
        # absolute Cartesian coordinates.
        base_interp[:, 2] = 0.
        rotmat = pmt.euler_to_matrix(self.dip * d2r, self.strike * d2r, 0.0)
        base_interp = num.dot(rotmat.T, base_interp.T).T
        base_interp[:, 0] += self.north_shift
        base_interp[:, 1] += self.east_shift
        base_interp[:, 2] += self.depth

        # Decompose slip increments into shear (strike/dip) and normal parts.
        slip_strike = delta_slip[:, :, 0].ravel()
        slip_dip = delta_slip[:, :, 1].ravel()
        slip_norm = delta_slip[:, :, 2].ravel()

        slip_shear = num.linalg.norm([slip_strike, slip_dip], axis=0)
        slip_rake = r2d * num.arctan2(slip_dip, slip_strike)

        m6s = okada_ext.patch2m6(
            strikes=num.full(nbasesrcs, self.strike, dtype=float),
            dips=num.full(nbasesrcs, self.dip, dtype=float),
            rakes=slip_rake,
            disl_shear=slip_shear,
            disl_norm=slip_norm,
            lamb=lamb,
            mu=mu,
            nthreads=self.nthreads)

        # Scale moment densities by the sub-source area.
        m6s *= patch_area

        dl = -self.patches[0].al1 + self.patches[0].al2
        dw = -self.patches[0].aw1 + self.patches[0].aw2

        # Clip times beyond the eikonal solution to its maximum.
        base_times[base_times > time_eikonal_max] = time_eikonal_max

        ds = meta.DiscretizedMTSource(
            lat=self.lat,
            lon=self.lon,
            times=base_times + self.time,
            north_shifts=base_interp[:, 0],
            east_shifts=base_interp[:, 1],
            depths=base_interp[:, 2],
            m6s=m6s,
            dl=dl,
            dw=dw,
            nl=self.nx,
            nw=self.ny)

        return ds
3393 def calc_coef_mat(self):
3394 '''
3395 Calculate coefficients connecting tractions and dislocations.
3396 '''
3397 if not self.patches:
3398 raise ValueError(
3399 'Patches are needed. Please calculate them first.')
3401 self.coef_mat = make_okada_coefficient_matrix(
3402 self.patches, nthreads=self.nthreads, pure_shear=self.pure_shear)
3404 def get_patch_attribute(self, attr):
3405 '''
3406 Get patch attributes.
3408 :param attr:
3409 Name of selected attribute (see
3410 :py:class`pyrocko.modelling.okada.OkadaSource`).
3411 :type attr:
3412 str
3414 :returns:
3415 Array with attribute value for each fault patch.
3416 :rtype:
3417 :py:class:`~numpy.ndarray`
3419 '''
3420 if not self.patches:
3421 raise ValueError(
3422 'Patches are needed. Please calculate them first.')
3423 return num.array([getattr(p, attr) for p in self.patches])
3425 def get_slip(
3426 self,
3427 time=None,
3428 scale_slip=True,
3429 interpolation='nearest_neighbor',
3430 **kwargs):
3431 '''
3432 Get slip per subfault patch for given time after rupture start.
3434 :param time:
3435 Time after origin [s], for which slip is computed. If not
3436 given, final static slip is returned.
3437 :type time:
3438 optional, float > 0.
3440 :param scale_slip:
3441 If ``True`` and :py:attr:`slip` given, all slip values are scaled
3442 to fit the given maximum slip.
3443 :type scale_slip:
3444 optional, bool
3446 :param interpolation:
3447 Interpolation method to use (choose between ``'nearest_neighbor'``
3448 and ``'multilinear'``).
3449 :type interpolation:
3450 optional, str
3452 :returns:
3453 Inverted dislocations (:math:`u_{strike}, u_{dip}, u_{tensile}`)
3454 for each source patch.
3455 :rtype:
3456 :py:class:`~numpy.ndarray`: ``(n_sources, 3)``
3457 '''
3459 if self.patches is None:
3460 raise ValueError(
3461 'Please discretize the source first (discretize_patches())')
3462 npatches = len(self.patches)
3463 tractions = self.get_tractions()
3464 time_patch_max = self.get_patch_attribute('time').max() - self.time
3466 time_patch = time
3467 if time is None:
3468 time_patch = time_patch_max
3470 if self.coef_mat is None:
3471 self.calc_coef_mat()
3473 if tractions.shape != (npatches, 3):
3474 raise AttributeError(
3475 'The traction vector is of invalid shape.'
3476 ' Required shape is (npatches, 3)')
3478 patch_mask = num.ones(npatches, dtype=bool)
3479 if self.patch_mask is not None:
3480 patch_mask = self.patch_mask
3482 times = self.get_patch_attribute('time') - self.time
3483 times[~patch_mask] = time_patch + 1. # exlcude unmasked patches
3484 relevant_sources = num.nonzero(times <= time_patch)[0]
3485 disloc_est = num.zeros_like(tractions)
3487 if self.smooth_rupture:
3488 patch_activation = num.zeros(npatches)
3490 nx, ny, times, vr, time_interpolator, vr_interpolator = \
3491 self.get_vr_time_interpolators(
3492 store, interpolation=interpolation)
3494 # Getting the native Eikonal grid, bit hackish
3495 points_x = num.round(time_interpolator.grid[0], decimals=2)
3496 points_y = num.round(time_interpolator.grid[1], decimals=2)
3497 times_eikonal = time_interpolator.values
3499 time_max = time
3500 if time is None:
3501 time_max = times_eikonal.max()
3503 for ip, p in enumerate(self.patches):
3504 ul = num.round((p.ix + p.al1, p.iy + p.aw1), decimals=2)
3505 lr = num.round((p.ix + p.al2, p.iy + p.aw2), decimals=2)
3507 idx_length = num.logical_and(
3508 points_x >= ul[0], points_x <= lr[0])
3509 idx_width = num.logical_and(
3510 points_y >= ul[1], points_y <= lr[1])
3512 times_patch = times_eikonal[num.ix_(idx_length, idx_width)]
3513 if times_patch.size == 0:
3514 raise AttributeError('could not use smooth_rupture')
3516 patch_activation[ip] = \
3517 (times_patch <= time_max).sum() / times_patch.size
3519 if time_patch == 0 and time_patch != time_patch_max:
3520 patch_activation[ip] = 0.
3522 patch_activation[~patch_mask] = 0. # exlcude unmasked patches
3524 relevant_sources = num.nonzero(patch_activation > 0.)[0]
3526 if relevant_sources.size == 0:
3527 return disloc_est
3529 indices_disl = num.repeat(relevant_sources * 3, 3)
3530 indices_disl[1::3] += 1
3531 indices_disl[2::3] += 2
3533 disloc_est[relevant_sources] = invert_fault_dislocations_bem(
3534 stress_field=tractions[relevant_sources, :].ravel(),
3535 coef_mat=self.coef_mat[indices_disl, :][:, indices_disl],
3536 pure_shear=self.pure_shear, nthreads=self.nthreads,
3537 epsilon=None,
3538 **kwargs)
3540 if self.smooth_rupture:
3541 disloc_est *= patch_activation[:, num.newaxis]
3543 if scale_slip and self.slip is not None:
3544 disloc_tmax = num.zeros(npatches)
3546 indices_disl = num.repeat(num.nonzero(patch_mask)[0] * 3, 3)
3547 indices_disl[1::3] += 1
3548 indices_disl[2::3] += 2
3550 disloc_tmax[patch_mask] = num.linalg.norm(
3551 invert_fault_dislocations_bem(
3552 stress_field=tractions[patch_mask, :].ravel(),
3553 coef_mat=self.coef_mat[indices_disl, :][:, indices_disl],
3554 pure_shear=self.pure_shear, nthreads=self.nthreads,
3555 epsilon=None,
3556 **kwargs), axis=1)
3558 disloc_tmax_max = disloc_tmax.max()
3559 if disloc_tmax_max == 0.:
3560 logger.warning(
3561 'slip scaling not performed. Maximum slip is 0.')
3563 disloc_est *= self.slip / disloc_tmax_max
3565 return disloc_est
3567 def get_delta_slip(
3568 self,
3569 store=None,
3570 deltat=None,
3571 delta=True,
3572 interpolation='nearest_neighbor',
3573 **kwargs):
3574 '''
3575 Get slip change snapshots.
3577 The time interval, within which the slip changes are computed is
3578 determined by the sampling rate of the Green's function database or
3579 ``deltat``. Additional ``**kwargs`` are passed to :py:meth:`get_slip`.
3581 :param store:
3582 Green's function database (needs to cover whole region of of the
3583 source). Its sampling interval is used as time increment for slip
3584 difference calculation. Either ``deltat`` or ``store`` should be
3585 given.
3586 :type store:
3587 optional, :py:class:`~pyrocko.gf.store.Store`
3589 :param deltat:
3590 Time interval for slip difference calculation [s]. Either
3591 ``deltat`` or ``store`` should be given.
3592 :type deltat:
3593 optional, float
3595 :param delta:
3596 If ``True``, slip differences between two time steps are given. If
3597 ``False``, cumulative slip for all time steps.
3598 :type delta:
3599 optional, bool
3601 :param interpolation:
3602 Interpolation method to use (choose between ``'nearest_neighbor'``
3603 and ``'multilinear'``).
3604 :type interpolation:
3605 optional, str
3607 :returns:
3608 Displacement changes(:math:`\\Delta u_{strike},
3609 \\Delta u_{dip} , \\Delta u_{tensile}`) for each source patch and
3610 time; corner times, for which delta slip is computed. The order of
3611 displacement changes array is:
3613 .. math::
3615 &[[\\\\
3616 &[\\Delta u_{strike, patch1, t1},
3617 \\Delta u_{dip, patch1, t1},
3618 \\Delta u_{tensile, patch1, t1}],\\\\
3619 &[\\Delta u_{strike, patch1, t2},
3620 \\Delta u_{dip, patch1, t2},
3621 \\Delta u_{tensile, patch1, t2}]\\\\
3622 &], [\\\\
3623 &[\\Delta u_{strike, patch2, t1}, ...],\\\\
3624 &[\\Delta u_{strike, patch2, t2}, ...]]]\\\\
3626 :rtype: :py:class:`~numpy.ndarray`: ``(n_sources, n_times, 3)``,
3627 :py:class:`~numpy.ndarray`: ``(n_times, )``
3628 '''
3629 if store and deltat:
3630 raise AttributeError(
3631 'Argument collision. '
3632 'Please define only the store or the deltat argument.')
3634 if store:
3635 deltat = store.config.deltat
3637 if not deltat:
3638 raise AttributeError('Please give a GF store or set deltat.')
3640 npatches = len(self.patches)
3642 _, _, _, _, time_interpolator, _ = self.get_vr_time_interpolators(
3643 store, interpolation=interpolation)
3644 tmax = time_interpolator.values.max()
3646 calc_times = num.arange(0., tmax + deltat, deltat)
3647 calc_times[calc_times > tmax] = tmax
3649 disloc_est = num.zeros((npatches, calc_times.size, 3))
3651 for itime, t in enumerate(calc_times):
3652 disloc_est[:, itime, :] = self.get_slip(
3653 time=t, scale_slip=False, **kwargs)
3655 if self.slip:
3656 disloc_tmax = num.linalg.norm(
3657 self.get_slip(scale_slip=False, time=tmax),
3658 axis=1)
3660 disloc_tmax_max = disloc_tmax.max()
3661 if disloc_tmax_max == 0.:
3662 logger.warning(
3663 'Slip scaling not performed. Maximum slip is 0.')
3664 else:
3665 disloc_est *= self.slip / disloc_tmax_max
3667 if not delta:
3668 return disloc_est, calc_times
3670 # if we have only one timestep there is no gradient
3671 if calc_times.size > 1:
3672 disloc_init = disloc_est[:, 0, :]
3673 disloc_est = num.diff(disloc_est, axis=1)
3674 disloc_est = num.concatenate((
3675 disloc_init[:, num.newaxis, :], disloc_est), axis=1)
3677 calc_times = calc_times
3679 return disloc_est, calc_times
3681 def get_slip_rate(self, *args, **kwargs):
3682 '''
3683 Get slip rate inverted from patches.
3685 The time interval, within which the slip rates are computed is
3686 determined by the sampling rate of the Green's function database or
3687 ``deltat``. Additional ``*args`` and ``**kwargs`` are passed to
3688 :py:meth:`get_delta_slip`.
3690 :returns:
3691 Slip rates (:math:`\\Delta u_{strike}/\\Delta t`,
3692 :math:`\\Delta u_{dip}/\\Delta t, \\Delta u_{tensile}/\\Delta t`)
3693 for each source patch and time; corner times, for which slip rate
3694 is computed. The order of sliprate array is:
3696 .. math::
3698 &[[\\\\
3699 &[\\Delta u_{strike, patch1, t1}/\\Delta t,
3700 \\Delta u_{dip, patch1, t1}/\\Delta t,
3701 \\Delta u_{tensile, patch1, t1}/\\Delta t],\\\\
3702 &[\\Delta u_{strike, patch1, t2}/\\Delta t,
3703 \\Delta u_{dip, patch1, t2}/\\Delta t,
3704 \\Delta u_{tensile, patch1, t2}/\\Delta t]], [\\\\
3705 &[\\Delta u_{strike, patch2, t1}/\\Delta t, ...],\\\\
3706 &[\\Delta u_{strike, patch2, t2}/\\Delta t, ...]]]\\\\
3708 :rtype: :py:class:`~numpy.ndarray`: ``(n_sources, n_times, 3)``,
3709 :py:class:`~numpy.ndarray`: ``(n_times, )``
3710 '''
3711 ddisloc_est, calc_times = self.get_delta_slip(
3712 *args, delta=True, **kwargs)
3714 dt = num.concatenate(
3715 [(num.diff(calc_times)[0], ), num.diff(calc_times)])
3716 slip_rate = num.linalg.norm(ddisloc_est, axis=2) / dt
3718 return slip_rate, calc_times
3720 def get_moment_rate_patches(self, *args, **kwargs):
3721 '''
3722 Get scalar seismic moment rate for each patch individually.
3724 Additional ``*args`` and ``**kwargs`` are passed to
3725 :py:meth:`get_slip_rate`.
3727 :returns:
3728 Seismic moment rate for each source patch and time; corner times,
3729 for which patch moment rate is computed based on slip rate. The
3730 order of the moment rate array is:
3732 .. math::
3734 &[\\\\
3735 &[(\\Delta M / \\Delta t)_{patch1, t1},
3736 (\\Delta M / \\Delta t)_{patch1, t2}, ...],\\\\
3737 &[(\\Delta M / \\Delta t)_{patch2, t1},
3738 (\\Delta M / \\Delta t)_{patch, t2}, ...],\\\\
3739 &[...]]\\\\
3741 :rtype: :py:class:`~numpy.ndarray`: ``(n_sources, n_times)``,
3742 :py:class:`~numpy.ndarray`: ``(n_times, )``
3743 '''
3744 slip_rate, calc_times = self.get_slip_rate(*args, **kwargs)
3746 shear_mod = self.get_patch_attribute('shearmod')
3747 p_length = self.get_patch_attribute('length')
3748 p_width = self.get_patch_attribute('width')
3750 dA = p_length * p_width
3752 mom_rate = shear_mod[:, num.newaxis] * slip_rate * dA[:, num.newaxis]
3754 return mom_rate, calc_times
3756 def get_moment_rate(self, store, target=None, deltat=None):
3757 '''
3758 Get seismic source moment rate for the total source (STF).
3760 :param store:
3761 Green's function database (needs to cover whole region of of the
3762 source). Its ``deltat`` [s] is used as time increment for slip
3763 difference calculation. Either ``deltat`` or ``store`` should be
3764 given.
3765 :type store:
3766 :py:class:`~pyrocko.gf.store.Store`
3768 :param target:
3769 Target information, needed for interpolation method.
3770 :type target:
3771 optional, :py:class:`~pyrocko.gf.targets.Target`
3773 :param deltat:
3774 Time increment for slip difference calculation [s]. If not given
3775 ``store.deltat`` is used.
3776 :type deltat:
3777 optional, float
3779 :return:
3780 Seismic moment rate [Nm/s] for each time; corner times, for which
3781 moment rate is computed. The order of the moment rate array is:
3783 .. math::
3785 &[\\\\
3786 &(\\Delta M / \\Delta t)_{t1},\\\\
3787 &(\\Delta M / \\Delta t)_{t2},\\\\
3788 &...]\\\\
3790 :rtype:
3791 :py:class:`~numpy.ndarray`: ``(n_times, )``,
3792 :py:class:`~numpy.ndarray`: ``(n_times, )``
3793 '''
3794 if not deltat:
3795 deltat = store.config.deltat
3796 return self.discretize_basesource(
3797 store, target=target).get_moment_rate(deltat)
3799 def get_moment(self, *args, **kwargs):
3800 '''
3801 Get seismic cumulative moment.
3803 Additional ``*args`` and ``**kwargs`` are passed to
3804 :py:meth:`get_magnitude`.
3806 :returns:
3807 Cumulative seismic moment in [Nm].
3808 :rtype:
3809 float
3810 '''
3811 return float(pmt.magnitude_to_moment(self.get_magnitude(
3812 *args, **kwargs)))
3814 def rescale_slip(self, magnitude=None, moment=None, **kwargs):
3815 '''
3816 Rescale source slip based on given target magnitude or seismic moment.
3818 Rescale the maximum source slip to fit the source moment magnitude or
3819 seismic moment to the given target values. Either ``magnitude`` or
3820 ``moment`` need to be given. Additional ``**kwargs`` are passed to
3821 :py:meth:`get_moment`.
3823 :param magnitude:
3824 Target moment magnitude :math:`M_\\mathrm{w}` as in
3825 [Hanks and Kanamori, 1979]
3826 :type magnitude:
3827 optional, float
3829 :param moment:
3830 Target seismic moment :math:`M_0` [Nm].
3831 :type moment:
3832 optional, float
3833 '''
3834 if self.slip is None:
3835 self.slip = 1.
3836 logger.warning('No slip found for rescaling. '
3837 'An initial slip of 1 m is assumed.')
3839 if magnitude is None and moment is None:
3840 raise ValueError(
3841 'Either target magnitude or moment need to be given.')
3843 moment_init = self.get_moment(**kwargs)
3845 if magnitude is not None:
3846 moment = pmt.magnitude_to_moment(magnitude)
3848 self.slip *= moment / moment_init
3850 def get_centroid(self, store, *args, **kwargs):
3851 '''
3852 Centroid of the pseudo dynamic rupture model.
3854 The centroid location and time are derived from the locations and times
3855 of the individual patches weighted with their moment contribution.
3856 Additional ``**kwargs`` are passed to :py:meth:`pyrocko_moment_tensor`.
3858 :param store:
3859 Green's function database (needs to cover whole region of of the
3860 source). Its ``deltat`` [s] is used as time increment for slip
3861 difference calculation. Either ``deltat`` or ``store`` should be
3862 given.
3863 :type store:
3864 :py:class:`~pyrocko.gf.store.Store`
3866 :returns:
3867 The centroid location and associated moment tensor.
3868 :rtype:
3869 :py:class:`pyrocko.model.Event`
3870 '''
3871 _, _, _, _, time, _ = self.get_vr_time_interpolators(store)
3872 t_max = time.values.max()
3874 moment_rate, times = self.get_moment_rate_patches(deltat=t_max)
3876 moment = num.sum(moment_rate * times, axis=1)
3877 weights = moment / moment.sum()
3879 norths = self.get_patch_attribute('north_shift')
3880 easts = self.get_patch_attribute('east_shift')
3881 depths = self.get_patch_attribute('depth')
3883 centroid_n = num.sum(weights * norths)
3884 centroid_e = num.sum(weights * easts)
3885 centroid_d = num.sum(weights * depths)
3887 centroid_lat, centroid_lon = ne_to_latlon(
3888 self.lat, self.lon, centroid_n, centroid_e)
3890 moment_rate_, times = self.get_moment_rate(store)
3891 delta_times = num.concatenate((
3892 [times[1] - times[0]],
3893 num.diff(times)))
3894 moment_src = delta_times * moment_rate
3896 centroid_t = num.sum(
3897 moment_src / num.sum(moment_src) * times) + self.time
3899 mt = self.pyrocko_moment_tensor(store, *args, **kwargs)
3901 return model.Event(
3902 lat=centroid_lat,
3903 lon=centroid_lon,
3904 depth=centroid_d,
3905 time=centroid_t,
3906 moment_tensor=mt,
3907 magnitude=mt.magnitude,
3908 duration=t_max)
class DoubleDCSource(SourceWithMagnitude):
    '''
    Two double-couple point sources separated in space and time.

    The moment share between the sub-sources is controlled by the parameter
    ``mix``. ``depth``, ``east_shift`` and ``north_shift`` refer to the
    centroid between the two double-couples; the sub-sources are placed
    around this centroid according to their moment shares and the ``delta_*``
    parameters. Depths of the sub-sources can therefore be
    ``depth +/- delta_depth`` -- for shallow earthquakes the depth has to be
    chosen deep enough to avoid sampling above the surface.
    '''

    strike1 = Float.T(
        default=0.0,
        help='strike direction in [deg], measured clockwise from north')

    dip1 = Float.T(
        default=90.0,
        help='dip angle in [deg], measured downward from horizontal')

    azimuth = Float.T(
        default=0.0,
        help='azimuth to second double-couple [deg], '
             'measured at first, clockwise from north')

    rake1 = Float.T(
        default=0.0,
        help='rake angle in [deg], '
             'measured counter-clockwise from right-horizontal '
             'in on-plane view')

    strike2 = Float.T(
        default=0.0,
        help='strike direction in [deg], measured clockwise from north')

    dip2 = Float.T(
        default=90.0,
        help='dip angle in [deg], measured downward from horizontal')

    rake2 = Float.T(
        default=0.0,
        help='rake angle in [deg], '
             'measured counter-clockwise from right-horizontal '
             'in on-plane view')

    delta_time = Float.T(
        default=0.0,
        help='separation of double-couples in time (t2-t1) [s]')

    delta_depth = Float.T(
        default=0.0,
        help='difference in depth (z2-z1) [m]')

    distance = Float.T(
        default=0.0,
        help='distance between the two double-couples [m]')

    mix = Float.T(
        default=0.5,
        help='how to distribute the moment to the two doublecouples '
             'mix=0 -> m1=1 and m2=0; mix=1 -> m1=0, m2=1')

    stf1 = STF.T(
        optional=True,
        help='Source time function of subsource 1 '
             '(if given, overrides STF from attribute :py:gattr:`Source.stf`)')

    stf2 = STF.T(
        optional=True,
        help='Source time function of subsource 2 '
             '(if given, overrides STF from attribute :py:gattr:`Source.stf`)')

    discretized_source_class = meta.DiscretizedMTSource

    def base_key(self):
        # Location and mechanism parts of the key, joined with the STF keys
        # of both sub-sources.
        loc_key = (
            self.time, self.depth, self.lat, self.north_shift,
            self.lon, self.east_shift, type(self).__name__)
        mech_key = (
            self.strike1, self.dip1, self.rake1,
            self.strike2, self.dip2, self.rake2,
            self.delta_time, self.delta_depth,
            self.azimuth, self.distance, self.mix)
        return (
            loc_key
            + self.effective_stf1_pre().base_key()
            + self.effective_stf2_pre().base_key()
            + mech_key)

    def get_factor(self):
        return self.moment

    def effective_stf1_pre(self):
        # Per-subsource STF wins over the common one; unit pulse as default.
        return self.stf1 or self.stf or g_unit_pulse

    def effective_stf2_pre(self):
        return self.stf2 or self.stf or g_unit_pulse

    def effective_stf_post(self):
        return g_unit_pulse

    def split(self):
        '''
        Return the two sub-events as separate :py:class:`DCSource` objects.
        '''
        w1 = 1.0 - self.mix
        w2 = self.mix
        dn = math.cos(self.azimuth * d2r) * self.distance
        de = math.sin(self.azimuth * d2r) * self.distance

        dc1 = DCSource(
            lat=self.lat,
            lon=self.lon,
            time=self.time - self.delta_time * w2,
            north_shift=self.north_shift - dn * w2,
            east_shift=self.east_shift - de * w2,
            depth=self.depth - self.delta_depth * w2,
            moment=self.moment * w1,
            strike=self.strike1,
            dip=self.dip1,
            rake=self.rake1,
            stf=self.stf1 or self.stf)

        dc2 = DCSource(
            lat=self.lat,
            lon=self.lon,
            time=self.time + self.delta_time * w1,
            north_shift=self.north_shift + dn * w1,
            east_shift=self.east_shift + de * w1,
            depth=self.depth + self.delta_depth * w1,
            moment=self.moment * w2,
            strike=self.strike2,
            dip=self.dip2,
            rake=self.rake2,
            stf=self.stf2 or self.stf)

        return [dc1, dc2]

    def discretize_basesource(self, store, target=None):
        w1 = 1.0 - self.mix
        w2 = self.mix

        mot1 = pmt.MomentTensor(
            strike=self.strike1, dip=self.dip1, rake=self.rake1,
            scalar_moment=w1)
        mot2 = pmt.MomentTensor(
            strike=self.strike2, dip=self.dip2, rake=self.rake2,
            scalar_moment=w2)

        dn = math.cos(self.azimuth * d2r) * self.distance
        de = math.sin(self.azimuth * d2r) * self.distance

        deltat = store.config.deltat
        times1, amplitudes1 = self.effective_stf1_pre().discretize_t(
            deltat, self.time - self.delta_time * w2)
        times2, amplitudes2 = self.effective_stf2_pre().discretize_t(
            deltat, self.time + self.delta_time * w1)

        nt1 = times1.size
        nt2 = times2.size

        def paired(val1, val2):
            # One constant value per sub-source, repeated per STF sample.
            return num.concatenate(
                (num.repeat(val1, nt1), num.repeat(val2, nt2)))

        return meta.DiscretizedMTSource(
            lat=self.lat,
            lon=self.lon,
            times=num.concatenate((times1, times2)),
            north_shifts=paired(
                self.north_shift - dn * w2,
                self.north_shift + dn * w1),
            east_shifts=paired(
                self.east_shift - de * w2,
                self.east_shift + de * w1),
            depths=paired(
                self.depth - self.delta_depth * w2,
                self.depth + self.delta_depth * w1),
            m6s=num.vstack((
                mot1.m6()[num.newaxis, :] * amplitudes1[:, num.newaxis],
                mot2.m6()[num.newaxis, :] * amplitudes2[:, num.newaxis])))

    def pyrocko_moment_tensor(self, store=None, target=None):
        w1 = 1.0 - self.mix
        w2 = self.mix
        m1 = pmt.MomentTensor(
            strike=self.strike1, dip=self.dip1, rake=self.rake1,
            scalar_moment=w1 * self.moment).m()
        m2 = pmt.MomentTensor(
            strike=self.strike2, dip=self.dip2, rake=self.rake2,
            scalar_moment=w2 * self.moment).m()
        return pmt.MomentTensor(m=m1 + m2)

    def pyrocko_event(self, store=None, target=None, **kwargs):
        return SourceWithMagnitude.pyrocko_event(
            self, store, target,
            moment_tensor=self.pyrocko_moment_tensor(store, target),
            **kwargs)

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        d = {}
        mt = ev.moment_tensor
        if mt:
            # Initialize both sub-sources with the same mechanism; all
            # moment is assigned to the first one (mix=0).
            (strike, dip, rake), _ = mt.both_strike_dip_rake()
            d = dict(
                strike1=float(strike),
                dip1=float(dip),
                rake1=float(rake),
                strike2=float(strike),
                dip2=float(dip),
                rake2=float(rake),
                mix=0.0,
                magnitude=float(mt.moment_magnitude()))

        d.update(kwargs)
        source = super(DoubleDCSource, cls).from_pyrocko_event(ev, **d)
        source.stf1 = source.stf
        source.stf2 = HalfSinusoidSTF(effective_duration=0.)
        source.stf = None
        return source
class RingfaultSource(SourceWithMagnitude):
    '''
    A ring fault with vertical doublecouples.

    The ring is discretized into :py:gattr:`npointsources` point sources
    arranged on a circle of :py:gattr:`diameter`, rotated according to
    :py:gattr:`strike` and :py:gattr:`dip`.
    '''

    diameter = Float.T(
        default=1.0,
        help='diameter of the ring in [m]')

    sign = Float.T(
        default=1.0,
        help='inside of the ring moves up (+1) or down (-1)')

    strike = Float.T(
        default=0.0,
        help='strike direction of the ring plane, clockwise from north,'
             ' in [deg]')

    dip = Float.T(
        default=0.0,
        help='dip angle of the ring plane from horizontal in [deg]')

    npointsources = Int.T(
        default=360,
        help='number of point sources to use')

    discretized_source_class = meta.DiscretizedMTSource

    def base_key(self):
        # Extend the generic source key with the ring-specific parameters.
        return Source.base_key(self) + (
            self.strike, self.dip, self.diameter, self.npointsources)

    def get_factor(self):
        # Overall scaling: total moment with the polarity given by `sign`.
        return self.sign * self.moment

    def discretize_basesource(self, store=None, target=None):
        # NOTE(review): `store` defaults to None but is dereferenced below
        # (store.config.deltat) -- callers must always pass a store.
        n = self.npointsources
        # Angular position of each point source on the ring.
        phi = num.linspace(0, 2.0 * num.pi, n, endpoint=False)

        # Points on a horizontal circle, centered at the origin.
        points = num.zeros((n, 3))
        points[:, 0] = num.cos(phi) * 0.5 * self.diameter
        points[:, 1] = num.sin(phi) * 0.5 * self.diameter

        # Rotate the ring plane by dip and strike.
        rotmat = pmt.euler_to_matrix(self.dip * d2r, self.strike * d2r, 0.0)
        points = num.dot(rotmat.T, points.T).T  # !!! ? (transpose direction
        # of the rotation was already questioned by the original author)

        points[:, 0] += self.north_shift
        points[:, 1] += self.east_shift
        points[:, 2] += self.depth

        # Unit mechanism shared by all point sources; 1/n so the summed
        # moment is 1 before scaling by get_factor().
        m = num.array(pmt.MomentTensor(strike=90., dip=90., rake=-90.,
                                       scalar_moment=1.0 / n).m())

        # One rotation matrix per point source, rotating the base mechanism
        # around the vertical axis to its azimuth phi.
        rotmats = num.transpose(
            [[num.cos(phi), num.sin(phi), num.zeros(n)],
             [-num.sin(phi), num.cos(phi), num.zeros(n)],
             [num.zeros(n), num.zeros(n), num.ones(n)]], (2, 0, 1))

        ms = num.zeros((n, 3, 3))
        for i in range(n):
            # Rotate mechanism to its position on the ring, then into the
            # (possibly dipping) ring plane.
            mtemp = num.dot(rotmats[i].T, num.dot(m, rotmats[i]))
            ms[i, :, :] = num.dot(rotmat.T, num.dot(mtemp, rotmat))

        # Pack the symmetric tensors into 6-component (mnn, mee, mdd,
        # mne, mnd, med) rows.
        m6s = num.vstack((ms[:, 0, 0], ms[:, 1, 1], ms[:, 2, 2],
                          ms[:, 0, 1], ms[:, 0, 2], ms[:, 1, 2])).T

        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)

        nt = times.size

        # Repeat each point source for every STF sample; amplitudes scale
        # the mechanism per time sample.
        return meta.DiscretizedMTSource(
            times=num.tile(times, n),
            lat=self.lat,
            lon=self.lon,
            north_shifts=num.repeat(points[:, 0], nt),
            east_shifts=num.repeat(points[:, 1], nt),
            depths=num.repeat(points[:, 2], nt),
            m6s=num.repeat(m6s, nt, axis=0) * num.tile(
                amplitudes, n)[:, num.newaxis])
class CombiSource(Source):
    '''
    Composite source model.

    Combines several sub-sources into one source; the combined origin is the
    origin of the first sub-source, the combined time, depth and shifts are
    the means over all sub-sources.
    '''

    discretized_source_class = meta.DiscretizedMTSource

    subsources = List.T(Source.T())

    def __init__(self, subsources=None, **kwargs):
        '''
        :param subsources:
            Non-empty list of :py:class:`Source` objects.

        :raises BadRequest: if no sub-sources are given.
        '''
        # Bug fix: the default used to be a mutable `[]`.
        subsources = list(subsources) if subsources else []
        if not subsources:
            raise BadRequest(
                'Need at least one sub-source to create a CombiSource object.')

        lats = num.array(
            [subsource.lat for subsource in subsources], dtype=float)
        lons = num.array(
            [subsource.lon for subsource in subsources], dtype=float)

        lat, lon = lats[0], lons[0]

        # Move all sub-sources to a common origin if they differ.
        # Bug fix: the condition used to read `not A and B`, skipping the
        # re-origin step whenever only the longitudes differed; the intended
        # check is "not all origins equal".
        if not (num.all(lats == lat) and num.all(lons == lon)):
            subsources = [s.clone() for s in subsources]
            for subsource in subsources[1:]:
                subsource.set_origin(lat, lon)

        depth = float(num.mean([p.depth for p in subsources]))
        time = float(num.mean([p.time for p in subsources]))
        north_shift = float(num.mean([p.north_shift for p in subsources]))
        east_shift = float(num.mean([p.east_shift for p in subsources]))
        kwargs.update(
            time=time,
            lat=float(lat),
            lon=float(lon),
            north_shift=north_shift,
            east_shift=east_shift,
            depth=depth)

        Source.__init__(self, subsources=subsources, **kwargs)

    def get_factor(self):
        return 1.0

    def discretize_basesource(self, store, target=None):
        # Discretize each sub-source with its own scaling factor applied,
        # then merge into one discretized source.
        dsources = []
        for sf in self.subsources:
            ds = sf.discretize_basesource(store, target)
            ds.m6s *= sf.get_factor()
            dsources.append(ds)

        return meta.DiscretizedMTSource.combine(dsources)
class SFSource(Source):
    '''
    A single force point source.

    Supported GF schemes: `'elastic5'`.
    '''

    discretized_source_class = meta.DiscretizedSFSource

    fn = Float.T(
        default=0.,
        help='northward component of single force [N]')

    fe = Float.T(
        default=0.,
        help='eastward component of single force [N]')

    fd = Float.T(
        default=0.,
        help='downward component of single force [N]')

    def __init__(self, **kwargs):
        Source.__init__(self, **kwargs)

    def base_key(self):
        # Extend the generic key with the force components.
        return Source.base_key(self) + (self.fn, self.fe, self.fd)

    def get_factor(self):
        return 1.0

    def discretize_basesource(self, store, target=None):
        # One force vector per STF sample, scaled by the STF amplitudes.
        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)
        force_ned = num.array(
            [[self.fn, self.fe, self.fd]], dtype=float)
        forces = amplitudes[:, num.newaxis] * force_ned

        return meta.DiscretizedSFSource(
            forces=forces,
            **self._dparams_base_repeated(times))

    def pyrocko_event(self, store=None, target=None, **kwargs):
        return Source.pyrocko_event(
            self, store, target,
            **kwargs)

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        return super(SFSource, cls).from_pyrocko_event(ev, **dict(kwargs))
class PorePressurePointSource(Source):
    '''
    Excess pore pressure point source.

    For poro-elastic initial value problem where an excess pore pressure is
    brought into a small source volume.
    '''

    discretized_source_class = meta.DiscretizedPorePressureSource

    pp = Float.T(
        default=1.0,
        help='initial excess pore pressure in [Pa]')

    def base_key(self):
        # No extra parameters beyond the generic source key; `pp` enters
        # through get_factor() instead.
        return Source.base_key(self)

    def get_factor(self):
        # Scaling factor applied to the unit-pressure discretization below.
        return self.pp

    def discretize_basesource(self, store, target=None):
        # Unit pore pressure; the actual value is applied via get_factor().
        # NOTE(review): `arr` is presumably a module-level array helper
        # defined earlier in this file -- confirm.
        return meta.DiscretizedPorePressureSource(pp=arr(1.0),
                                                  **self._dparams_base())
class PorePressureLineSource(Source):
    '''
    Excess pore pressure line source.

    The line source is centered at (north_shift, east_shift, depth).
    '''

    discretized_source_class = meta.DiscretizedPorePressureSource

    pp = Float.T(
        default=1.0,
        help='initial excess pore pressure in [Pa]')

    length = Float.T(
        default=0.0,
        help='length of the line source [m]')

    azimuth = Float.T(
        default=0.0,
        help='azimuth direction, clockwise from north [deg]')

    dip = Float.T(
        default=90.,
        help='dip direction, downward from horizontal [deg]')

    def base_key(self):
        return Source.base_key(self) + (self.azimuth, self.dip, self.length)

    def get_factor(self):
        return self.pp

    def discretize_basesource(self, store, target=None):
        # Choose enough point sources to sample the line at least twice per
        # smallest GF grid spacing.
        n = 2 * int(math.ceil(self.length / num.min(store.config.deltas))) + 1

        # Offsets along the line, centered on the source location.
        offsets = num.linspace(-0.5 * self.length, 0.5 * self.length, n)

        azi = self.azimuth * d2r
        dip = self.dip * d2r
        sa = math.sin(azi)
        ca = math.cos(azi)
        sd = math.sin(dip)
        cd = math.cos(dip)

        norths = self.north_shift + offsets * ca * cd
        easts = self.east_shift + offsets * sa * cd
        depths = self.depth + offsets * sd

        # Unit total pressure distributed evenly over the points; the actual
        # pressure is applied via get_factor().
        return meta.DiscretizedPorePressureSource(
            times=util.num_full(n, self.time),
            lat=self.lat,
            lon=self.lon,
            north_shifts=norths,
            east_shifts=easts,
            depths=depths,
            pp=num.ones(n) / n)
class Request(Object):
    '''
    Synthetic seismogram computation request.

    ::

        Request(**kwargs)
        Request(sources, targets, **kwargs)
    '''

    sources = List.T(
        Source.T(),
        help='list of sources for which to produce synthetics.')

    targets = List.T(
        Target.T(),
        help='list of targets for which to produce synthetics.')

    @classmethod
    def args2kwargs(cls, args):
        # Map the supported positional call styles to keyword arguments.
        if len(args) not in (0, 2, 3):
            raise BadRequest('Invalid arguments.')

        if len(args) == 2:
            return dict(sources=args[0], targets=args[1])
        else:
            # NOTE(review): three positional arguments are accepted but
            # ignored here -- confirm whether this is intended.
            return {}

    def __init__(self, *args, **kwargs):
        kwargs.update(self.args2kwargs(args))
        sources = kwargs.pop('sources', [])
        targets = kwargs.pop('targets', [])

        # Allow single instances instead of lists.
        if isinstance(sources, Source):
            sources = [sources]

        if isinstance(targets, (Target, StaticTarget)):
            targets = [targets]

        Object.__init__(self, sources=sources, targets=targets, **kwargs)

    @property
    def targets_dynamic(self):
        # Waveform targets only.
        return [t for t in self.targets if isinstance(t, Target)]

    @property
    def targets_static(self):
        # Static displacement targets only.
        return [t for t in self.targets if isinstance(t, StaticTarget)]

    @property
    def has_dynamic(self):
        return bool(self.targets_dynamic)

    @property
    def has_statics(self):
        return bool(self.targets_static)

    def subsources_map(self):
        # Group sources sharing the same base key (and thus the same GF
        # access pattern).
        m = defaultdict(list)
        for source in self.sources:
            m[source.base_key()].append(source)

        return m

    def subtargets_map(self):
        # Group targets sharing the same base key.
        m = defaultdict(list)
        for target in self.targets:
            m[target.base_key()].append(target)

        return m

    def subrequest_map(self):
        # Cartesian product of source groups and target groups.
        ms = self.subsources_map()
        mt = self.subtargets_map()
        m = {}
        for (ks, ls) in ms.items():
            for (kt, lt) in mt.items():
                m[ks, kt] = (ls, lt)

        return m
class ProcessingStats(Object):
    '''
    Timing and accounting statistics collected while processing a request.

    The ``t_perc_*`` attributes hold percentages of the total processing
    time spent in the respective stage; ``t_wallclock`` and ``t_cpu`` are
    in seconds.
    '''

    # Percentages of time spent in the dynamic (waveform) processing stages.
    t_perc_get_store_and_receiver = Float.T(default=0.)
    t_perc_discretize_source = Float.T(default=0.)
    t_perc_make_base_seismogram = Float.T(default=0.)
    t_perc_make_same_span = Float.T(default=0.)
    t_perc_post_process = Float.T(default=0.)
    t_perc_optimize = Float.T(default=0.)
    t_perc_stack = Float.T(default=0.)
    # Percentages of time spent in the static processing stages.
    t_perc_static_get_store = Float.T(default=0.)
    t_perc_static_discretize_basesource = Float.T(default=0.)
    t_perc_static_sum_statics = Float.T(default=0.)
    t_perc_static_post_process = Float.T(default=0.)
    # Overall wall-clock and CPU time [s].
    t_wallclock = Float.T(default=0.)
    t_cpu = Float.T(default=0.)
    # Counters filled from resource usage and result bookkeeping.
    n_read_blocks = Int.T(default=0)
    n_results = Int.T(default=0)
    n_subrequests = Int.T(default=0)
    n_stores = Int.T(default=0)
    n_records_stacked = Int.T(default=0)
class Response(Object):
    '''
    Response object to a synthetic seismogram computation request.
    '''

    request = Request.T()
    # One inner list per source, one entry per target; entries may also be
    # SeismosizerError instances for failed subrequests.
    results_list = List.T(List.T(meta.SeismosizerResult.T()))
    stats = ProcessingStats.T()

    def pyrocko_traces(self):
        '''
        Return a list of requested
        :class:`~pyrocko.trace.Trace` instances.
        '''

        # Non-Result entries (e.g. errors) are silently skipped.
        return [
            result.trace.pyrocko_trace()
            for results in self.results_list
            for result in results
            if isinstance(result, meta.Result)]

    def kite_scenes(self):
        '''
        Return a list of requested
        :class:`~kite.scenes` instances.
        '''

        return [
            result.get_scene()
            for results in self.results_list
            for result in results
            if isinstance(result, meta.KiteSceneResult)]

    def static_results(self):
        '''
        Return a list of requested
        :class:`~pyrocko.gf.meta.StaticResult` instances.
        '''

        return [
            result
            for results in self.results_list
            for result in results
            if isinstance(result, meta.StaticResult)]

    def iter_results(self, get='pyrocko_traces'):
        '''
        Generator function to iterate over results of request.

        :param get: ``'pyrocko_traces'`` to yield converted traces,
            ``'results'`` to yield the raw result objects

        Yields associated :py:class:`Source`,
        :class:`~pyrocko.gf.targets.Target`,
        :class:`~pyrocko.trace.Trace` instances in each iteration.
        '''

        for isource, source in enumerate(self.request.sources):
            for itarget, target in enumerate(self.request.targets):
                result = self.results_list[isource][itarget]
                if get == 'pyrocko_traces':
                    yield source, target, result.trace.pyrocko_trace()
                elif get == 'results':
                    yield source, target, result

    def snuffle(self, **kwargs):
        '''
        Open *snuffler* with requested traces.
        '''

        trace.snuffle(self.pyrocko_traces(), **kwargs)
class Engine(Object):
    '''
    Base class for synthetic seismogram calculators.
    '''

    def get_store_ids(self):
        '''
        Get list of available GF store IDs
        '''

        # Base implementation knows no stores; subclasses override.
        return []
class Rule(object):
    '''
    Base class for rules mapping GF store components to a requested
    quantity at a target (see ``channel_rules`` and ``static_rules``).
    '''
    pass
class VectorRule(Rule):
    '''
    Rule projecting a 3-component (north, east, down) vector quantity onto
    a target's channel orientation, with optional time differentiation.
    '''

    def __init__(self, quantity, differentiate=0, integrate=0):
        self.components = [quantity + '.' + c for c in 'ned']
        self.differentiate = differentiate
        self.integrate = integrate

    def required_components(self, target):
        '''
        Return the GF components needed, skipping those whose projection
        factor vanishes for this target's orientation.
        '''
        comp_n, comp_e, comp_d = self.components
        sa, ca, sd, cd = target.get_sin_cos_factors()

        needed = []
        for factor, comp in [
                (ca * cd, comp_n), (sa * cd, comp_e), (sd, comp_d)]:
            if nonzero(factor):
                needed.append(comp)

        return tuple(needed)

    def apply_(self, target, base_seismogram):
        '''
        Project the base seismogram components onto the target channel.
        '''
        comp_n, comp_e, comp_d = self.components
        sa, ca, sd, cd = target.get_sin_cos_factors()

        data = 0.0
        for factor, comp in [
                (ca * cd, comp_n), (sa * cd, comp_e), (sd, comp_d)]:
            if nonzero(factor):
                data = data + base_seismogram[comp].data * factor
                deltat = base_seismogram[comp].deltat

        if self.differentiate:
            data = util.diff_fd(self.differentiate, 4, deltat, data)

        if self.integrate:
            raise NotImplementedError('Integration is not implemented yet.')

        return data
class HorizontalVectorRule(Rule):
    '''
    Rule projecting a horizontal (north, east) vector quantity onto a
    target's channel orientation, with optional time differentiation.
    '''

    def __init__(self, quantity, differentiate=0, integrate=0):
        self.components = [quantity + '.' + c for c in 'ne']
        self.differentiate = differentiate
        self.integrate = integrate

    def required_components(self, target):
        '''
        Return the GF components needed for this target's azimuth.
        '''
        comp_n, comp_e = self.components
        sa, ca, _, _ = target.get_sin_cos_factors()

        needed = []
        if nonzero(ca):
            needed.append(comp_n)

        if nonzero(sa):
            needed.append(comp_e)

        return tuple(needed)

    def apply_(self, target, base_seismogram):
        '''
        Project the base seismogram components onto the target channel.
        '''
        comp_n, comp_e = self.components
        sa, ca, _, _ = target.get_sin_cos_factors()

        data = 0.0
        if nonzero(ca):
            data = base_seismogram[comp_n].data * ca

        if nonzero(sa):
            data = data + base_seismogram[comp_e].data * sa

        if self.differentiate:
            # Sampling interval taken from the east component.
            deltat = base_seismogram[comp_e].deltat
            data = util.diff_fd(self.differentiate, 4, deltat, data)

        if self.integrate:
            raise NotImplementedError('Integration is not implemented yet.')

        return data
class ScalarRule(Rule):
    '''
    Rule passing a scalar quantity (e.g. pressure) straight through, with
    optional time differentiation.
    '''

    def __init__(self, quantity, differentiate=0):
        self.c = '%s.scalar' % quantity
        self.differentiate = differentiate

    def required_components(self, target):
        return (self.c, )

    def apply_(self, target, base_seismogram):
        seismogram = base_seismogram[self.c]
        # Copy so the base seismogram is never modified in place.
        data = seismogram.data.copy()
        if self.differentiate:
            data = util.diff_fd(
                self.differentiate, 4, seismogram.deltat, data)

        return data
class StaticDisplacement(Rule):
    '''
    Rule for static displacement targets; for satellite targets the
    NED displacements are additionally projected into line-of-sight.
    '''

    def required_components(self, target):
        return ('displacement.n', 'displacement.e', 'displacement.d')

    def apply_(self, target, base_statics):
        if isinstance(target, SatelliteTarget):
            los_fac = target.get_los_factors()
            down = base_statics['displacement.d']
            east = base_statics['displacement.e']
            north = base_statics['displacement.n']
            # LOS displacement: positive towards the satellite; note the
            # sign flip on the down component.
            base_statics['displacement.los'] = (
                -los_fac[:, 0] * down
                + los_fac[:, 1] * east
                + los_fac[:, 2] * north)

        return base_statics
# Candidate rules per requested dynamic quantity, in order of preference.
# The first rule whose required GF components are all provided by the
# store is used (see LocalEngine.get_rule). E.g. 'velocity' falls back to
# differentiating stored displacements if no velocity GFs are available.
channel_rules = {
    'displacement': [VectorRule('displacement')],
    'rotation': [VectorRule('rotation')],
    'velocity': [
        VectorRule('velocity'),
        VectorRule('displacement', differentiate=1)],
    'acceleration': [
        VectorRule('acceleration'),
        VectorRule('velocity', differentiate=1),
        VectorRule('displacement', differentiate=2)],
    'pore_pressure': [ScalarRule('pore_pressure')],
    'pressure': [ScalarRule('pressure')],
    'volume_change': [ScalarRule('volume_change')],
    'vertical_tilt': [HorizontalVectorRule('vertical_tilt')],
    'darcy_velocity': [VectorRule('darcy_velocity')],
}

# Rules for static (non-time-dependent) quantities.
static_rules = {
    'displacement': [StaticDisplacement()]
}
class OutOfBoundsContext(Object):
    '''
    Context information attached to out-of-bounds errors raised during
    seismogram computation, identifying the offending source/target pair.
    '''
    source = Source.T()
    target = Target.T()
    distance = Float.T()
    components = List.T(String.T())
def process_dynamic_timeseries(work, psources, ptargets, engine, nthreads=0):
    '''
    Process dynamic subrequests using batched timeseries computation.

    Targets are grouped per store, sample rate and interpolation scheme so
    that each group can be computed in one call to
    ``engine.base_seismograms``.

    :param work: list of ``(i, n, isources, itargets)`` work units
    :param psources: list of all sources of the request
    :param ptargets: list of all targets of the request
    :param engine: :py:class:`LocalEngine` doing the computation
    :param nthreads: number of threads to use

    Yields ``(isource, itarget, result), tcounters`` tuples.
    '''
    dsource_cache = {}
    # Placeholder counters: per-stage timing is not resolved in the batched
    # code path; num.diff of this gives constant unit increments.
    tcounters = list(range(6))

    store_ids = set()
    sources = set()
    targets = set()

    for itarget, target in enumerate(ptargets):
        target._id = itarget

    for w in work:
        _, _, isources, itargets = w

        sources.update([psources[isource] for isource in isources])
        targets.update([ptargets[itarget] for itarget in itargets])

    store_ids = set([t.store_id for t in targets])

    for isource, source in enumerate(psources):

        components = set()
        for itarget, target in enumerate(targets):
            rule = engine.get_rule(source, target)
            components.update(rule.required_components(target))

        for store_id in store_ids:
            store_targets = [t for t in targets if t.store_id == store_id]

            sample_rates = set([t.sample_rate for t in store_targets])
            interpolations = set([t.interpolation for t in store_targets])

            base_seismograms = []
            store_targets_out = []

            # Batch targets per (sample rate, interpolation) combination;
            # base_seismograms stays aligned with store_targets_out.
            for samp_rate in sample_rates:
                for interp in interpolations:
                    engine_targets = [
                        t for t in store_targets if t.sample_rate == samp_rate
                        and t.interpolation == interp]

                    if not engine_targets:
                        continue

                    store_targets_out += engine_targets

                    base_seismograms += engine.base_seismograms(
                        source,
                        engine_targets,
                        components,
                        dsource_cache,
                        nthreads)

            for iseis, seismogram in enumerate(base_seismograms):
                for tr in seismogram.values():
                    if tr.err != store.SeismosizerErrorEnum.SUCCESS:
                        # Bugfix: base_seismograms is aligned with
                        # store_targets_out (regrouped order), not with
                        # store_targets, so index the former to report the
                        # correct failing target.
                        e = SeismosizerError(
                            'Seismosizer failed with return code %i\n%s' % (
                                tr.err, str(
                                    OutOfBoundsContext(
                                        source=source,
                                        target=store_targets_out[iseis],
                                        distance=source.distance_to(
                                            store_targets_out[iseis]),
                                        components=components))))
                        raise e

            for seismogram, target in zip(base_seismograms, store_targets_out):

                try:
                    result = engine._post_process_dynamic(
                        seismogram, source, target)
                except SeismosizerError as e:
                    # Report the failure as the result for this pair instead
                    # of aborting the whole request.
                    result = e

                yield (isource, target._id, result), tcounters
def process_dynamic(work, psources, ptargets, engine, nthreads=0):
    '''
    Process dynamic subrequests one source-target pair at a time.

    :param work: list of ``(i, n, isources, itargets)`` work units
    :param psources: list of all sources of the request
    :param ptargets: list of all targets of the request
    :param engine: :py:class:`LocalEngine` doing the computation
    :param nthreads: number of threads to use

    Yields ``(isource, itarget, result), tcounters`` tuples, where
    ``tcounters`` are the timestamps collected in
    ``engine.base_seismogram`` plus a final post-processing timestamp.
    '''
    # Discretized base sources are cached across pairs sharing a source.
    dsource_cache = {}

    for w in work:
        _, _, isources, itargets = w

        sources = [psources[isource] for isource in isources]
        targets = [ptargets[itarget] for itarget in itargets]

        # All sources in one work unit share a base key, so the rule of the
        # first source applies to all of them.
        components = set()
        for target in targets:
            rule = engine.get_rule(sources[0], target)
            components.update(rule.required_components(target))

        for isource, source in zip(isources, sources):
            for itarget, target in zip(itargets, targets):

                try:
                    base_seismogram, tcounters = engine.base_seismogram(
                        source, target, components, dsource_cache, nthreads)
                except meta.OutOfBounds as e:
                    # NOTE(review): the context reports sources[0]/targets[0]
                    # of the work unit, not the pair currently being
                    # processed — presumably representative of the whole
                    # group; confirm intent.
                    e.context = OutOfBoundsContext(
                        source=sources[0],
                        target=targets[0],
                        distance=sources[0].distance_to(targets[0]),
                        components=components)
                    raise

                # Accumulate stacking statistics over all GF components.
                n_records_stacked = 0
                t_optimize = 0.0
                t_stack = 0.0

                for _, tr in base_seismogram.items():
                    n_records_stacked += tr.n_records_stacked
                    t_optimize += tr.t_optimize
                    t_stack += tr.t_stack

                try:
                    result = engine._post_process_dynamic(
                        base_seismogram, source, target)
                    result.n_records_stacked = n_records_stacked
                    # The base seismogram is shared by all pairs in this
                    # work unit.
                    result.n_shared_stacking = len(sources) *\
                        len(targets)
                    result.t_optimize = t_optimize
                    result.t_stack = t_stack
                except SeismosizerError as e:
                    # Failure becomes the result for this pair; processing
                    # of the remaining pairs continues.
                    result = e

                tcounters.append(xtime())
                yield (isource, itarget, result), tcounters
def process_static(work, psources, ptargets, engine, nthreads=0):
    '''
    Process static subrequests one source-target pair at a time.

    :param work: list of ``(i, n, isources, itargets)`` work units
    :param psources: list of all sources of the request
    :param ptargets: list of all targets of the request
    :param engine: :py:class:`LocalEngine` doing the computation
    :param nthreads: number of threads to use

    Yields ``(isource, itarget, result), tcounters`` tuples.
    '''
    for _, _, isources, itargets in work:
        sources = [psources[i] for i in isources]
        targets = [ptargets[i] for i in itargets]

        for isource, source in zip(isources, sources):
            for itarget, target in zip(itargets, targets):
                rule = engine.get_rule(source, target)
                components = rule.required_components(target)

                try:
                    base_statics, tcounters = engine.base_statics(
                        source, target, components, nthreads)

                except meta.OutOfBounds as e:
                    # Attach context identifying the offending work unit.
                    e.context = OutOfBoundsContext(
                        source=sources[0],
                        target=targets[0],
                        distance=float('nan'),
                        components=components)
                    raise

                result = engine._post_process_statics(
                    base_statics, source, target)

                tcounters.append(xtime())
                yield (isource, itarget, result), tcounters
class LocalEngine(Engine):
    '''
    Offline synthetic seismogram calculator.

    :param use_env: if ``True``, fill :py:attr:`store_superdirs` and
        :py:attr:`store_dirs` with paths set in environment variables
        GF_STORE_SUPERDIRS and GF_STORE_DIRS.
    :param use_config: if ``True``, fill :py:attr:`store_superdirs` and
        :py:attr:`store_dirs` with paths set in the user's config file.

        The config file can be found at :file:`~/.pyrocko/config.pf`

        .. code-block :: python

            gf_store_dirs: ['/home/pyrocko/gf_stores/ak135/']
            gf_store_superdirs: ['/home/pyrocko/gf_stores/']
    '''

    store_superdirs = List.T(
        String.T(),
        help='directories which are searched for Green\'s function stores')

    store_dirs = List.T(
        String.T(),
        help='additional individual Green\'s function store directories')

    default_store_id = String.T(
        optional=True,
        help='default store ID to be used when a request does not provide '
             'one')

    def __init__(self, **kwargs):
        use_env = kwargs.pop('use_env', False)
        use_config = kwargs.pop('use_config', False)
        Engine.__init__(self, **kwargs)
        if use_env:
            env_store_superdirs = os.environ.get('GF_STORE_SUPERDIRS', '')
            env_store_dirs = os.environ.get('GF_STORE_DIRS', '')

            if env_store_superdirs:
                self.store_superdirs.extend(env_store_superdirs.split(':'))

            if env_store_dirs:
                self.store_dirs.extend(env_store_dirs.split(':'))

        if use_config:
            c = config.config()
            self.store_superdirs.extend(c.gf_store_superdirs)
            self.store_dirs.extend(c.gf_store_dirs)

        self._check_store_dirs_type()
        self._id_to_store_dir = {}
        self._open_stores = {}
        self._effective_default_store_id = None

    def _check_store_dirs_type(self):
        # Guard against accidentally passing a plain string instead of a
        # list of strings for the directory attributes.
        for sdir in ['store_dirs', 'store_superdirs']:
            if not isinstance(self.__getattribute__(sdir), list):
                raise TypeError("{} of {} is not of type list".format(
                    sdir, self.__class__.__name__))

    def _get_store_id(self, store_dir):
        # Briefly open the store just to read its ID from the config.
        store_ = store.Store(store_dir)
        store_id = store_.config.id
        store_.close()
        return store_id

    def _looks_like_store_dir(self, store_dir):
        # A store directory must contain 'index', 'traces' and 'config'.
        return os.path.isdir(store_dir) and \
            all(os.path.isfile(pjoin(store_dir, x)) for x in
                ('index', 'traces', 'config'))

    def iter_store_dirs(self):
        '''
        Get the set of all detected GF store directories.
        '''
        store_dirs = set()
        for d in self.store_superdirs:
            if not os.path.exists(d):
                logger.warning('store_superdir not available: %s' % d)
                continue

            for entry in os.listdir(d):
                store_dir = os.path.realpath(pjoin(d, entry))
                if self._looks_like_store_dir(store_dir):
                    store_dirs.add(store_dir)

        for store_dir in self.store_dirs:
            store_dirs.add(os.path.realpath(store_dir))

        return store_dirs

    def _scan_stores(self):
        # Populate the store-ID -> directory mapping, detecting ID clashes.
        for store_dir in self.iter_store_dirs():
            store_id = self._get_store_id(store_dir)
            if store_id not in self._id_to_store_dir:
                self._id_to_store_dir[store_id] = store_dir
            else:
                if store_dir != self._id_to_store_dir[store_id]:
                    raise DuplicateStoreId(
                        'GF store ID %s is used in (at least) two '
                        'different stores. Locations are: %s and %s' %
                        (store_id, self._id_to_store_dir[store_id], store_dir))

    def get_store_dir(self, store_id):
        '''
        Lookup directory given a GF store ID.

        :raises NoSuchStore: if no store with the given ID can be found
        '''

        if store_id not in self._id_to_store_dir:
            self._scan_stores()

        if store_id not in self._id_to_store_dir:
            raise NoSuchStore(store_id, self.iter_store_dirs())

        return self._id_to_store_dir[store_id]

    def get_store_ids(self):
        '''
        Get list of available store IDs.
        '''

        self._scan_stores()
        return sorted(self._id_to_store_dir.keys())

    def effective_default_store_id(self):
        # Resolve and cache the default store ID; if none is configured,
        # it is unambiguous only when exactly one store is available.
        if self._effective_default_store_id is None:
            if self.default_store_id is None:
                store_ids = self.get_store_ids()
                if len(store_ids) == 1:
                    self._effective_default_store_id = store_ids[0]
                else:
                    raise NoDefaultStoreSet()
            else:
                self._effective_default_store_id = self.default_store_id

        return self._effective_default_store_id

    def get_store(self, store_id=None):
        '''
        Get a store from the engine.

        :param store_id: identifier of the store (optional)
        :returns: :py:class:`~pyrocko.gf.store.Store` object

        If no ``store_id`` is provided the store
        associated with the :py:attr:`default_store_id` is returned.
        Raises :py:exc:`NoDefaultStoreSet` if :py:attr:`default_store_id` is
        undefined.
        '''

        if store_id is None:
            store_id = self.effective_default_store_id()

        if store_id not in self._open_stores:
            store_dir = self.get_store_dir(store_id)
            self._open_stores[store_id] = store.Store(store_dir)

        return self._open_stores[store_id]

    def get_store_config(self, store_id):
        '''
        Get the configuration of the store with the given ID.
        '''
        # Renamed local from 'store' to 'store_' to avoid shadowing the
        # imported store module (consistent with the other methods).
        store_ = self.get_store(store_id)
        return store_.config

    def get_store_extra(self, store_id, key):
        '''
        Get extra information stored under ``key`` in the given store.
        '''
        store_ = self.get_store(store_id)
        return store_.get_extra(key)

    def close_cashed_stores(self):
        '''
        Close and remove ids from cashed stores.
        '''
        # Name kept as-is ('cashed' [sic]) for backward compatibility.
        store_ids = []
        for store_id, store_ in self._open_stores.items():
            store_.close()
            store_ids.append(store_id)

        for store_id in store_ids:
            self._open_stores.pop(store_id)

    def get_rule(self, source, target):
        '''
        Select the first applicable rule to compute the target's quantity
        from the components provided by the target's GF store.

        :raises BadRequest: if no rule matches
        '''
        cprovided = self.get_store(target.store_id).get_provided_components()

        if isinstance(target, StaticTarget):
            quantity = target.quantity
            available_rules = static_rules
        elif isinstance(target, Target):
            quantity = target.effective_quantity()
            available_rules = channel_rules

        try:
            for rule in available_rules[quantity]:
                cneeded = rule.required_components(target)
                if all(c in cprovided for c in cneeded):
                    return rule

        except KeyError:
            pass

        raise BadRequest(
            'No rule to calculate "%s" with GFs from store "%s" '
            'for source model "%s".' % (
                target.effective_quantity(),
                target.store_id,
                source.__class__.__name__))

    def _cached_discretize_basesource(self, source, store, cache, target):
        # Memoize source discretization per (source, store) pair.
        if (source, store) not in cache:
            cache[source, store] = source.discretize_basesource(store, target)

        return cache[source, store]

    def base_seismograms(self, source, targets, components, dsource_cache,
                         nthreads=0):
        '''
        Compute base seismograms for one source and a batch of targets.

        All targets must share store, interpolation scheme and sample
        rate.

        :raises BadRequest: if the targets are inhomogeneous
        '''

        target = targets[0]

        interp = set([t.interpolation for t in targets])
        if len(interp) > 1:
            raise BadRequest('Targets have different interpolation schemes.')

        rates = set([t.sample_rate for t in targets])
        if len(rates) > 1:
            raise BadRequest('Targets have different sample rates.')

        store_ = self.get_store(target.store_id)
        receivers = [t.receiver(store_) for t in targets]

        if target.sample_rate is not None:
            deltat = 1. / target.sample_rate
            rate = target.sample_rate
        else:
            deltat = None
            rate = store_.config.sample_rate

        tmin = num.fromiter(
            (t.tmin for t in targets), dtype=float, count=len(targets))
        tmax = num.fromiter(
            (t.tmax for t in targets), dtype=float, count=len(targets))

        # Targets with undefined (non-finite) time spans get nsamples == -1.
        mask = num.logical_and(num.isfinite(tmin), num.isfinite(tmax))

        itmin = num.zeros_like(tmin, dtype=num.int64)
        itmax = num.zeros_like(tmin, dtype=num.int64)

        itmin[mask] = num.floor(tmin[mask] * rate).astype(num.int64)
        itmax[mask] = num.ceil(tmax[mask] * rate).astype(num.int64)
        nsamples = itmax - itmin + 1
        nsamples[num.logical_not(mask)] = -1

        base_source = self._cached_discretize_basesource(
            source, store_, dsource_cache, target)

        base_seismograms = store_.calc_seismograms(
            base_source, receivers, components,
            deltat=deltat,
            itmin=itmin, nsamples=nsamples,
            interpolation=target.interpolation,
            optimization=target.optimization,
            nthreads=nthreads)

        for i, base_seismogram in enumerate(base_seismograms):
            base_seismograms[i] = store.make_same_span(base_seismogram)

        return base_seismograms

    def base_seismogram(self, source, target, components, dsource_cache,
                        nthreads):
        '''
        Compute the base seismogram for a single source-target pair.

        :returns: ``(base_seismogram, tcounters)`` where ``tcounters`` are
            timestamps delimiting the processing stages
        '''

        tcounters = [xtime()]

        store_ = self.get_store(target.store_id)
        receiver = target.receiver(store_)

        # Bugfix: was 'if target.tmin and target.tmax is not None:', which
        # wrongly treated tmin == 0.0 as unset.
        if target.tmin is not None and target.tmax is not None:
            rate = store_.config.sample_rate
            itmin = int(num.floor(target.tmin * rate))
            itmax = int(num.ceil(target.tmax * rate))
            nsamples = itmax - itmin + 1
        else:
            itmin = None
            nsamples = None

        tcounters.append(xtime())
        base_source = self._cached_discretize_basesource(
            source, store_, dsource_cache, target)

        tcounters.append(xtime())

        if target.sample_rate is not None:
            deltat = 1. / target.sample_rate
        else:
            deltat = None

        base_seismogram = store_.seismogram(
            base_source, receiver, components,
            deltat=deltat,
            itmin=itmin, nsamples=nsamples,
            interpolation=target.interpolation,
            optimization=target.optimization,
            nthreads=nthreads)

        tcounters.append(xtime())

        base_seismogram = store.make_same_span(base_seismogram)

        tcounters.append(xtime())

        return base_seismogram, tcounters

    def base_statics(self, source, target, components, nthreads):
        '''
        Compute base static displacements for a source-target pair.

        :returns: ``(base_statics, tcounters)`` where ``tcounters`` are
            timestamps delimiting the processing stages
        '''
        tcounters = [xtime()]
        store_ = self.get_store(target.store_id)

        if target.tsnapshot is not None:
            rate = store_.config.sample_rate
            itsnapshot = int(num.floor(target.tsnapshot * rate))
        else:
            itsnapshot = None
        tcounters.append(xtime())

        base_source = source.discretize_basesource(store_, target=target)

        tcounters.append(xtime())

        base_statics = store_.statics(
            base_source,
            target,
            itsnapshot,
            components,
            target.interpolation,
            nthreads)

        tcounters.append(xtime())

        return base_statics, tcounters

    def _post_process_dynamic(self, base_seismogram, source, target):
        # Combine GF components per the applicable rule, scale, convolve
        # with the source time function and hand over to the target.
        base_any = next(iter(base_seismogram.values()))
        deltat = base_any.deltat
        itmin = base_any.itmin

        rule = self.get_rule(source, target)
        data = rule.apply_(target, base_seismogram)

        factor = source.get_factor() * target.get_factor()
        if factor != 1.0:
            data = data * factor

        stf = source.effective_stf_post()

        times, amplitudes = stf.discretize_t(
            deltat, 0.0)

        # repeat end point to prevent boundary effects
        padded_data = num.empty(data.size + amplitudes.size, dtype=float)
        padded_data[:data.size] = data
        padded_data[data.size:] = data[-1]
        data = num.convolve(amplitudes, padded_data)

        tmin = itmin * deltat + times[0]

        tr = meta.SeismosizerTrace(
            codes=target.codes,
            data=data[:-amplitudes.size],
            deltat=deltat,
            tmin=tmin)

        return target.post_process(self, source, tr)

    def _post_process_statics(self, base_statics, source, starget):
        rule = self.get_rule(source, starget)
        # The rule mutates/extends base_statics in place and returns it.
        data = rule.apply_(starget, base_statics)

        factor = source.get_factor()
        if factor != 1.0:
            # In-place scaling also affects base_statics (same arrays).
            for v in data.values():
                v *= factor

        return starget.post_process(self, source, base_statics)

    def process(self, *args, **kwargs):
        '''
        Process a request.

        ::

            process(**kwargs)
            process(request, **kwargs)
            process(sources, targets, **kwargs)

        The request can be given as a :py:class:`Request` object, or such an
        object is created using ``Request(**kwargs)`` for convenience.

        :returns: :py:class:`Response` object
        '''

        if len(args) not in (0, 1, 2):
            raise BadRequest('Invalid arguments.')

        if len(args) == 1:
            kwargs['request'] = args[0]

        elif len(args) == 2:
            kwargs.update(Request.args2kwargs(args))

        request = kwargs.pop('request', None)
        status_callback = kwargs.pop('status_callback', None)
        calc_timeseries = kwargs.pop('calc_timeseries', True)

        # 'nprocs' is a deprecated alias for 'nthreads'.
        nprocs = kwargs.pop('nprocs', None)
        nthreads = kwargs.pop('nthreads', 1)
        if nprocs is not None:
            nthreads = nprocs

        if request is None:
            request = Request(**kwargs)

        if resource:
            rs0 = resource.getrusage(resource.RUSAGE_SELF)
            rc0 = resource.getrusage(resource.RUSAGE_CHILDREN)
        tt0 = xtime()

        # make sure stores are open before fork()
        store_ids = set(target.store_id for target in request.targets)
        for store_id in store_ids:
            self.get_store(store_id)

        source_index = dict((x, i) for (i, x) in
                            enumerate(request.sources))
        target_index = dict((x, i) for (i, x) in
                            enumerate(request.targets))

        m = request.subrequest_map()

        skeys = sorted(m.keys(), key=cmp_to_key(cmp_none_aware))
        results_list = []

        for i in range(len(request.sources)):
            results_list.append([None] * len(request.targets))

        tcounters_dyn_list = []
        tcounters_static_list = []
        nsub = len(skeys)
        isub = 0

        # Processing dynamic targets through
        # parimap(process_subrequest_dynamic)

        if calc_timeseries:
            _process_dynamic = process_dynamic_timeseries
        else:
            _process_dynamic = process_dynamic

        if request.has_dynamic:
            work_dynamic = [
                (i, nsub,
                 [source_index[source] for source in m[k][0]],
                 [target_index[target] for target in m[k][1]
                  if not isinstance(target, StaticTarget)])
                for (i, k) in enumerate(skeys)]

            for ii_results, tcounters_dyn in _process_dynamic(
                    work_dynamic, request.sources, request.targets, self,
                    nthreads):

                tcounters_dyn_list.append(num.diff(tcounters_dyn))
                isource, itarget, result = ii_results
                results_list[isource][itarget] = result

                if status_callback:
                    status_callback(isub, nsub)

                isub += 1

        # Processing static targets through process_static
        if request.has_statics:
            work_static = [
                (i, nsub,
                 [source_index[source] for source in m[k][0]],
                 [target_index[target] for target in m[k][1]
                  if isinstance(target, StaticTarget)])
                for (i, k) in enumerate(skeys)]

            for ii_results, tcounters_static in process_static(
                    work_static, request.sources, request.targets, self,
                    nthreads=nthreads):

                tcounters_static_list.append(num.diff(tcounters_static))
                isource, itarget, result = ii_results
                results_list[isource][itarget] = result

                if status_callback:
                    status_callback(isub, nsub)

                isub += 1

        if status_callback:
            status_callback(nsub, nsub)

        # NOTE(review): start time uses xtime() but end time uses
        # time.time() — fine if xtime is an alias of time.time; confirm.
        tt1 = time.time()
        if resource:
            rs1 = resource.getrusage(resource.RUSAGE_SELF)
            rc1 = resource.getrusage(resource.RUSAGE_CHILDREN)

        s = ProcessingStats()

        if request.has_dynamic:
            tcumu_dyn = num.sum(num.vstack(tcounters_dyn_list), axis=0)
            t_dyn = float(num.sum(tcumu_dyn))
            perc_dyn = map(float, tcumu_dyn / t_dyn * 100.)
            (s.t_perc_get_store_and_receiver,
             s.t_perc_discretize_source,
             s.t_perc_make_base_seismogram,
             s.t_perc_make_same_span,
             s.t_perc_post_process) = perc_dyn
        else:
            t_dyn = 0.

        if request.has_statics:
            tcumu_static = num.sum(num.vstack(tcounters_static_list), axis=0)
            t_static = num.sum(tcumu_static)
            perc_static = map(float, tcumu_static / t_static * 100.)
            (s.t_perc_static_get_store,
             s.t_perc_static_discretize_basesource,
             s.t_perc_static_sum_statics,
             s.t_perc_static_post_process) = perc_static

        s.t_wallclock = tt1 - tt0
        if resource:
            s.t_cpu = (
                (rs1.ru_utime + rs1.ru_stime + rc1.ru_utime + rc1.ru_stime) -
                (rs0.ru_utime + rs0.ru_stime + rc0.ru_utime + rc0.ru_stime))
            s.n_read_blocks = (
                (rs1.ru_inblock + rc1.ru_inblock) -
                (rs0.ru_inblock + rc0.ru_inblock))

        n_records_stacked = 0.
        for results in results_list:
            for result in results:
                if not isinstance(result, meta.Result):
                    continue
                shr = float(result.n_shared_stacking)
                n_records_stacked += result.n_records_stacked / shr
                s.t_perc_optimize += result.t_optimize / shr
                s.t_perc_stack += result.t_stack / shr
        s.n_records_stacked = int(n_records_stacked)
        # NOTE(review): '/= t_dyn * 100' divides by (t_dyn * 100); if a
        # percentage is intended this should probably be '*= 100. / t_dyn'.
        # Kept as-is to preserve reported values; confirm.
        if t_dyn != 0.:
            s.t_perc_optimize /= t_dyn * 100
            s.t_perc_stack /= t_dyn * 100

        return Response(
            request=request,
            results_list=results_list,
            stats=s)
class RemoteEngine(Engine):
    '''
    Client for remote synthetic seismogram calculator.
    '''

    site = String.T(default=ws.g_default_site, optional=True)
    url = String.T(default=ws.g_url, optional=True)

    def process(self, request=None, status_callback=None, **kwargs):
        '''
        Process a request remotely via the web service.

        :param request: :py:class:`Request` object; if ``None``, one is
            built from ``kwargs`` for convenience
        :returns: result of the remote computation
        '''
        # NOTE(review): status_callback is accepted but never forwarded to
        # the web service call — presumably progress reporting is not
        # supported remotely; confirm.

        if request is None:
            request = Request(**kwargs)

        return ws.seismosizer(url=self.url, site=self.site, request=request)
# Module-level singleton engine, created lazily by get_engine().
g_engine = None


def get_engine(store_superdirs=None):
    '''
    Get the shared global :py:class:`LocalEngine` instance, creating it on
    first use with environment and config based store directories.

    :param store_superdirs: optional additional store superdirectories to
        register with the engine
    :returns: :py:class:`LocalEngine` instance
    '''
    # Default changed from a mutable '[]' to None (mutable default
    # argument anti-pattern); behavior for callers is unchanged.
    global g_engine
    if g_engine is None:
        g_engine = LocalEngine(use_env=True, use_config=True)

    for d in store_superdirs or []:
        if d not in g_engine.store_superdirs:
            g_engine.store_superdirs.append(d)

    return g_engine
class SourceGroup(Object):
    '''
    Base class for collections of sources.

    Attribute access on the group gathers the corresponding attribute of
    every member source into a float array.
    '''

    def __getattr__(self, name):
        values = (getattr(source, name) for source in self)
        return num.fromiter(values, dtype=float)

    def __iter__(self):
        raise NotImplementedError(
            'This method should be implemented in subclass.')

    def __len__(self):
        raise NotImplementedError(
            'This method should be implemented in subclass.')
class SourceList(SourceGroup):
    '''
    Group of sources given as an explicit list.
    '''
    sources = List.T(Source.T())

    def append(self, s):
        # Add a single source to the end of the list.
        self.sources.append(s)

    def __iter__(self):
        return iter(self.sources)

    def __len__(self):
        return len(self.sources)
class SourceGrid(SourceGroup):
    '''
    Regular grid of sources, given by value ranges over selected
    parameters of a base source.
    '''

    base = Source.T()
    variables = Dict.T(String.T(), Range.T())
    order = List.T(String.T())

    def __len__(self):
        # Grid size is the product of the lengths of all parameter ranges.
        n = 1
        for (k, v) in self.make_coords(self.base):
            n *= len(list(v))

        return n

    def __iter__(self):
        # Yield one regularized clone of the base source per grid node.
        for items in permudef(self.make_coords(self.base)):
            s = self.base.clone(**{k: v for (k, v) in items})
            s.regularize()
            yield s

    def ordered_params(self):
        '''
        Yield the varied parameter names, honoring the ``order`` attribute
        first, then the base source's own parameter order.

        :raises Exception: if ``variables`` contains a name unknown to the
            base source type
        '''
        ks = list(self.variables.keys())
        for k in self.order + list(self.base.keys()):
            if k in ks:
                yield k
                # Remove so leftovers can be reported as invalid below.
                ks.remove(k)
        if ks:
            raise Exception('Invalid parameter "%s" for source type "%s".' %
                            (ks[0], self.base.__class__.__name__))

    def make_coords(self, base):
        # Materialize (parameter, values) pairs from the configured ranges,
        # each anchored at the base source's value for that parameter.
        return [(param, self.variables[param].make(base=base[param]))
                for param in self.ordered_params()]
# All user-facing source model classes, re-exported via __all__ below.
source_classes = [
    Source,
    SourceWithMagnitude,
    SourceWithDerivedMagnitude,
    ExplosionSource,
    RectangularExplosionSource,
    DCSource,
    CLVDSource,
    VLVDSource,
    MTSource,
    RectangularSource,
    PseudoDynamicRupture,
    DoubleDCSource,
    RingfaultSource,
    CombiSource,
    SFSource,
    PorePressurePointSource,
    PorePressureLineSource,
]

# All source-time-function classes, re-exported via __all__ below.
stf_classes = [
    STF,
    BoxcarSTF,
    TriangularSTF,
    HalfSinusoidSTF,
    ResonatorSTF,
]

# Public API of this module: explicitly listed names plus the names of all
# source and STF classes above.
__all__ = '''
SeismosizerError
BadRequest
NoSuchStore
DerivedMagnitudeError
STFMode
'''.split() + [S.__name__ for S in source_classes + stf_classes] + '''
Request
ProcessingStats
Response
Engine
LocalEngine
RemoteEngine
source_classes
get_engine
Range
SourceGroup
SourceList
SourceGrid
map_anchor
'''.split()