1# http://pyrocko.org - GPLv3
2#
3# The Pyrocko Developers, 21st Century
4# ---|P------/S----------~Lg----------
6'''
10.. _coordinate-system-names:
12Coordinate systems
13..................
15Coordinate system names commonly used in source models.
17================= ============================================
18Name Description
19================= ============================================
20``'xyz'`` northing, easting, depth in [m]
21``'xy'`` northing, easting in [m]
22``'latlon'`` latitude, longitude in [deg]
23``'lonlat'`` longitude, latitude in [deg]
24``'latlondepth'`` latitude, longitude in [deg], depth in [m]
25================= ============================================
26'''
28from collections import defaultdict
29from functools import cmp_to_key
30import time
31import math
32import os
33import re
34import logging
35try:
36 import resource
37except ImportError:
38 resource = None
39from hashlib import sha1
41import numpy as num
42from scipy.interpolate import RegularGridInterpolator
44from pyrocko.guts import (Object, Float, String, StringChoice, List,
45 Timestamp, Int, SObject, ArgumentError, Dict,
46 ValidationError, Bool)
47from pyrocko.guts_array import Array
49from pyrocko import moment_tensor as pmt
50from pyrocko import trace, util, config, model, eikonal_ext
51from pyrocko.orthodrome import ne_to_latlon
52from pyrocko.model import Location
53from pyrocko.modelling import OkadaSource, make_okada_coefficient_matrix, \
54 okada_ext, invert_fault_dislocations_bem
56from . import meta, store, ws
57from .tractions import TractionField, DirectedTractions
58from .targets import Target, StaticTarget, SatelliteTarget
pjoin = os.path.join

# Prefix used by guts for serialized type names of this module.
guts_prefix = 'pf'

# Angle conversion factors and distance unit.
d2r = math.pi / 180.  # degrees -> radians
r2d = 180. / math.pi  # radians -> degrees
km = 1e3  # kilometre in [m]

logger = logging.getLogger('pyrocko.gf.seismosizer')
def cmp_none_aware(a, b):
    '''
    Three-way comparison which tolerates ``None`` values.

    Tuples are compared element-wise (recursively). ``None`` sorts before
    any other value.

    :returns: ``-1``, ``0`` or ``1``.
    '''
    if isinstance(a, tuple) and isinstance(b, tuple):
        # Lexicographic comparison: first differing element decides.
        for ea, eb in zip(a, b):
            result = cmp_none_aware(ea, eb)
            if result != 0:
                return result

        return 0

    a_is_none = a is None
    b_is_none = b is None

    if a_is_none and b_is_none:
        return 0

    if a_is_none:
        return -1

    if b_is_none:
        return 1

    return bool(a > b) - bool(a < b)
def xtime():
    '''
    Return the current wall-clock time in seconds since the epoch.
    '''
    return time.time()
class SeismosizerError(Exception):
    '''
    Base class for errors raised by the seismosizer module.
    '''
    pass
class BadRequest(SeismosizerError):
    '''
    Raised when an invalid processing request is made.
    '''
    pass
class DuplicateStoreId(Exception):
    '''
    Raised when two GF stores with the same id are encountered.
    '''
    pass
class NoDefaultStoreSet(Exception):
    '''
    Raised when a default GF store is required but none is configured.
    '''
    pass
class ConversionError(Exception):
    '''
    Raised when a conversion between representations fails.
    '''
    pass
class NoSuchStore(BadRequest):
    '''
    Raised when a requested GF store cannot be located.
    '''

    def __init__(self, store_id=None, dirs=None):
        BadRequest.__init__(self)
        self.store_id = store_id
        self.dirs = dirs

    def __str__(self):
        if self.store_id is None:
            message = 'GF store not found.'
        else:
            message = 'no GF store with id "%s" found.' % self.store_id

        if self.dirs is not None:
            # List the directories which were searched, one per line.
            message += ' Searched folders:\n %s' % '\n '.join(
                sorted(self.dirs))

        return message
def ufloat(s):
    '''
    Convert a string to float, supporting ``'k'`` (1e3) and ``'M'`` (1e6)
    unit suffixes, e.g. ``'10k'`` -> ``10000.0``.

    :raises ValueError: if the string is not a valid number.
    '''
    suffixes = {
        'k': 1e3,
        'M': 1e6,
    }

    scale = 1.0
    if s and s[-1] in suffixes:
        scale = suffixes[s[-1]]
        s = s[:-1]
        if not s:
            raise ValueError("unit without a number: '%s'" % s)

    return float(s) * scale
def ufloat_or_none(s):
    '''
    Like :py:func:`ufloat`, but map empty or ``None`` input to ``None``.
    '''
    return ufloat(s) if s else None
def int_or_none(s):
    '''
    Convert to ``int``, mapping empty or ``None`` input to ``None``.
    '''
    return int(s) if s else None
def nonzero(x, eps=1e-15):
    '''
    Return ``True`` if ``x`` differs from zero by more than ``eps``.
    '''
    return eps < abs(x)
def permudef(ln, j=0):
    '''
    Generate all value combinations for a list of definitions.

    ``ln`` is a list of ``(key, values)`` pairs, where ``values`` is an
    iterable of possible values for ``key``. For every combination, the
    list is yielded with each pair temporarily replaced by
    ``(key, single_value)``.

    NOTE: the list is modified in place and the *same* list object is
    yielded each time — consumers must copy it if they need to keep it.
    '''
    if j < len(ln):
        k, v = ln[j]
        for y in v:
            # Substitute one concrete value at position j, then recurse
            # over the remaining positions.
            ln[j] = k, y
            for s in permudef(ln, j + 1):
                yield s

        # Restore the original (key, values) pair before returning.
        ln[j] = k, v
        return
    else:
        yield ln
def arr(x):
    '''
    Coerce ``x`` into a NumPy array of at least one dimension.
    '''
    return num.atleast_1d(num.asarray(x))
def discretize_rect_source(deltas, deltat, time, north, east, depth,
                           strike, dip, length, width,
                           anchor, velocity=None, stf=None,
                           nucleation_x=None, nucleation_y=None,
                           decimation_factor=1, pointsonly=False,
                           plane_coords=False,
                           aggressive_oversampling=False):
    '''
    Discretize a rectangular source into a grid of point sources.

    :param deltas: grid spacings of the GF store [m]
    :param deltat: sampling interval [s]
    :param time: source origin time used for STF discretization
    :param velocity: rupture velocity [m/s], required unless
        ``pointsonly`` or ``plane_coords`` is used
    :param anchor: anchor point name, looked up in ``map_anchor``
    :param pointsonly: if true, return only point coordinates (no times)
    :param plane_coords: if true, return in-plane coordinates (no
        rotation or shift applied)
    :returns: ``(points, dl, dw, nl, nw)`` in points-only modes,
        otherwise ``(points, times, amplitudes, dl, dw, nl, nw)``.
    '''
    if stf is None:
        stf = STF()

    if not velocity and not pointsonly:
        raise AttributeError('velocity is required in time mode')

    mindeltagf = float(num.min(deltas))
    if velocity:
        # Also resolve the rupture front in time.
        mindeltagf = min(mindeltagf, deltat * velocity)

    ln = length
    wd = width

    if aggressive_oversampling:
        nl = int((2. / decimation_factor) * num.ceil(ln / mindeltagf)) + 1
        nw = int((2. / decimation_factor) * num.ceil(wd / mindeltagf)) + 1
    else:
        nl = int((1. / decimation_factor) * num.ceil(ln / mindeltagf)) + 1
        nw = int((1. / decimation_factor) * num.ceil(wd / mindeltagf)) + 1

    n = int(nl * nw)

    dl = ln / nl
    dw = wd / nw

    # Sub-source centers, regularly spaced in the fault plane.
    xl = num.linspace(-0.5 * (ln - dl), 0.5 * (ln - dl), nl)
    xw = num.linspace(-0.5 * (wd - dw), 0.5 * (wd - dw), nw)

    points = num.zeros((n, 3))
    points[:, 0] = num.tile(xl, nw)
    points[:, 1] = num.repeat(xw, nl)

    # Distance of each sub-source from the nucleation point (in plane
    # coordinates, before anchor shift).
    if nucleation_x is not None:
        dist_x = num.abs(nucleation_x - points[:, 0])
    else:
        dist_x = num.zeros(n)

    if nucleation_y is not None:
        dist_y = num.abs(nucleation_y - points[:, 1])
    else:
        dist_y = num.zeros(n)

    dist = num.sqrt(dist_x**2 + dist_y**2)

    anch_x, anch_y = map_anchor[anchor]

    points[:, 0] -= anch_x * 0.5 * length
    points[:, 1] -= anch_y * 0.5 * width

    if plane_coords:
        return points, dl, dw, nl, nw

    rotmat = pmt.euler_to_matrix(dip * d2r, strike * d2r, 0.0)
    points = num.dot(rotmat.T, points.T).T

    points[:, 0] += north
    points[:, 1] += east
    points[:, 2] += depth

    if pointsonly:
        return points, dl, dw, nl, nw

    # Rupture front arrival times are only needed in time mode, where
    # ``velocity`` is guaranteed to be set (checked above). Computing
    # them here (after the early returns) fixes a TypeError which
    # occurred when velocity=None was used with pointsonly/plane_coords.
    times = dist / velocity

    xtau, amplitudes = stf.discretize_t(deltat, time)
    nt = xtau.size

    # Each sub-source is replicated for every STF sample.
    points2 = num.repeat(points, nt, axis=0)
    times2 = (times[:, num.newaxis] + xtau[num.newaxis, :]).ravel()
    amplitudes2 = num.tile(amplitudes, n)

    return points2, times2, amplitudes2, dl, dw, nl, nw
def check_rect_source_discretisation(points2, nl, nw, store):
    '''
    Check if a discretized rectangular source is sampled finely enough.

    Compares the sub-source spacing (along one grid direction) with half
    of the shortest S-wavelength resolvable by the GF store at the
    sub-source depths. Returns ``True`` if the discretisation is
    acceptable, ``False`` otherwise.
    '''
    # We assume a non-rotated fault plane
    N_CRITICAL = 8
    points = points2.T.reshape((3, nl, nw))
    if points.size <= N_CRITICAL:
        # Too few sub-sources to assess spacing; warn and accept.
        logger.warning('RectangularSource is defined by only %d sub-sources!'
                       % points.size)
        return True

    # Spacing between neighbouring rows of sub-sources.
    distances = num.sqrt(
        (points[0, 0, :] - points[0, 1, :])**2 +
        (points[1, 0, :] - points[1, 1, :])**2 +
        (points[2, 0, :] - points[2, 1, :])**2)

    depths = points[2, 0, :]
    vs_profile = store.config.get_vs(
        lat=0., lon=0.,
        points=num.repeat(depths[:, num.newaxis], 3, axis=1),
        interpolation='multilinear')

    # Nyquist-style criterion: spacing must stay below half of the
    # minimum resolvable wavelength vs * 2*deltat.
    min_wavelength = vs_profile * (store.config.deltat * 2)
    if not num.all(min_wavelength > distances / 2):
        return False
    return True
def outline_rect_source(strike, dip, length, width, anchor):
    '''
    Get the corner coordinates of a rectangular source outline.

    Returns the five corner points (closed polygon, first point repeated)
    in north-east-down coordinates relative to the anchor.
    '''
    half_l = 0.5 * length
    half_w = 0.5 * width

    # Closed rectangle outline in the (length, width) plane.
    points = num.array(
        [[-half_l, -half_w, 0.],
         [half_l, -half_w, 0.],
         [half_l, half_w, 0.],
         [-half_l, half_w, 0.],
         [-half_l, -half_w, 0.]])

    anch_x, anch_y = map_anchor[anchor]
    points[:, 0] -= anch_x * half_l
    points[:, 1] -= anch_y * half_w

    # Rotate the plane into its strike/dip orientation.
    rotmat = pmt.euler_to_matrix(dip * d2r, strike * d2r, 0.0)

    return num.dot(rotmat.T, points.T).T
def from_plane_coords(
        strike, dip, length, width, depth, x_plane_coords, y_plane_coords,
        lat=0., lon=0.,
        north_shift=0, east_shift=0,
        anchor='top', cs='xy'):
    '''
    Convert plane coordinates on a rectangular source to absolute
    coordinates.

    ``x_plane_coords``/``y_plane_coords`` are normalized in-plane
    coordinates (scalar or list). ``cs`` selects the output coordinate
    system: ``'xy'``, ``'latlon'`` or ``'lonlat'``.

    :returns: two lists ``(x_abs, y_abs)`` (swapped for ``cs='lonlat'``).
    '''
    ln = length
    wd = width
    x_abs = []
    y_abs = []
    if not isinstance(x_plane_coords, list):
        x_plane_coords = [x_plane_coords]
        y_plane_coords = [y_plane_coords]

    for x_plane, y_plane in zip(x_plane_coords, y_plane_coords):
        points = num.array(
            [[-0.5 * ln * x_plane, -0.5 * wd * y_plane, 0.],
             [0.5 * ln * x_plane, -0.5 * wd * y_plane, 0.],
             [0.5 * ln * x_plane, 0.5 * wd * y_plane, 0.],
             [-0.5 * ln * x_plane, 0.5 * wd * y_plane, 0.],
             [-0.5 * ln * x_plane, -0.5 * wd * y_plane, 0.]])

        anch_x, anch_y = map_anchor[anchor]
        points[:, 0] -= anch_x * 0.5 * length
        points[:, 1] -= anch_y * 0.5 * width

        rotmat = pmt.euler_to_matrix(dip * d2r, strike * d2r, 0.0)

        points = num.dot(rotmat.T, points.T).T
        points[:, 0] += north_shift
        points[:, 1] += east_shift
        points[:, 2] += depth
        if cs in ('latlon', 'lonlat'):
            latlon = ne_to_latlon(lat, lon,
                                  points[:, 0], points[:, 1])
            latlon = num.array(latlon).T
            # NOTE(review): only single corner rows (indices 1 and 2) are
            # picked out here — presumably the relevant corner of the
            # scaled rectangle; confirm the intended indexing.
            x_abs.append(latlon[1:2, 1])
            y_abs.append(latlon[2:3, 0])
        if cs == 'xy':
            x_abs.append(points[1:2, 1])
            y_abs.append(points[2:3, 0])

    if cs == 'lonlat':
        return y_abs, x_abs
    else:
        return x_abs, y_abs
def points_on_rect_source(
        strike, dip, length, width, anchor,
        discretized_basesource=None, points_x=None, points_y=None):
    '''
    Compute absolute coordinates of points located on a rectangular
    source plane.

    Either pass a ``discretized_basesource`` (its patch grid corners are
    used) or explicit normalized in-plane coordinates ``points_x`` /
    ``points_y`` in the range [-1, 1].

    NOTE(review): if neither a basesource nor non-empty point arrays are
    given, ``points`` is never assigned and a NameError/AttributeError
    results — callers are expected to provide one of the two.
    '''
    ln = length
    wd = width

    if isinstance(points_x, list) or isinstance(points_x, float):
        points_x = num.array([points_x])
    if isinstance(points_y, list) or isinstance(points_y, float):
        points_y = num.array([points_y])

    if discretized_basesource:
        ds = discretized_basesource

        # Patch corner grid is one larger than the patch grid itself.
        nl_patches = ds.nl + 1
        nw_patches = ds.nw + 1

        npoints = nl_patches * nw_patches
        points = num.zeros((npoints, 3))
        ln_patches = num.array([il for il in range(nl_patches)])
        wd_patches = num.array([iw for iw in range(nw_patches)])

        # Normalize patch indices to the range [-1, 1].
        points_ln =\
            2 * ((ln_patches - num.min(ln_patches)) / num.ptp(ln_patches)) - 1
        points_wd =\
            2 * ((wd_patches - num.min(wd_patches)) / num.ptp(wd_patches)) - 1

        for il in range(nl_patches):
            for iw in range(nw_patches):
                points[il * nw_patches + iw, :] = num.array([
                    points_ln[il] * ln * 0.5,
                    points_wd[iw] * wd * 0.5, 0.0])

    elif points_x.shape[0] > 0 and points_y.shape[0] > 0:
        points = num.zeros(shape=((len(points_x), 3)))
        for i, (x, y) in enumerate(zip(points_x, points_y)):
            points[i, :] = num.array(
                [x * 0.5 * ln, y * 0.5 * wd, 0.0])

    anch_x, anch_y = map_anchor[anchor]

    points[:, 0] -= anch_x * 0.5 * ln
    points[:, 1] -= anch_y * 0.5 * wd

    # Rotate into strike/dip orientation.
    rotmat = pmt.euler_to_matrix(dip * d2r, strike * d2r, 0.0)

    return num.dot(rotmat.T, points.T).T
class InvalidGridDef(Exception):
    '''
    Raised when a grid definition string cannot be parsed.
    '''
    pass
class Range(SObject):
    '''
    Convenient range specification.

    Equivalent ways to specify the range [ 0., 1000., ... 10000. ]::

      Range('0 .. 10k : 1k')
      Range(start=0., stop=10e3, step=1e3)
      Range(0, 10e3, 1e3)
      Range('0 .. 10k @ 11')
      Range(start=0., stop=10*km, n=11)

      Range(0, 10e3, n=11)
      Range(values=[x*1e3 for x in range(11)])

    Depending on the use context, it can be possible to omit any part of the
    specification. E.g. in the context of extracting a subset of an already
    existing range, the existing range's specification values would be filled
    in where missing.

    The values are distributed with equal spacing, unless the ``spacing``
    argument is modified. The values can be created offset or relative to an
    external base value with the ``relative`` argument if the use context
    supports this.

    The range specification can be expressed with a short string
    representation::

        'start .. stop @ num | spacing, relative'
        'start .. stop : step | spacing, relative'

    most parts of the expression can be omitted if not needed. Whitespace is
    allowed for readability but can also be omitted.
    '''

    start = Float.T(optional=True)
    stop = Float.T(optional=True)
    step = Float.T(optional=True)
    n = Int.T(optional=True)
    values = Array.T(optional=True, dtype=float, shape=(None,))

    spacing = StringChoice.T(
        choices=['lin', 'log', 'symlog'],
        default='lin',
        optional=True)

    relative = StringChoice.T(
        choices=['', 'add', 'mult'],
        default='',
        optional=True)

    # Matches 'start .. stop', optionally followed by '@n' or ':step',
    # optionally followed by '|spacing,relative'.
    pattern = re.compile(r'^((?P<start>.*)\.\.(?P<stop>[^@|:]*))?'
                         r'(@(?P<n>[^|]+)|:(?P<step>[^|]+))?'
                         r'(\|(?P<stuff>.+))?$')

    def __init__(self, *args, **kwargs):
        # Positional forms: Range('spec-string'), Range(start, stop) or
        # Range(start, stop, step). Keyword arguments may fill in the rest
        # but must not duplicate what was given positionally.
        d = {}
        if len(args) == 1:
            d = self.parse(args[0])
        elif len(args) in (2, 3):
            d['start'], d['stop'] = [float(x) for x in args[:2]]
            if len(args) == 3:
                d['step'] = float(args[2])

        for k, v in kwargs.items():
            if k in d:
                raise ArgumentError('%s specified more than once' % k)

            d[k] = v

        SObject.__init__(self, **d)

    def __str__(self):
        def sfloat(x):
            if x is not None:
                return '%g' % x
            else:
                return ''

        # NOTE(review): ``self.values`` is a NumPy array when set; the
        # truth test raises ValueError for arrays with more than one
        # element — confirm whether ``is not None`` was intended.
        if self.values:
            return ','.join('%g' % x for x in self.values)

        if self.start is None and self.stop is None:
            s0 = ''
        else:
            s0 = '%s .. %s' % (sfloat(self.start), sfloat(self.stop))

        # Compact form (no surrounding spaces) when there is no
        # 'start .. stop' part.
        s1 = ''
        if self.step is not None:
            s1 = [' : %g', ':%g'][s0 == ''] % self.step
        elif self.n is not None:
            s1 = [' @ %i', '@%i'][s0 == ''] % self.n

        if self.spacing == 'lin' and self.relative == '':
            s2 = ''
        else:
            x = []
            if self.spacing != 'lin':
                x.append(self.spacing)

            if self.relative != '':
                x.append(self.relative)

            s2 = ' | %s' % ','.join(x)

        return s0 + s1 + s2

    @classmethod
    def parse(cls, s):
        '''
        Parse a range specification string into keyword arguments.

        Falls back to interpreting the string as a comma-separated list
        of values if it does not match the range pattern.

        :raises InvalidGridDef: on malformed input.
        '''
        s = re.sub(r'\s+', '', s)
        m = cls.pattern.match(s)
        if not m:
            try:
                vals = [ufloat(x) for x in s.split(',')]
            except Exception:
                raise InvalidGridDef(
                    '"%s" is not a valid range specification' % s)

            return dict(values=num.array(vals, dtype=float))

        d = m.groupdict()
        try:
            start = ufloat_or_none(d['start'])
            stop = ufloat_or_none(d['stop'])
            step = ufloat_or_none(d['step'])
            n = int_or_none(d['n'])
        except Exception:
            raise InvalidGridDef(
                '"%s" is not a valid range specification' % s)

        spacing = 'lin'
        relative = ''

        # The part after '|' holds spacing and/or relative flags.
        if d['stuff'] is not None:
            t = d['stuff'].split(',')
            for x in t:
                if x in cls.spacing.choices:
                    spacing = x
                elif x and x in cls.relative.choices:
                    relative = x
                else:
                    raise InvalidGridDef(
                        '"%s" is not a valid range specification' % s)

        return dict(start=start, stop=stop, step=step, n=n, spacing=spacing,
                    relative=relative)

    def make(self, mi=None, ma=None, inc=None, base=None, eps=1e-5):
        '''
        Materialize the range into a list of float values.

        ``mi``, ``ma``, ``inc`` and ``base`` supply context defaults for
        missing parts of the specification.

        :raises InvalidGridDef: if the specification cannot be completed
            in this context.
        '''
        if self.values:
            return self.values

        start = self.start
        stop = self.stop
        step = self.step
        n = self.n

        # A negative step means counting downwards; swap the context
        # defaults accordingly.
        swap = step is not None and step < 0.
        if start is None:
            start = [mi, ma][swap]
        if stop is None:
            stop = [ma, mi][swap]
        if step is None and inc is not None:
            step = [inc, -inc][ma < mi]

        if start is None or stop is None:
            raise InvalidGridDef(
                'Cannot use range specification "%s" without start '
                'and stop in this context' % self)

        if step is None and n is None:
            step = stop - start

        if n is None:
            if (step < 0) != (stop - start < 0):
                raise InvalidGridDef(
                    'Range specification "%s" has inconsistent ordering '
                    '(step < 0 => stop > start)' % self)

            # Derive n from the step; shrink stop to the last value on
            # the grid if (stop - start) is not a multiple of step.
            n = int(round((stop - start) / step)) + 1
            stop2 = start + (n - 1) * step
            if abs(stop - stop2) > eps:
                n = int(math.floor((stop - start) / step)) + 1
                stop = start + (n - 1) * step
            else:
                stop = stop2

        if start == stop:
            n = 1

        if self.spacing == 'lin':
            vals = num.linspace(start, stop, n)

        elif self.spacing in ('log', 'symlog'):
            if start > 0. and stop > 0.:
                vals = num.exp(num.linspace(num.log(start),
                                            num.log(stop), n))
            elif start < 0. and stop < 0.:
                vals = -num.exp(num.linspace(num.log(-start),
                                             num.log(-stop), n))
            else:
                raise InvalidGridDef(
                    'Log ranges should not include or cross zero '
                    '(in range specification "%s").' % self)

            if self.spacing == 'symlog':
                # Mirror the values around zero.
                nvals = - vals
                vals = num.concatenate((nvals[::-1], vals))

        if self.relative in ('add', 'mult') and base is None:
            raise InvalidGridDef(
                'Cannot use relative range specification in this context.')

        vals = self.make_relative(base, vals)

        return list(map(float, vals))

    def make_relative(self, base, vals):
        # Offset or scale the values by an external base value.
        if self.relative == 'add':
            vals += base

        if self.relative == 'mult':
            vals *= base

        return vals
class GridDefElement(Object):
    '''
    Single entry of a grid definition: a parameter name with its range.
    '''

    param = meta.StringID.T()
    rs = Range.T()

    def __init__(self, shorthand=None, **kwargs):
        # Accept shorthand notation 'param = range-spec'.
        if shorthand is not None:
            parts = shorthand.split('=')
            if len(parts) != 2:
                raise InvalidGridDef(
                    'Invalid grid specification element: %s' % shorthand)

            kwargs['param'] = parts[0].strip()
            kwargs['rs'] = Range(parts[1].strip())

        Object.__init__(self, **kwargs)

    def shorthand(self):
        return self.param + ' = ' + str(self.rs)
class GridDef(Object):
    '''
    Grid definition: a collection of parameter range elements.
    '''

    elements = List.T(GridDefElement.T())

    def __init__(self, shorthand=None, **kwargs):
        # Accept shorthand notation: elements separated by ';' and/or
        # newlines, each of the form 'param = range-spec'.
        if shorthand is not None:
            t = shorthand.splitlines()
            tt = []
            for x in t:
                x = x.strip()
                if x:
                    tt.extend(x.split(';'))

            elements = []
            for se in tt:
                # Bug fix: was ``GridDef(se)``, which recursed infinitely
                # for any non-empty shorthand and would have produced
                # elements of the wrong type.
                elements.append(GridDefElement(se))

            kwargs['elements'] = elements

        Object.__init__(self, **kwargs)

    def shorthand(self):
        return '; '.join(str(x) for x in self.elements)
class Cloneable(object):
    '''
    Mix-in providing dict-like parameter access and copying for guts
    objects.
    '''

    def __iter__(self):
        # Iterate over the guts property names of the class.
        return iter(self.T.propnames)

    def __getitem__(self, name):
        if name not in self.keys():
            raise KeyError(name)

        return getattr(self, name)

    def __setitem__(self, name, value):
        if name not in self.keys():
            raise KeyError(name)

        return setattr(self, name, value)

    def clone(self, **kwargs):
        '''
        Make a copy of the object.

        A new object of the same class is created and initialized with the
        parameters of the object on which this method is called on. If
        ``kwargs`` are given, these are used to override any of the
        initialization parameters.
        '''
        params = dict(self)
        for name in params:
            value = params[name]
            if isinstance(value, Cloneable):
                # Deep-copy nested cloneable parameters.
                params[name] = value.clone()

        params.update(kwargs)
        return self.__class__(**params)

    @classmethod
    def keys(cls):
        '''
        Get list of the source model's parameter names.
        '''
        return cls.T.propnames
class STF(Object, Cloneable):

    '''
    Base class for source time functions.
    '''

    def __init__(self, effective_duration=None, **kwargs):
        # Allow construction from an "effective" duration; subclasses
        # provide the conversion factor.
        if effective_duration is not None:
            kwargs['duration'] = effective_duration / \
                self.factor_duration_to_effective()

        Object.__init__(self, **kwargs)

    @classmethod
    def factor_duration_to_effective(cls):
        # Ratio effective_duration / duration; identity for the base class.
        return 1.0

    def centroid_time(self, tref):
        # The base STF is an impulse at the reference time.
        return tref

    @property
    def effective_duration(self):
        # NOTE(review): ``duration`` is only defined on subclasses; the
        # base class itself has no such attribute.
        return self.duration * self.factor_duration_to_effective()

    def discretize_t(self, deltat, tref):
        '''
        Discretize the STF to the sampling raster.

        Returns ``(times, amplitudes)``; a unit pulse is spread over the
        two samples bracketing ``tref`` by linear interpolation.
        '''
        tl = math.floor(tref / deltat) * deltat
        th = math.ceil(tref / deltat) * deltat
        if tl == th:
            return num.array([tl], dtype=float), num.ones(1)
        else:
            return (
                num.array([tl, th], dtype=float),
                num.array([th - tref, tref - tl], dtype=float) / deltat)

    def base_key(self):
        # Contribution of the STF to GF-stack sharing decisions.
        return (type(self).__name__,)
774g_unit_pulse = STF()
def sshift(times, amplitudes, tshift, deltat):
    '''
    Time-shift a sampled signal, allowing for sub-sample shifts.

    The shift is applied by distributing each amplitude linearly onto the
    two neighbouring sample positions. If the shift is a whole number of
    samples, the input arrays are returned unchanged.
    '''
    t0 = math.floor(tshift / deltat) * deltat
    t1 = math.ceil(tshift / deltat) * deltat
    if t0 == t1:
        return times, amplitudes

    # One extra sample is needed to hold the spill-over.
    shifted = num.zeros(amplitudes.size + 1, dtype=float)

    shifted[:-1] += (t1 - tshift) / deltat * amplitudes
    shifted[1:] += (tshift - t0) / deltat * amplitudes

    shifted_times = num.arange(times.size + 1, dtype=float) * \
        deltat + times[0] + t0

    return shifted_times, shifted
class BoxcarSTF(STF):

    '''
    Boxcar type source time function.

    .. figure :: /static/stf-BoxcarSTF.svg
        :width: 40%
        :align: center
        :alt: boxcar source time function
    '''

    duration = Float.T(
        default=0.0,
        help='duration of the boxcar')

    anchor = Float.T(
        default=0.0,
        help='anchor point with respect to source.time: ('
             '-1.0: left -> source duration [0, T] ~ hypocenter time, '
             ' 0.0: center -> source duration [-T/2, T/2] ~ centroid time, '
             '+1.0: right -> source duration [-T, 0] ~ rupture end time)')

    @classmethod
    def factor_duration_to_effective(cls):
        # For a boxcar, effective duration equals the duration itself.
        return 1.0

    def centroid_time(self, tref):
        return tref - 0.5 * self.duration * self.anchor

    def discretize_t(self, deltat, tref):
        '''
        Discretize the boxcar onto the sampling raster.

        Returns ``(times, amplitudes)`` with amplitudes normalized to
        unit sum and sub-sample shifted so that the discrete centroid
        matches :py:meth:`centroid_time`.
        '''
        tmin_stf = tref - self.duration * (self.anchor + 1.) * 0.5
        tmax_stf = tref + self.duration * (1. - self.anchor) * 0.5
        tmin = round(tmin_stf / deltat) * deltat
        tmax = round(tmax_stf / deltat) * deltat
        nt = int(round((tmax - tmin) / deltat)) + 1
        times = num.linspace(tmin, tmax, nt)
        amplitudes = num.ones_like(times)
        if times.size > 1:
            # Integrate the piecewise-linear boxcar over each sample bin.
            t_edges = num.linspace(
                tmin - 0.5 * deltat, tmax + 0.5 * deltat, nt + 1)
            t = tmin_stf + self.duration * num.array(
                [0.0, 0.0, 1.0, 1.0], dtype=float)
            f = num.array([0., 1., 1., 0.], dtype=float)
            amplitudes = util.plf_integrate_piecewise(t_edges, t, f)
            amplitudes /= num.sum(amplitudes)

        # Correct residual centroid offset with a sub-sample shift.
        tshift = (num.sum(amplitudes * times) - self.centroid_time(tref))

        return sshift(times, amplitudes, -tshift, deltat)

    def base_key(self):
        return (type(self).__name__, self.duration, self.anchor)
class TriangularSTF(STF):

    '''
    Triangular type source time function.

    .. figure :: /static/stf-TriangularSTF.svg
        :width: 40%
        :align: center
        :alt: triangular source time function
    '''

    duration = Float.T(
        default=0.0,
        help='baseline of the triangle')

    peak_ratio = Float.T(
        default=0.5,
        help='fraction of time compared to duration, '
             'when the maximum amplitude is reached')

    anchor = Float.T(
        default=0.0,
        help='anchor point with respect to source.time: ('
             '-1.0: left -> source duration [0, T] ~ hypocenter time, '
             ' 0.0: center -> source duration [-T/2, T/2] ~ centroid time, '
             '+1.0: right -> source duration [-T, 0] ~ rupture end time)')

    @classmethod
    def factor_duration_to_effective(cls, peak_ratio=None):
        # Ratio effective_duration / duration for a triangle with the
        # given peak position.
        if peak_ratio is None:
            peak_ratio = cls.peak_ratio.default()

        return math.sqrt((peak_ratio**2 - peak_ratio + 1.0) * 2.0 / 3.0)

    def __init__(self, effective_duration=None, **kwargs):
        # Override base-class constructor: the duration conversion
        # depends on peak_ratio here.
        if effective_duration is not None:
            kwargs['duration'] = effective_duration / \
                self.factor_duration_to_effective(
                    kwargs.get('peak_ratio', None))

        STF.__init__(self, **kwargs)

    @property
    def centroid_ratio(self):
        # Position of the triangle's centroid as fraction of the duration.
        ra = self.peak_ratio
        rb = 1.0 - ra
        return self.peak_ratio + (rb**2 / 3. - ra**2 / 3.) / (ra + rb)

    def centroid_time(self, tref):
        ca = self.centroid_ratio
        cb = 1.0 - ca
        if self.anchor <= 0.:
            return tref - ca * self.duration * self.anchor
        else:
            return tref - cb * self.duration * self.anchor

    @property
    def effective_duration(self):
        return self.duration * self.factor_duration_to_effective(
            self.peak_ratio)

    def tminmax_stf(self, tref):
        # Time window covered by the triangle for the given anchor.
        ca = self.centroid_ratio
        cb = 1.0 - ca
        if self.anchor <= 0.:
            tmin_stf = tref - ca * self.duration * (self.anchor + 1.)
            tmax_stf = tmin_stf + self.duration
        else:
            tmax_stf = tref + cb * self.duration * (1. - self.anchor)
            tmin_stf = tmax_stf - self.duration

        return tmin_stf, tmax_stf

    def discretize_t(self, deltat, tref):
        '''
        Discretize the triangle onto the sampling raster.

        Returns ``(times, amplitudes)`` with amplitudes normalized to
        unit sum.
        '''
        tmin_stf, tmax_stf = self.tminmax_stf(tref)

        tmin = round(tmin_stf / deltat) * deltat
        tmax = round(tmax_stf / deltat) * deltat
        nt = int(round((tmax - tmin) / deltat)) + 1
        if nt > 1:
            # Integrate the piecewise-linear triangle over each sample bin.
            t_edges = num.linspace(
                tmin - 0.5 * deltat, tmax + 0.5 * deltat, nt + 1)
            t = tmin_stf + self.duration * num.array(
                [0.0, self.peak_ratio, 1.0], dtype=float)
            f = num.array([0., 1., 0.], dtype=float)
            amplitudes = util.plf_integrate_piecewise(t_edges, t, f)
            amplitudes /= num.sum(amplitudes)
        else:
            amplitudes = num.ones(1)

        times = num.linspace(tmin, tmax, nt)
        return times, amplitudes

    def base_key(self):
        return (
            type(self).__name__, self.duration, self.peak_ratio, self.anchor)
class HalfSinusoidSTF(STF):

    '''
    Half sinusoid type source time function.

    .. figure :: /static/stf-HalfSinusoidSTF.svg
        :width: 40%
        :align: center
        :alt: half-sinusoid source time function
    '''

    duration = Float.T(
        default=0.0,
        help='duration of the half-sinusoid (baseline)')

    anchor = Float.T(
        default=0.0,
        help='anchor point with respect to source.time: ('
             '-1.0: left -> source duration [0, T] ~ hypocenter time, '
             ' 0.0: center -> source duration [-T/2, T/2] ~ centroid time, '
             '+1.0: right -> source duration [-T, 0] ~ rupture end time)')

    exponent = Int.T(
        default=1,
        help='set to 2 to use square of the half-period sinusoidal function.')

    def __init__(self, effective_duration=None, **kwargs):
        # Override base-class constructor: the duration conversion
        # depends on the exponent here.
        if effective_duration is not None:
            kwargs['duration'] = effective_duration / \
                self.factor_duration_to_effective(
                    kwargs.get('exponent', 1))

        STF.__init__(self, **kwargs)

    @classmethod
    def factor_duration_to_effective(cls, exponent):
        # Ratio effective_duration / duration, analytic for each exponent.
        if exponent == 1:
            return math.sqrt(3.0 * math.pi**2 - 24.0) / math.pi
        elif exponent == 2:
            return math.sqrt(math.pi**2 - 6) / math.pi
        else:
            raise ValueError('Exponent for HalfSinusoidSTF must be 1 or 2.')

    @property
    def effective_duration(self):
        return self.duration * self.factor_duration_to_effective(self.exponent)

    def centroid_time(self, tref):
        return tref - 0.5 * self.duration * self.anchor

    def discretize_t(self, deltat, tref):
        '''
        Discretize the half-sinusoid onto the sampling raster by exact
        integration over each sample bin.

        Returns ``(times, amplitudes)`` with unit-sum amplitudes.
        '''
        tmin_stf = tref - self.duration * (self.anchor + 1.) * 0.5
        tmax_stf = tref + self.duration * (1. - self.anchor) * 0.5
        tmin = round(tmin_stf / deltat) * deltat
        tmax = round(tmax_stf / deltat) * deltat
        nt = int(round((tmax - tmin) / deltat)) + 1
        if nt > 1:
            # Clip bin edges to the STF support before evaluating the
            # antiderivative.
            t_edges = num.maximum(tmin_stf, num.minimum(tmax_stf, num.linspace(
                tmin - 0.5 * deltat, tmax + 0.5 * deltat, nt + 1)))

            if self.exponent == 1:
                # Antiderivative of sin(pi t / T).
                fint = -num.cos(
                    (t_edges - tmin_stf) * (math.pi / self.duration))

            elif self.exponent == 2:
                # Antiderivative of sin^2(pi t / T).
                fint = (t_edges - tmin_stf) / self.duration \
                    - 1.0 / (2.0 * math.pi) * num.sin(
                        (t_edges - tmin_stf) * (2.0 * math.pi / self.duration))
            else:
                raise ValueError(
                    'Exponent for HalfSinusoidSTF must be 1 or 2.')

            amplitudes = fint[1:] - fint[:-1]
            amplitudes /= num.sum(amplitudes)
        else:
            amplitudes = num.ones(1)

        times = num.linspace(tmin, tmax, nt)
        return times, amplitudes

    def base_key(self):
        # NOTE(review): ``exponent`` is not part of the key, so two STFs
        # differing only in exponent share GF stacks — confirm intended.
        return (type(self).__name__, self.duration, self.anchor)
class SmoothRampSTF(STF):
    '''
    Smooth-ramp type source time function for near-field displacement.
    Based on moment function of double-couple point source proposed by Bruestle
    and Mueller (PEPI, 1983).

    .. [1] W. Bruestle, G. Mueller (1983), Moment and duration of shallow
        earthquakes from Love-wave modelling for regional distances, PEPI 32,
        312-324.

    .. figure :: /static/stf-SmoothRampSTF.svg
        :width: 40%
        :alt: smooth ramp source time function
    '''
    duration = Float.T(
        default=0.0,
        help='duration of the ramp (baseline)')

    rise_ratio = Float.T(
        default=0.5,
        help='fraction of time compared to duration, '
             'when the maximum amplitude is reached')

    anchor = Float.T(
        default=0.0,
        help='anchor point with respect to source.time: ('
             '-1.0: left -> source duration ``[0, T]`` ~ hypocenter time, '
             '0.0: center -> source duration ``[-T/2, T/2]`` ~ centroid time, '
             '+1.0: right -> source duration ``[-T, 0]`` ~ rupture end time)')

    def discretize_t(self, deltat, tref):
        '''
        Discretize the smooth ramp onto the sampling raster.

        Returns ``(times, amplitudes)`` with unit-sum amplitudes.
        '''
        tmin_stf = tref - self.duration * (self.anchor + 1.) * 0.5
        tmax_stf = tref + self.duration * (1. - self.anchor) * 0.5
        tmin = round(tmin_stf / deltat) * deltat
        tmax = round(tmax_stf / deltat) * deltat
        D = round((tmax - tmin) / deltat) * deltat
        nt = int(round(D / deltat)) + 1
        times = num.linspace(tmin, tmax, nt)
        if nt > 1:
            rise_time = self.rise_ratio * self.duration
            amplitudes = num.ones_like(times)
            tp = tmin + rise_time
            # Samples within the rising part follow the smooth-ramp
            # polynomial-of-cosines shape; beyond the rise they stay at 1.
            ii = num.where(times <= tp)
            t_inc = times[ii]
            a = num.cos(num.pi * (t_inc - tmin_stf) / rise_time)
            b = num.cos(3 * num.pi * (t_inc - tmin_stf) / rise_time) - 1.0
            amplitudes[ii] = (9. / 16.) * (1 - a + (1. / 9.) * b)

            amplitudes /= num.sum(amplitudes)
        else:
            amplitudes = num.ones(1)

        return times, amplitudes

    def base_key(self):
        return (type(self).__name__,
                self.duration, self.rise_ratio, self.anchor)
class ResonatorSTF(STF):
    '''
    Simple resonator like source time function.

    .. math ::

        f(t) = 0 for t < 0
        f(t) = e^{-t/tau} * sin(2 * pi * f * t)

    .. figure :: /static/stf-SmoothRampSTF.svg
        :width: 40%
        :alt: smooth ramp source time function

    '''

    # NOTE(review): the figure path above appears copy-pasted from
    # SmoothRampSTF — confirm the intended illustration.

    duration = Float.T(
        default=0.0,
        help='decay time')

    frequency = Float.T(
        default=1.0,
        help='resonance frequency')

    def discretize_t(self, deltat, tref):
        '''
        Discretize the damped oscillation onto the sampling raster.

        The signal is evaluated over three decay times; amplitudes are
        NOT normalized to unit sum here.
        '''
        tmin_stf = tref
        tmax_stf = tref + self.duration * 3
        tmin = math.floor(tmin_stf / deltat) * deltat
        tmax = math.ceil(tmax_stf / deltat) * deltat
        times = util.arange2(tmin, tmax, deltat)
        amplitudes = num.exp(-(times - tref) / self.duration) \
            * num.sin(2.0 * num.pi * self.frequency * (times - tref))

        return times, amplitudes

    def base_key(self):
        return (type(self).__name__,
                self.duration, self.frequency)
class TremorSTF(STF):
    '''
    Oscillating source time function.

    .. math ::

        f(t) = 0 for t < -tau/2 or t > tau/2
        f(t) = cos(pi/tau*t) * sin(2 * pi * f * t)

    '''

    duration = Float.T(
        default=0.0,
        help='Tremor duration [s]')

    frequency = Float.T(
        default=1.0,
        help='Frequency [Hz]')

    def discretize_t(self, deltat, tref):
        '''
        Discretize the windowed oscillation onto the sampling raster.

        Samples outside the cosine-tapered window are zero; amplitudes
        are NOT normalized to unit sum here.
        '''
        tmin_stf = tref - 0.5 * self.duration
        tmax_stf = tref + 0.5 * self.duration
        tmin = math.floor(tmin_stf / deltat) * deltat
        tmax = math.ceil(tmax_stf / deltat) * deltat
        times = util.arange2(tmin, tmax, deltat)
        # Only samples strictly inside the window carry signal.
        mask = num.logical_and(
            tref - 0.5 * self.duration < times,
            times < tref + 0.5 * self.duration)

        amplitudes = num.zeros_like(times)
        amplitudes[mask] = num.cos(num.pi/self.duration*(times[mask] - tref)) \
            * num.sin(2.0 * num.pi * self.frequency * (times[mask] - tref))

        return times, amplitudes

    def base_key(self):
        return (type(self).__name__,
                self.duration, self.frequency)
class STFMode(StringChoice):
    '''
    Whether to apply the source time function 'pre' (before) or 'post'
    (after) stacking of the Green's functions.
    '''
    choices = ['pre', 'post']
class Source(Location, Cloneable):
    '''
    Base class for all source models.
    '''

    name = String.T(optional=True, default='')

    time = Timestamp.T(
        default=Timestamp.D('1970-01-01 00:00:00'),
        help='source origin time.')

    stf = STF.T(
        optional=True,
        help='source time function.')

    stf_mode = STFMode.T(
        default='post',
        help='whether to apply source time function in pre or '
             'post-processing.')

    def __init__(self, **kwargs):
        Location.__init__(self, **kwargs)

    def update(self, **kwargs):
        '''
        Change some of the source models parameters.

        Example::

          >>> from pyrocko import gf
          >>> s = gf.DCSource()
          >>> s.update(strike=66., dip=33.)
          >>> print(s)
          --- !pf.DCSource
          depth: 0.0
          time: 1970-01-01 00:00:00
          magnitude: 6.0
          strike: 66.0
          dip: 33.0
          rake: 0.0

        '''

        for (k, v) in kwargs.items():
            # item assignment is provided by Cloneable
            self[k] = v

    def grid(self, **variables):
        '''
        Create grid of source model variations.

        :returns: :py:class:`SourceGrid` instance.

        Example::

          >>> from pyrocko import gf
          >>> base = DCSource()
          >>> R = gf.Range
          >>> for s in base.grid(depth=R(1000., 10000., 1000.)):
          ...     print(s.depth)

        '''
        return SourceGrid(base=self, variables=variables)

    def base_key(self):
        '''
        Get key to decide about source discretization / GF stack sharing.

        When two source models differ only in amplitude and origin time, the
        discretization and the GF stacking can be done only once for a unit
        amplitude and a zero origin time and the amplitude and origin times of
        the seismograms can be applied during post-processing of the synthetic
        seismogram.

        For any derived parameterized source model, this method is called to
        decide if discretization and stacking of the source should be shared.
        When two source models return an equal vector of values discretization
        is shared.
        '''
        return (self.depth, self.lat, self.north_shift,
                self.lon, self.east_shift, self.time, type(self).__name__) + \
            self.effective_stf_pre().base_key()

    def get_factor(self):
        '''
        Get the scaling factor to be applied during post-processing.

        Discretization of the base seismogram is usually done for a unit
        amplitude, because a common factor can be efficiently multiplied to
        final seismograms. This eliminates to do repeat the stacking when
        creating seismograms for a series of source models only differing in
        amplitude.

        This method should return the scaling factor to apply in the
        post-processing (often this is simply the scalar moment of the source).
        '''

        return 1.0

    def effective_stf_pre(self):
        '''
        Return the STF applied before stacking of the Green's functions.

        This STF is used during discretization of the parameterized source
        models, i.e. to produce a temporal distribution of point sources.

        Handling of the STF before stacking of the GFs is less efficient but
        allows to use different source time functions for different parts of
        the source.
        '''

        if self.stf is not None and self.stf_mode == 'pre':
            return self.stf
        else:
            # fall back to a unit pulse (no temporal spreading)
            return g_unit_pulse

    def effective_stf_post(self):
        '''
        Return the STF applied after stacking of the Green's functions.

        This STF is used in the post-processing of the synthetic seismograms.

        Handling of the STF after stacking of the GFs is usually more efficient
        but is only possible when a common STF is used for all subsources.
        '''

        if self.stf is not None and self.stf_mode == 'post':
            return self.stf
        else:
            return g_unit_pulse

    def _dparams_base(self):
        # Common keyword arguments for the discretized source constructors
        # (single point source at the source location).
        return dict(times=arr(self.time),
                    lat=self.lat, lon=self.lon,
                    north_shifts=arr(self.north_shift),
                    east_shifts=arr(self.east_shift),
                    depths=arr(self.depth))

    def _hash(self):
        # Hex digest over the base key; used to identify equivalent
        # discretizations.
        sha = sha1()
        for k in self.base_key():
            sha.update(str(k).encode())
        return sha.hexdigest()

    def _dparams_base_repeated(self, times):
        # Like _dparams_base, but with the point-source location repeated
        # once per STF time sample.
        if times is None:
            return self._dparams_base()

        nt = times.size
        north_shifts = num.repeat(self.north_shift, nt)
        east_shifts = num.repeat(self.east_shift, nt)
        depths = num.repeat(self.depth, nt)
        return dict(times=times,
                    lat=self.lat, lon=self.lon,
                    north_shifts=north_shifts,
                    east_shifts=east_shifts,
                    depths=depths)

    def pyrocko_event(self, store=None, target=None, **kwargs):
        '''
        Convert this source to a :py:class:`pyrocko.model.Event` object.

        Additional event attributes can be passed via ``kwargs``.
        '''
        duration = None
        if self.stf:
            duration = self.stf.effective_duration

        return model.Event(
            lat=self.lat,
            lon=self.lon,
            north_shift=self.north_shift,
            east_shift=self.east_shift,
            time=self.time,
            name=self.name,
            depth=self.depth,
            duration=duration,
            **kwargs)

    def geometry(self, **kwargs):
        raise NotImplementedError

    def outline(self, cs='xyz'):
        '''
        Get the source outline in the requested coordinate system.

        For a point source this is a single point; subclasses with a finite
        extent override this.
        '''
        points = num.atleast_2d(num.zeros([1, 3]))

        points[:, 0] += self.north_shift
        points[:, 1] += self.east_shift
        points[:, 2] += self.depth
        if cs == 'xyz':
            return points
        elif cs == 'xy':
            return points[:, :2]
        elif cs in ('latlon', 'lonlat'):
            latlon = ne_to_latlon(
                self.lat, self.lon, points[:, 0], points[:, 1])

            latlon = num.array(latlon).T
            if cs == 'latlon':
                return latlon
            else:
                return latlon[:, ::-1]

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        '''
        Create a source from a :py:class:`pyrocko.model.Event` object.

        :raises: ``ConversionError`` if the event has no depth.
        '''
        if ev.depth is None:
            raise ConversionError(
                'Cannot convert event object to source object: '
                'no depth information available')

        stf = None
        if ev.duration is not None:
            stf = HalfSinusoidSTF(effective_duration=ev.duration)

        d = dict(
            name=ev.name,
            time=ev.time,
            lat=ev.lat,
            lon=ev.lon,
            north_shift=ev.north_shift,
            east_shift=ev.east_shift,
            depth=ev.depth,
            stf=stf)
        d.update(kwargs)
        return cls(**d)

    def get_magnitude(self):
        raise NotImplementedError(
            '%s does not implement get_magnitude()'
            % self.__class__.__name__)
class SourceWithMagnitude(Source):
    '''
    Base class for sources containing a moment magnitude.
    '''

    magnitude = Float.T(
        default=6.0,
        help='Moment magnitude Mw as in [Hanks and Kanamori, 1979]')

    def __init__(self, **kwargs):
        # Accept ``moment`` as a convenience alias for ``magnitude``;
        # an explicitly given ``magnitude`` takes precedence.
        if 'moment' in kwargs:
            mom = kwargs.pop('moment')
            if 'magnitude' not in kwargs:
                kwargs['magnitude'] = float(pmt.moment_to_magnitude(mom))

        Source.__init__(self, **kwargs)

    @property
    def moment(self):
        # Scalar moment M0 [Nm] derived from the magnitude.
        return float(pmt.magnitude_to_moment(self.magnitude))

    @moment.setter
    def moment(self, value):
        self.magnitude = float(pmt.moment_to_magnitude(value))

    def pyrocko_event(self, store=None, target=None, **kwargs):
        return Source.pyrocko_event(
            self, store, target,
            magnitude=self.magnitude,
            **kwargs)

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        d = {}
        # Explicit ``is not None`` test so a valid magnitude of exactly 0.0
        # is not silently dropped (consistent with
        # MTSource.from_pyrocko_event).
        if ev.magnitude is not None:
            d.update(magnitude=ev.magnitude)

        d.update(kwargs)
        return super(SourceWithMagnitude, cls).from_pyrocko_event(ev, **d)

    def get_magnitude(self):
        return self.magnitude
class DerivedMagnitudeError(ValidationError):
    '''
    Raised when the magnitude of a source cannot be derived, or when
    conflicting magnitude-determining parameters are given.
    '''
    pass
class SourceWithDerivedMagnitude(Source):
    '''
    Base class for sources whose magnitude may be derived from other
    parameters (e.g. slip or volume change) with the help of an earth model.
    '''

    class __T(Source.T):

        def validate_extra(self, val):
            Source.T.validate_extra(self, val)
            val.check_conflicts()

    def check_conflicts(self):
        '''
        Check for parameter conflicts.

        To be overloaded in subclasses. Raises :py:exc:`DerivedMagnitudeError`
        on conflicts.
        '''
        pass

    def get_magnitude(self, store=None, target=None):
        raise DerivedMagnitudeError('No magnitude set.')

    def get_moment(self, store=None, target=None):
        # Scalar moment M0 [Nm] derived from the (possibly derived)
        # magnitude.
        return float(pmt.magnitude_to_moment(
            self.get_magnitude(store, target)))

    def pyrocko_moment_tensor(self, store=None, target=None):
        raise NotImplementedError(
            '%s does not implement pyrocko_moment_tensor()'
            % self.__class__.__name__)

    def pyrocko_event(self, store=None, target=None, **kwargs):
        try:
            mt = self.pyrocko_moment_tensor(store, target)
            # Pass store and target along so subclasses which can only derive
            # their magnitude with the help of an earth model (e.g. slip- or
            # volume-change-based sources) do not raise here and needlessly
            # discard an already computed moment tensor.
            magnitude = self.get_magnitude(store, target)
        except (DerivedMagnitudeError, NotImplementedError):
            mt = None
            magnitude = None

        return Source.pyrocko_event(
            self, store, target,
            moment_tensor=mt,
            magnitude=magnitude,
            **kwargs)
class ExplosionSource(SourceWithDerivedMagnitude):
    '''
    An isotropic explosion point source.
    '''

    magnitude = Float.T(
        optional=True,
        help='moment magnitude Mw as in [Hanks and Kanamori, 1979]')

    volume_change = Float.T(
        optional=True,
        help='volume change of the explosion/implosion or '
             'the contracting/extending magmatic source. [m^3]')

    discretized_source_class = meta.DiscretizedExplosionSource

    def __init__(self, **kwargs):
        # Accept ``moment`` as a convenience alias for ``magnitude``.
        if 'moment' in kwargs:
            mom = kwargs.pop('moment')
            if 'magnitude' not in kwargs:
                kwargs['magnitude'] = float(pmt.moment_to_magnitude(mom))

        SourceWithDerivedMagnitude.__init__(self, **kwargs)

    def base_key(self):
        return SourceWithDerivedMagnitude.base_key(self) + \
            (self.volume_change,)

    def check_conflicts(self):
        # Magnitude and volume change both determine the source strength, so
        # only one of them may be given.
        if self.magnitude is not None and self.volume_change is not None:
            raise DerivedMagnitudeError(
                'Magnitude and volume_change are both defined.')

    def get_magnitude(self, store=None, target=None):
        '''
        Get the moment magnitude.

        Precedence: explicit ``magnitude``; else derived from
        ``volume_change`` (requires a store); else the magnitude of a unit
        moment (M0 = 1 Nm).
        '''
        self.check_conflicts()

        if self.magnitude is not None:
            return self.magnitude

        elif self.volume_change is not None:
            moment = self.volume_change * \
                self.get_moment_to_volume_change_ratio(store, target)

            return float(pmt.moment_to_magnitude(abs(moment)))
        else:
            return float(pmt.moment_to_magnitude(1.0))

    def get_volume_change(self, store=None, target=None):
        '''
        Get the volume change, deriving it from the magnitude if needed.
        '''
        self.check_conflicts()

        if self.volume_change is not None:
            return self.volume_change

        elif self.magnitude is not None:
            moment = float(pmt.magnitude_to_moment(self.magnitude))
            return moment / self.get_moment_to_volume_change_ratio(
                store, target)

        else:
            # Fix: pass ``target`` along so its interpolation setting is
            # honoured (it was silently dropped in this branch before).
            return 1.0 / self.get_moment_to_volume_change_ratio(
                store, target)

    def get_moment_to_volume_change_ratio(self, store, target=None):
        '''
        Get the ratio M0 / dV from the elastic moduli at the source position.

        :raises: :py:exc:`DerivedMagnitudeError` if no store is given or the
            source position is outside the earth model.
        '''
        if store is None:
            raise DerivedMagnitudeError(
                'Need earth model to convert between volume change and '
                'magnitude.')

        points = num.array(
            [[self.north_shift, self.east_shift, self.depth]], dtype=float)

        interpolation = target.interpolation if target else 'multilinear'
        try:
            shear_moduli = store.config.get_shear_moduli(
                self.lat, self.lon,
                points=points,
                interpolation=interpolation)[0]

            bulk_moduli = store.config.get_bulk_moduli(
                self.lat, self.lon,
                points=points,
                interpolation=interpolation)[0]
        except meta.OutOfBounds:
            raise DerivedMagnitudeError(
                'Could not get shear modulus at source position.')

        return float(2. * shear_moduli + bulk_moduli)

    def get_factor(self):
        # Amplitudes are baked into the discretized source; no
        # post-processing scaling.
        return 1.0

    def discretize_basesource(self, store, target=None):
        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)

        amplitudes *= self.get_moment(store, target) * math.sqrt(2. / 3.)

        if self.volume_change is not None:
            # implosion: flip the sign of the isotropic amplitudes
            if self.volume_change < 0.:
                amplitudes *= -1

        return meta.DiscretizedExplosionSource(
            m0s=amplitudes,
            **self._dparams_base_repeated(times))

    def pyrocko_moment_tensor(self, store=None, target=None):
        a = self.get_moment(store, target) * math.sqrt(2. / 3.)
        return pmt.MomentTensor(m=pmt.symmat6(a, a, a, 0., 0., 0.))
class RectangularExplosionSource(ExplosionSource):
    '''
    Rectangular or line explosion source.
    '''

    discretized_source_class = meta.DiscretizedExplosionSource

    strike = Float.T(
        default=0.0,
        help='strike direction in [deg], measured clockwise from north')

    dip = Float.T(
        default=90.0,
        help='dip angle in [deg], measured downward from horizontal')

    length = Float.T(
        default=0.,
        help='length of rectangular source area [m]')

    width = Float.T(
        default=0.,
        help='width of rectangular source area [m]')

    # Fix: ``center_left`` and ``center_right`` were documented in the help
    # string and are supported by ``map_anchor`` but were missing from the
    # allowed choices.
    anchor = StringChoice.T(
        choices=['top', 'top_left', 'top_right', 'center', 'center_left',
                 'center_right', 'bottom', 'bottom_left', 'bottom_right'],
        default='center',
        optional=True,
        help='Anchor point for positioning the plane, can be: top, center or'
             'bottom and also top_left, top_right,bottom_left,'
             'bottom_right, center_left and center right')

    nucleation_x = Float.T(
        optional=True,
        help='horizontal position of rupture nucleation in normalized fault '
             'plane coordinates (-1 = left edge, +1 = right edge)')

    nucleation_y = Float.T(
        optional=True,
        help='down-dip position of rupture nucleation in normalized fault '
             'plane coordinates (-1 = upper edge, +1 = lower edge)')

    velocity = Float.T(
        default=3500.,
        help='speed of explosion front [m/s]')

    aggressive_oversampling = Bool.T(
        default=False,
        help='Aggressive oversampling for basesource discretization. '
             "When using 'multilinear' interpolation oversampling has"
             ' practically no effect.')

    def base_key(self):
        return Source.base_key(self) + (self.strike, self.dip, self.length,
                                        self.width, self.nucleation_x,
                                        self.nucleation_y, self.velocity,
                                        self.anchor)

    def discretize_basesource(self, store, target=None):
        '''
        Discretize the rupture plane into a cloud of explosion point sources.
        '''
        # Convert normalized nucleation coordinates (-1..1) to offsets in [m]
        # relative to the plane center.
        if self.nucleation_x is not None:
            nucx = self.nucleation_x * 0.5 * self.length
        else:
            nucx = None

        if self.nucleation_y is not None:
            nucy = self.nucleation_y * 0.5 * self.width
        else:
            nucy = None

        stf = self.effective_stf_pre()

        points, times, amplitudes, dl, dw, nl, nw = discretize_rect_source(
            store.config.deltas, store.config.deltat,
            self.time, self.north_shift, self.east_shift, self.depth,
            self.strike, self.dip, self.length, self.width, self.anchor,
            self.velocity, stf=stf, nucleation_x=nucx, nucleation_y=nucy)

        # normalize so the sub-source amplitudes sum up to the total moment
        amplitudes /= num.sum(amplitudes)
        amplitudes *= self.get_moment(store, target)

        return meta.DiscretizedExplosionSource(
            lat=self.lat,
            lon=self.lon,
            times=times,
            north_shifts=points[:, 0],
            east_shifts=points[:, 1],
            depths=points[:, 2],
            m0s=amplitudes)

    def outline(self, cs='xyz'):
        '''
        Get the corner points of the rupture plane in the requested
        coordinate system.
        '''
        points = outline_rect_source(self.strike, self.dip, self.length,
                                     self.width, self.anchor)

        points[:, 0] += self.north_shift
        points[:, 1] += self.east_shift
        points[:, 2] += self.depth
        if cs == 'xyz':
            return points
        elif cs == 'xy':
            return points[:, :2]
        elif cs in ('latlon', 'lonlat'):
            latlon = ne_to_latlon(
                self.lat, self.lon, points[:, 0], points[:, 1])

            latlon = num.array(latlon).T
            if cs == 'latlon':
                return latlon
            else:
                return latlon[:, ::-1]

    def get_nucleation_abs_coord(self, cs='xy'):
        '''
        Get the absolute coordinates of the nucleation point, or
        ``(None, None)`` if no nucleation point is set.
        '''
        if self.nucleation_x is None:
            return None, None

        coords = from_plane_coords(self.strike, self.dip, self.length,
                                   self.width, self.depth, self.nucleation_x,
                                   self.nucleation_y, lat=self.lat,
                                   lon=self.lon, north_shift=self.north_shift,
                                   east_shift=self.east_shift, cs=cs)
        return coords
class DCSource(SourceWithMagnitude):
    '''
    A double-couple point source.
    '''

    strike = Float.T(
        default=0.0,
        help='strike direction in [deg], measured clockwise from north')

    dip = Float.T(
        default=90.0,
        help='dip angle in [deg], measured downward from horizontal')

    rake = Float.T(
        default=0.0,
        help='rake angle in [deg], '
             'measured counter-clockwise from right-horizontal '
             'in on-plane view')

    discretized_source_class = meta.DiscretizedMTSource

    def base_key(self):
        # strike/dip/rake determine the radiation pattern, so they take part
        # in the GF-stack sharing key
        return Source.base_key(self) + (self.strike, self.dip, self.rake)

    def get_factor(self):
        # scalar moment M0 applied in post-processing; discretization is done
        # for a unit moment tensor
        return float(pmt.magnitude_to_moment(self.magnitude))

    def discretize_basesource(self, store, target=None):
        mot = pmt.MomentTensor(
            strike=self.strike, dip=self.dip, rake=self.rake)

        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)
        return meta.DiscretizedMTSource(
            m6s=mot.m6()[num.newaxis, :] * amplitudes[:, num.newaxis],
            **self._dparams_base_repeated(times))

    def pyrocko_moment_tensor(self, store=None, target=None):
        return pmt.MomentTensor(
            strike=self.strike,
            dip=self.dip,
            rake=self.rake,
            scalar_moment=self.moment)

    def pyrocko_event(self, store=None, target=None, **kwargs):
        return SourceWithMagnitude.pyrocko_event(
            self, store, target,
            moment_tensor=self.pyrocko_moment_tensor(store, target),
            **kwargs)

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        '''
        Create a DCSource from an event, taking strike/dip/rake and magnitude
        from the event's moment tensor when available.
        '''
        d = {}
        mt = ev.moment_tensor
        if mt:
            # pick the first of the two conjugate fault planes
            (strike, dip, rake), _ = mt.both_strike_dip_rake()
            d.update(
                strike=float(strike),
                dip=float(dip),
                rake=float(rake),
                magnitude=float(mt.moment_magnitude()))

        d.update(kwargs)
        return super(DCSource, cls).from_pyrocko_event(ev, **d)
class CLVDSource(SourceWithMagnitude):
    '''
    A pure CLVD point source.
    '''

    discretized_source_class = meta.DiscretizedMTSource

    azimuth = Float.T(
        default=0.0,
        help='azimuth direction of largest dipole, clockwise from north [deg]')

    dip = Float.T(
        default=90.,
        help='dip direction of largest dipole, downward from horizontal [deg]')

    def base_key(self):
        return Source.base_key(self) + (self.azimuth, self.dip)

    def get_factor(self):
        # scalar moment M0 applied in post-processing
        return float(pmt.magnitude_to_moment(self.magnitude))

    @property
    def m6(self):
        '''
        CLVD moment tensor (6-component form), oriented along azimuth/dip.
        '''
        a = math.sqrt(4. / 3.) * self.get_factor()
        m = pmt.symmat6(-0.5 * a, -0.5 * a, a, 0., 0., 0.)
        rotmat1 = pmt.euler_to_matrix(
            d2r * (self.dip - 90.),
            d2r * (self.azimuth - 90.),
            0.)
        m = num.dot(rotmat1.T, num.dot(m, rotmat1))
        return pmt.to6(m)

    @property
    def m6_astuple(self):
        return tuple(self.m6.tolist())

    def discretize_basesource(self, store, target=None):
        factor = self.get_factor()
        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)
        # divide out the factor here; it is re-applied via get_factor() in
        # post-processing
        return meta.DiscretizedMTSource(
            m6s=self.m6[num.newaxis, :] * amplitudes[:, num.newaxis] / factor,
            **self._dparams_base_repeated(times))

    def pyrocko_moment_tensor(self, store=None, target=None):
        return pmt.MomentTensor(m=pmt.symmat6(*self.m6_astuple))

    def pyrocko_event(self, store=None, target=None, **kwargs):
        mt = self.pyrocko_moment_tensor(store, target)
        # Reuse the already computed moment tensor instead of building it a
        # second time.
        return Source.pyrocko_event(
            self, store, target,
            moment_tensor=mt,
            magnitude=float(mt.moment_magnitude()),
            **kwargs)
class VLVDSource(SourceWithDerivedMagnitude):
    '''
    Volumetric linear vector dipole source.

    This source is a parameterization for a restricted moment tensor point
    source, useful to represent dyke or sill like inflation or deflation
    sources. The restriction is such that the moment tensor is rotational
    symmetric. It can be represented by a superposition of a linear vector
    dipole (here we use a CLVD for convenience) and an isotropic component.
    The restricted moment tensor has 4 degrees of freedom: 2 independent
    eigenvalues and 2 rotation angles orienting the symmetry axis.

    In this parameterization, the isotropic component is controlled by
    ``volume_change``. To define the moment tensor, it must be converted to
    the scalar moment of the MT's isotropic component. For the conversion,
    the shear modulus at the source's position must be known. This value is
    extracted from the earth model defined in the GF store in use.

    The CLVD part is controlled by its scalar moment :math:`M_0`:
    ``clvd_moment``. The sign of ``clvd_moment`` is used to switch between a
    positive or negative CLVD (the sign of the largest eigenvalue).
    '''

    discretized_source_class = meta.DiscretizedMTSource

    azimuth = Float.T(
        default=0.0,
        help='azimuth direction of symmetry axis, clockwise from north [deg].')

    dip = Float.T(
        default=90.,
        help='dip direction of symmetry axis, downward from horizontal [deg].')

    volume_change = Float.T(
        default=0.,
        help='volume change of the inflation/deflation [m^3].')

    clvd_moment = Float.T(
        default=0.,
        help='scalar moment :math:`M_0` of the CLVD component [Nm]. The sign '
             'controls the sign of the CLVD (the sign of its largest '
             'eigenvalue).')

    def get_moment_to_volume_change_ratio(self, store, target):
        '''
        Get the ratio M0 / dV from the elastic moduli at the source position.

        :raises: :py:exc:`DerivedMagnitudeError` if store or target are
            missing, or the source position is outside the earth model.
        '''
        if store is None or target is None:
            raise DerivedMagnitudeError(
                'Need earth model to convert between volume change and '
                'magnitude.')

        points = num.array(
            [[self.north_shift, self.east_shift, self.depth]], dtype=float)

        try:
            shear_moduli = store.config.get_shear_moduli(
                self.lat, self.lon,
                points=points,
                interpolation=target.interpolation)[0]

            bulk_moduli = store.config.get_bulk_moduli(
                self.lat, self.lon,
                points=points,
                interpolation=target.interpolation)[0]
        except meta.OutOfBounds:
            raise DerivedMagnitudeError(
                'Could not get shear modulus at source position.')

        return float(2. * shear_moduli + bulk_moduli)

    def base_key(self):
        return Source.base_key(self) + \
            (self.azimuth, self.dip, self.volume_change, self.clvd_moment)

    def get_magnitude(self, store=None, target=None):
        # magnitude of the combined (CLVD + isotropic) moment tensor
        mt = self.pyrocko_moment_tensor(store, target)
        return float(pmt.moment_to_magnitude(mt.moment))

    def get_m6(self, store, target):
        '''
        Get the combined moment tensor (6-component form): oriented CLVD plus
        isotropic part derived from the volume change.
        '''
        a = math.sqrt(4. / 3.) * self.clvd_moment
        m_clvd = pmt.symmat6(-0.5 * a, -0.5 * a, a, 0., 0., 0.)

        rotmat1 = pmt.euler_to_matrix(
            d2r * (self.dip - 90.),
            d2r * (self.azimuth - 90.),
            0.)
        m_clvd = num.dot(rotmat1.T, num.dot(m_clvd, rotmat1))

        m_iso = self.volume_change * \
            self.get_moment_to_volume_change_ratio(store, target)

        m_iso = pmt.symmat6(m_iso, m_iso, m_iso, 0.,
                            0., 0.,) * math.sqrt(2. / 3)

        m = pmt.to6(m_clvd) + pmt.to6(m_iso)
        return m

    def get_moment(self, store=None, target=None):
        return float(pmt.magnitude_to_moment(
            self.get_magnitude(store, target)))

    def get_m6_astuple(self, store, target):
        m6 = self.get_m6(store, target)
        return tuple(m6.tolist())

    def discretize_basesource(self, store, target=None):
        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)

        m6 = self.get_m6(store, target)
        # NOTE(review): ``m6`` (6,) is scaled by ``amplitudes`` (nt,) and
        # emitted as a single row, while ``_dparams_base_repeated`` repeats
        # the location nt times — looks like this assumes a single-sample
        # STF (nt == 1); confirm behavior for multi-sample 'pre' STFs.
        m6 *= amplitudes / self.get_factor()

        return meta.DiscretizedMTSource(
            m6s=m6[num.newaxis, :],
            **self._dparams_base_repeated(times))

    def pyrocko_moment_tensor(self, store=None, target=None):
        m6_astuple = self.get_m6_astuple(store, target)
        return pmt.MomentTensor(m=pmt.symmat6(*m6_astuple))
class MTSource(Source):
    '''
    A moment tensor point source.
    '''

    discretized_source_class = meta.DiscretizedMTSource

    mnn = Float.T(
        default=1.,
        help='north-north component of moment tensor in [Nm]')

    mee = Float.T(
        default=1.,
        help='east-east component of moment tensor in [Nm]')

    mdd = Float.T(
        default=1.,
        help='down-down component of moment tensor in [Nm]')

    mne = Float.T(
        default=0.,
        help='north-east component of moment tensor in [Nm]')

    mnd = Float.T(
        default=0.,
        help='north-down component of moment tensor in [Nm]')

    med = Float.T(
        default=0.,
        help='east-down component of moment tensor in [Nm]')

    def __init__(self, **kwargs):
        # Accept a 6-sequence ``m6`` as shorthand for the six components.
        if 'm6' in kwargs:
            for (k, v) in zip('mnn mee mdd mne mnd med'.split(),
                              kwargs.pop('m6')):
                kwargs[k] = float(v)

        Source.__init__(self, **kwargs)

    @property
    def m6(self):
        # components as array: (mnn, mee, mdd, mne, mnd, med)
        return num.array(self.m6_astuple)

    @property
    def m6_astuple(self):
        return (self.mnn, self.mee, self.mdd, self.mne, self.mnd, self.med)

    @m6.setter
    def m6(self, value):
        self.mnn, self.mee, self.mdd, self.mne, self.mnd, self.med = value

    def base_key(self):
        return Source.base_key(self) + self.m6_astuple

    def discretize_basesource(self, store, target=None):
        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)
        return meta.DiscretizedMTSource(
            m6s=self.m6[num.newaxis, :] * amplitudes[:, num.newaxis],
            **self._dparams_base_repeated(times))

    def get_magnitude(self, store=None, target=None):
        m6 = self.m6
        # Mw from the scalar moment M0 = |M| / sqrt(2) (Frobenius norm of the
        # symmetric tensor); wrapped in float() for consistency with the
        # other source classes.
        return float(pmt.moment_to_magnitude(
            math.sqrt(num.sum(m6[0:3]**2) + 2.0 * num.sum(m6[3:6]**2)) /
            math.sqrt(2.)))

    def pyrocko_moment_tensor(self, store=None, target=None):
        return pmt.MomentTensor(m=pmt.symmat6(*self.m6_astuple))

    def pyrocko_event(self, store=None, target=None, **kwargs):
        mt = self.pyrocko_moment_tensor(store, target)
        # Reuse the already computed moment tensor instead of building it a
        # second time.
        return Source.pyrocko_event(
            self, store, target,
            moment_tensor=mt,
            magnitude=float(mt.moment_magnitude()),
            **kwargs)

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        d = {}
        mt = ev.moment_tensor
        if mt:
            d.update(m6=tuple(map(float, mt.m6())))
        else:
            if ev.magnitude is not None:
                # no tensor available: build an isotropic tensor with the
                # scalar moment implied by the magnitude
                mom = pmt.magnitude_to_moment(ev.magnitude)
                v = math.sqrt(2. / 3.) * mom
                d.update(m6=(v, v, v, 0., 0., 0.))

        d.update(kwargs)
        return super(MTSource, cls).from_pyrocko_event(ev, **d)
# Mapping of textual anchor names to normalized in-plane coordinates (x, y),
# with x along strike (-1 = left edge, +1 = right edge) and y down-dip
# (-1 = upper edge, +1 = lower edge).
map_anchor = {
    'center': (0.0, 0.0),
    'center_left': (-1.0, 0.0),
    'center_right': (1.0, 0.0),
    'top': (0.0, -1.0),
    'top_left': (-1.0, -1.0),
    'top_right': (1.0, -1.0),
    'bottom': (0.0, 1.0),
    'bottom_left': (-1.0, 1.0),
    'bottom_right': (1.0, 1.0)}
class RectangularSource(SourceWithDerivedMagnitude):
    '''
    Classical Haskell source model modified for bilateral rupture.
    '''

    discretized_source_class = meta.DiscretizedMTSource

    magnitude = Float.T(
        optional=True,
        help='moment magnitude Mw as in [Hanks and Kanamori, 1979]')

    strike = Float.T(
        default=0.0,
        help='strike direction in [deg], measured clockwise from north')

    dip = Float.T(
        default=90.0,
        help='dip angle in [deg], measured downward from horizontal')

    rake = Float.T(
        default=0.0,
        help='rake angle in [deg], '
             'measured counter-clockwise from right-horizontal '
             'in on-plane view')

    length = Float.T(
        default=0.,
        help='length of rectangular source area [m]')

    width = Float.T(
        default=0.,
        help='width of rectangular source area [m]')

    # NOTE(review): the help text mentions ``center_left``/``center_right``
    # but they are missing from ``choices`` although ``map_anchor`` defines
    # both — confirm whether they should be allowed.
    anchor = StringChoice.T(
        choices=['top', 'top_left', 'top_right', 'center', 'bottom',
                 'bottom_left', 'bottom_right'],
        default='center',
        optional=True,
        help='Anchor point for positioning the plane, can be: ``top, center '
             'bottom, top_left, top_right,bottom_left,'
             'bottom_right, center_left, center right``.')

    nucleation_x = Float.T(
        optional=True,
        help='horizontal position of rupture nucleation in normalized fault '
             'plane coordinates (``-1.`` = left edge, ``+1.`` = right edge)')

    nucleation_y = Float.T(
        optional=True,
        help='down-dip position of rupture nucleation in normalized fault '
             'plane coordinates (``-1.`` = upper edge, ``+1.`` = lower edge)')

    velocity = Float.T(
        default=3500.,
        help='speed of rupture front [m/s]')

    slip = Float.T(
        optional=True,
        help='Slip on the rectangular source area [m]')

    opening_fraction = Float.T(
        default=0.,
        help='Determines fraction of slip related to opening. '
             '(``-1``: pure tensile closing, '
             '``0``: pure shear, '
             '``1``: pure tensile opening)')

    decimation_factor = Int.T(
        optional=True,
        default=1,
        help='Sub-source decimation factor, a larger decimation will'
             ' make the result inaccurate but shorten the necessary'
             ' computation time (use for testing puposes only).')

    aggressive_oversampling = Bool.T(
        default=False,
        help='Aggressive oversampling for basesource discretization. '
             "When using 'multilinear' interpolation oversampling has"
             ' practically no effect.')
2150 def __init__(self, **kwargs):
2151 if 'moment' in kwargs:
2152 mom = kwargs.pop('moment')
2153 if 'magnitude' not in kwargs:
2154 kwargs['magnitude'] = float(pmt.moment_to_magnitude(mom))
2156 SourceWithDerivedMagnitude.__init__(self, **kwargs)
2158 def base_key(self):
2159 return SourceWithDerivedMagnitude.base_key(self) + (
2160 self.magnitude,
2161 self.slip,
2162 self.strike,
2163 self.dip,
2164 self.rake,
2165 self.length,
2166 self.width,
2167 self.nucleation_x,
2168 self.nucleation_y,
2169 self.velocity,
2170 self.decimation_factor,
2171 self.anchor)
2173 def check_conflicts(self):
2174 if self.magnitude is not None and self.slip is not None:
2175 raise DerivedMagnitudeError(
2176 'Magnitude and slip are both defined.')
    def get_magnitude(self, store=None, target=None):
        '''
        Get the moment magnitude.

        Precedence: explicit ``magnitude``; else derived from ``slip``
        (requires both ``store`` and ``target``); else the magnitude of a
        unit moment (M0 = 1 Nm).
        '''
        self.check_conflicts()
        if self.magnitude is not None:
            return self.magnitude

        elif self.slip is not None:
            if None in (store, target):
                raise DerivedMagnitudeError(
                    'Magnitude for a rectangular source with slip defined '
                    'can only be derived when earth model and target '
                    'interpolation method are available.')

            # amplitudes: 1-D (shear only) or 2-D with per-component rows
            # (see _discretize)
            amplitudes = self._discretize(store, target)[2]
            if amplitudes.ndim == 2:
                # CLVD component has no net moment, leave out
                return float(pmt.moment_to_magnitude(
                    num.sum(num.abs(amplitudes[0:2, :]).sum())))
            else:
                return float(pmt.moment_to_magnitude(num.sum(amplitudes)))

        else:
            return float(pmt.moment_to_magnitude(1.0))
    def get_factor(self):
        '''
        Get post-processing scaling factor — always 1 here, as amplitudes
        are applied during discretization (see :py:meth:`_discretize`).
        '''
        return 1.0
2204 def get_slip_tensile(self):
2205 return self.slip * self.opening_fraction
2207 def get_slip_shear(self):
2208 return self.slip - abs(self.get_slip_tensile)
    def _discretize(self, store, target):
        '''
        Discretize the rupture plane into point sources.

        Returns ``(points, times, amplitudes, dl, dw, nl, nw)``.
        ``amplitudes`` is 1-D for pure shear, or 2-D with per-component rows
        (shear [, isotropic, CLVD]) when tensile slip is involved.
        '''
        # Convert normalized nucleation coordinates (-1..1) to offsets in [m]
        # relative to the plane center.
        if self.nucleation_x is not None:
            nucx = self.nucleation_x * 0.5 * self.length
        else:
            nucx = None

        if self.nucleation_y is not None:
            nucy = self.nucleation_y * 0.5 * self.width
        else:
            nucy = None

        stf = self.effective_stf_pre()

        points, times, amplitudes, dl, dw, nl, nw = discretize_rect_source(
            store.config.deltas, store.config.deltat,
            self.time, self.north_shift, self.east_shift, self.depth,
            self.strike, self.dip, self.length, self.width, self.anchor,
            self.velocity, stf=stf, nucleation_x=nucx, nucleation_y=nucy,
            decimation_factor=self.decimation_factor,
            aggressive_oversampling=self.aggressive_oversampling)

        if self.slip is not None:
            # slip-based amplitudes: need elastic moduli from the earth model
            if target is not None:
                interpolation = target.interpolation
            else:
                interpolation = 'nearest_neighbor'
                logger.warning(
                    'no target information available, will use '
                    '"nearest_neighbor" interpolation when extracting shear '
                    'modulus from earth model')

            shear_moduli = store.config.get_shear_moduli(
                self.lat, self.lon,
                points=points,
                interpolation=interpolation)

            tensile_slip = self.get_slip_tensile()
            shear_slip = self.slip - abs(tensile_slip)

            amplitudes_total = [shear_moduli * shear_slip]
            if tensile_slip != 0:
                bulk_moduli = store.config.get_bulk_moduli(
                    self.lat, self.lon,
                    points=points,
                    interpolation=interpolation)

                tensile_iso = bulk_moduli * tensile_slip
                tensile_clvd = (2. / 3.) * shear_moduli * tensile_slip

                amplitudes_total.extend([tensile_iso, tensile_clvd])

            # per-sub-source moments: modulus * slip * sub-fault area
            amplitudes_total = num.vstack(amplitudes_total).squeeze() * \
                amplitudes * dl * dw

        else:
            # normalization to retain total moment
            amplitudes_norm = amplitudes / num.sum(amplitudes)
            moment = self.get_moment(store, target)

            amplitudes_total = [
                amplitudes_norm * moment * (1 - abs(self.opening_fraction))]
            if self.opening_fraction != 0.:
                amplitudes_total.append(
                    amplitudes_norm * self.opening_fraction * moment)

            amplitudes_total = num.vstack(amplitudes_total).squeeze()

        return points, times, num.atleast_1d(amplitudes_total), dl, dw, nl, nw
2279 def discretize_basesource(self, store, target=None):
2281 points, times, amplitudes, dl, dw, nl, nw = self._discretize(
2282 store, target)
2284 mot = pmt.MomentTensor(
2285 strike=self.strike, dip=self.dip, rake=self.rake)
2286 m6s = num.repeat(mot.m6()[num.newaxis, :], times.size, axis=0)
2288 if amplitudes.ndim == 1:
2289 m6s[:, :] *= amplitudes[:, num.newaxis]
2290 elif amplitudes.ndim == 2:
2291 # shear MT components
2292 rotmat1 = pmt.euler_to_matrix(
2293 d2r * self.dip, d2r * self.strike, d2r * -self.rake)
2294 m6s[:, :] *= amplitudes[0, :][:, num.newaxis]
2296 if amplitudes.shape[0] == 2:
2297 # tensile MT components - moment/magnitude input
2298 tensile = pmt.symmat6(1., 1., 3., 0., 0., 0.)
2299 rot_tensile = pmt.to6(
2300 num.dot(rotmat1.T, num.dot(tensile, rotmat1)))
2302 m6s_tensile = rot_tensile[
2303 num.newaxis, :] * amplitudes[1, :][:, num.newaxis]
2304 m6s += m6s_tensile
2306 elif amplitudes.shape[0] == 3:
2307 # tensile MT components - slip input
2308 iso = pmt.symmat6(1., 1., 1., 0., 0., 0.)
2309 clvd = pmt.symmat6(-1., -1., 2., 0., 0., 0.)
2311 rot_iso = pmt.to6(
2312 num.dot(rotmat1.T, num.dot(iso, rotmat1)))
2313 rot_clvd = pmt.to6(
2314 num.dot(rotmat1.T, num.dot(clvd, rotmat1)))
2316 m6s_iso = rot_iso[
2317 num.newaxis, :] * amplitudes[1, :][:, num.newaxis]
2318 m6s_clvd = rot_clvd[
2319 num.newaxis, :] * amplitudes[2, :][:, num.newaxis]
2320 m6s += m6s_iso + m6s_clvd
2321 else:
2322 raise ValueError('Unknwown amplitudes shape!')
2323 else:
2324 raise ValueError(
2325 'Unexpected dimension of {}'.format(amplitudes.ndim))
2327 ds = meta.DiscretizedMTSource(
2328 lat=self.lat,
2329 lon=self.lon,
2330 times=times,
2331 north_shifts=points[:, 0],
2332 east_shifts=points[:, 1],
2333 depths=points[:, 2],
2334 m6s=m6s,
2335 dl=dl,
2336 dw=dw,
2337 nl=nl,
2338 nw=nw)
2340 return ds
2342 def xy_to_coord(self, x, y, cs='xyz'):
2343 ln, wd = self.length, self.width
2344 strike, dip = self.strike, self.dip
2346 def array_check(variable):
2347 if not isinstance(variable, num.ndarray):
2348 return num.array(variable)
2349 else:
2350 return variable
2352 x, y = array_check(x), array_check(y)
2354 if x.shape[0] != y.shape[0]:
2355 raise ValueError('Shapes of x and y mismatch')
2357 x, y = x * 0.5 * ln, y * 0.5 * wd
2359 points = num.hstack((
2360 x.reshape(-1, 1), y.reshape(-1, 1), num.zeros((x.shape[0], 1))))
2362 anch_x, anch_y = map_anchor[self.anchor]
2363 points[:, 0] -= anch_x * 0.5 * ln
2364 points[:, 1] -= anch_y * 0.5 * wd
2366 rotmat = num.asarray(
2367 pmt.euler_to_matrix(dip * d2r, strike * d2r, 0.0))
2369 points_rot = num.dot(rotmat.T, points.T).T
2371 points_rot[:, 0] += self.north_shift
2372 points_rot[:, 1] += self.east_shift
2373 points_rot[:, 2] += self.depth
2375 if cs == 'xyz':
2376 return points_rot
2377 elif cs == 'xy':
2378 return points_rot[:, :2]
2379 elif cs in ('latlon', 'lonlat', 'latlondepth'):
2380 latlon = ne_to_latlon(
2381 self.lat, self.lon, points_rot[:, 0], points_rot[:, 1])
2382 latlon = num.array(latlon).T
2383 if cs == 'latlon':
2384 return latlon
2385 elif cs == 'lonlat':
2386 return latlon[:, ::-1]
2387 else:
2388 return num.concatenate(
2389 (latlon, points_rot[:, 2].reshape((len(points_rot), 1))),
2390 axis=1)
2392 def outline(self, cs='xyz'):
2393 x = num.array([-1., 1., 1., -1., -1.])
2394 y = num.array([-1., -1., 1., 1., -1.])
2396 return self.xy_to_coord(x, y, cs=cs)
2398 def points_on_source(self, cs='xyz', **kwargs):
2400 points = points_on_rect_source(
2401 self.strike, self.dip, self.length, self.width,
2402 self.anchor, **kwargs)
2404 points[:, 0] += self.north_shift
2405 points[:, 1] += self.east_shift
2406 points[:, 2] += self.depth
2407 if cs == 'xyz':
2408 return points
2409 elif cs == 'xy':
2410 return points[:, :2]
2411 elif cs in ('latlon', 'lonlat', 'latlondepth'):
2412 latlon = ne_to_latlon(
2413 self.lat, self.lon, points[:, 0], points[:, 1])
2415 latlon = num.array(latlon).T
2416 if cs == 'latlon':
2417 return latlon
2418 elif cs == 'lonlat':
2419 return latlon[:, ::-1]
2420 else:
2421 return num.concatenate(
2422 (latlon, points[:, 2].reshape((len(points), 1))),
2423 axis=1)
    def geometry(self, *args, **kwargs):
        '''
        Get a mesh representation of the discretized source.

        Additional ``*args`` and ``**kwargs`` are passed to
        :py:meth:`discretize_basesource`.

        :returns:
            Mesh with one quadrilateral face per discretized sub-source and
            times, moments and slip attached as properties.
        :rtype:
            :py:class:`~pyrocko.model.Geometry`
        '''
        from pyrocko.model import Geometry

        ds = self.discretize_basesource(*args, **kwargs)
        nx, ny = ds.nl, ds.nw

        def patch_outlines_xy(nx, ny):
            # Regular grid of normalized fault coordinates in [-1., 1.].
            points = num.zeros((nx * ny, 2))
            points[:, 0] = num.tile(num.linspace(-1., 1., nx), ny)
            points[:, 1] = num.repeat(num.linspace(-1., 1., ny), nx)

            return points

        # Vertices live on the (nx+1) x (ny+1) grid of patch corners.
        points_ds = patch_outlines_xy(nx + 1, ny + 1)
        npoints = (nx + 1) * (ny + 1)

        # Per-vertex reference lat/lon plus local (north, east, depth).
        vertices = num.hstack((
            num.ones((npoints, 1)) * self.lat,
            num.ones((npoints, 1)) * self.lon,
            self.xy_to_coord(points_ds[:, 0], points_ds[:, 1], cs='xyz')))

        # Each face is a closed quad (first corner repeated) indexing into
        # the vertex grid.
        faces = num.array([[
            iy * (nx + 1) + ix,
            iy * (nx + 1) + ix + 1,
            (iy + 1) * (nx + 1) + ix + 1,
            (iy + 1) * (nx + 1) + ix,
            iy * (nx + 1) + ix]
            for iy in range(ny) for ix in range(nx)])

        # Closed outline of the whole source plane.
        xyz = self.outline('xyz')
        latlon = num.ones((5, 2)) * num.array([self.lat, self.lon])
        patchverts = num.hstack((latlon, xyz))

        geom = Geometry()
        geom.setup(vertices, faces)
        geom.set_outlines([patchverts])

        if self.stf:
            geom.times = num.unique(ds.times)

        if self.nucleation_x is not None and self.nucleation_y is not None:
            geom.add_property('t_arrival', ds.times)

        geom.add_property(
            'moment', ds.moments().reshape(ds.nl*ds.nw, -1))

        geom.add_property(
            'slip', num.ones_like(ds.times) * self.slip)

        return geom
2476 def get_nucleation_abs_coord(self, cs='xy'):
2478 if self.nucleation_x is None:
2479 return None, None
2481 coords = from_plane_coords(self.strike, self.dip, self.length,
2482 self.width, self.depth, self.nucleation_x,
2483 self.nucleation_y, lat=self.lat,
2484 lon=self.lon, north_shift=self.north_shift,
2485 east_shift=self.east_shift, cs=cs)
2486 return coords
2488 def pyrocko_moment_tensor(self, store=None, target=None):
2489 return pmt.MomentTensor(
2490 strike=self.strike,
2491 dip=self.dip,
2492 rake=self.rake,
2493 scalar_moment=self.get_moment(store, target))
2495 def pyrocko_event(self, store=None, target=None, **kwargs):
2496 return SourceWithDerivedMagnitude.pyrocko_event(
2497 self, store, target,
2498 **kwargs)
2500 @classmethod
2501 def from_pyrocko_event(cls, ev, **kwargs):
2502 d = {}
2503 mt = ev.moment_tensor
2504 if mt:
2505 (strike, dip, rake), _ = mt.both_strike_dip_rake()
2506 d.update(
2507 strike=float(strike),
2508 dip=float(dip),
2509 rake=float(rake),
2510 magnitude=float(mt.moment_magnitude()))
2512 d.update(kwargs)
2513 return super(RectangularSource, cls).from_pyrocko_event(ev, **d)
2516class PseudoDynamicRupture(SourceWithDerivedMagnitude):
2517 '''
2518 Combined Eikonal and Okada quasi-dynamic rupture model.
2520 Details are described in :doc:`/topics/pseudo-dynamic-rupture`.
2521 Note: attribute `stf` is not used so far, but kept for future applications.
2522 '''
2524 discretized_source_class = meta.DiscretizedMTSource
2526 strike = Float.T(
2527 default=0.0,
2528 help='Strike direction in [deg], measured clockwise from north.')
2530 dip = Float.T(
2531 default=0.0,
2532 help='Dip angle in [deg], measured downward from horizontal.')
2534 length = Float.T(
2535 default=10. * km,
2536 help='Length of rectangular source area in [m].')
2538 width = Float.T(
2539 default=5. * km,
2540 help='Width of rectangular source area in [m].')
2542 anchor = StringChoice.T(
2543 choices=['top', 'top_left', 'top_right', 'center', 'bottom',
2544 'bottom_left', 'bottom_right'],
2545 default='center',
2546 optional=True,
2547 help='Anchor point for positioning the plane, can be: ``top, center, '
2548 'bottom, top_left, top_right, bottom_left, '
2549 'bottom_right, center_left, center_right``.')
2551 nucleation_x__ = Array.T(
2552 default=num.array([0.]),
2553 dtype=num.float64,
2554 serialize_as='list',
2555 help='Horizontal position of rupture nucleation in normalized fault '
2556 'plane coordinates (``-1.`` = left edge, ``+1.`` = right edge).')
2558 nucleation_y__ = Array.T(
2559 default=num.array([0.]),
2560 dtype=num.float64,
2561 serialize_as='list',
2562 help='Down-dip position of rupture nucleation in normalized fault '
2563 'plane coordinates (``-1.`` = upper edge, ``+1.`` = lower edge).')
2565 nucleation_time__ = Array.T(
2566 optional=True,
2567 help='Time in [s] after origin, when nucleation points defined by '
2568 '``nucleation_x`` and ``nucleation_y`` rupture.',
2569 dtype=num.float64,
2570 serialize_as='list')
2572 gamma = Float.T(
2573 default=0.8,
2574 help='Scaling factor between rupture velocity and S-wave velocity: '
2575 r':math:`v_r = \gamma * v_s`.')
2577 nx = Int.T(
2578 default=2,
2579 help='Number of discrete source patches in x direction (along '
2580 'strike).')
2582 ny = Int.T(
2583 default=2,
2584 help='Number of discrete source patches in y direction (down dip).')
2586 slip = Float.T(
2587 optional=True,
2588 help='Maximum slip of the rectangular source [m]. '
2589 'Setting the slip the tractions/stress field '
2590 'will be normalized to accomodate the desired maximum slip.')
2592 rake = Float.T(
2593 optional=True,
2594 help='Rake angle in [deg], '
2595 'measured counter-clockwise from right-horizontal '
2596 'in on-plane view. Rake is translated into homogenous tractions '
2597 'in strike and up-dip direction. ``rake`` is mutually exclusive '
2598 'with tractions parameter.')
2600 patches = List.T(
2601 OkadaSource.T(),
2602 optional=True,
2603 help='List of all boundary elements/sub faults/fault patches.')
2605 patch_mask__ = Array.T(
2606 dtype=bool,
2607 serialize_as='list',
2608 shape=(None,),
2609 optional=True,
2610 help='Mask for all boundary elements/sub faults/fault patches. True '
2611 'leaves the patch in the calculation, False excludes the patch.')
2613 tractions = TractionField.T(
2614 optional=True,
2615 help='Traction field the rupture plane is exposed to. See the '
2616 ':py:mod:`pyrocko.gf.tractions` module for more details. '
2617 'If ``tractions=None`` and ``rake`` is given'
2618 ' :py:class:`~pyrocko.gf.tractions.DirectedTractions` will'
2619 ' be used.')
2621 coef_mat = Array.T(
2622 optional=True,
2623 help='Coefficient matrix linking traction and dislocation field.',
2624 dtype=num.float64,
2625 shape=(None, None))
2627 eikonal_decimation = Int.T(
2628 optional=True,
2629 default=1,
2630 help='Sub-source eikonal factor, a smaller eikonal factor will'
2631 ' increase the accuracy of rupture front calculation but'
2632 ' increases also the computation time.')
2634 decimation_factor = Int.T(
2635 optional=True,
2636 default=1,
2637 help='Sub-source decimation factor, a larger decimation will'
2638 ' make the result inaccurate but shorten the necessary'
2639 ' computation time (use for testing puposes only).')
2641 nthreads = Int.T(
2642 optional=True,
2643 default=1,
2644 help='Number of threads for Okada forward modelling, '
2645 'matrix inversion and calculation of point subsources. '
2646 'Note: for small/medium matrices 1 thread is most efficient.')
2648 pure_shear = Bool.T(
2649 optional=True,
2650 default=False,
2651 help='Calculate only shear tractions and omit tensile tractions.')
2653 smooth_rupture = Bool.T(
2654 default=True,
2655 help='Smooth the tractions by weighting partially ruptured'
2656 ' fault patches.')
2658 aggressive_oversampling = Bool.T(
2659 default=False,
2660 help='Aggressive oversampling for basesource discretization. '
2661 "When using 'multilinear' interpolation oversampling has"
2662 ' practically no effect.')
    def __init__(self, **kwargs):
        '''
        Initialize the source and validate that ``tractions`` and ``rake``
        are not in conflict (see :py:meth:`check_conflicts`).
        '''
        SourceWithDerivedMagnitude.__init__(self, **kwargs)
        # Cache for rupture velocity/time interpolators, keyed by
        # interpolation method name (see get_vr_time_interpolators).
        self._interpolators = {}
        self.check_conflicts()
2669 @property
2670 def nucleation_x(self):
2671 return self.nucleation_x__
2673 @nucleation_x.setter
2674 def nucleation_x(self, nucleation_x):
2675 if isinstance(nucleation_x, list):
2676 nucleation_x = num.array(nucleation_x)
2678 elif not isinstance(
2679 nucleation_x, num.ndarray) and nucleation_x is not None:
2681 nucleation_x = num.array([nucleation_x])
2682 self.nucleation_x__ = nucleation_x
2684 @property
2685 def nucleation_y(self):
2686 return self.nucleation_y__
2688 @nucleation_y.setter
2689 def nucleation_y(self, nucleation_y):
2690 if isinstance(nucleation_y, list):
2691 nucleation_y = num.array(nucleation_y)
2693 elif not isinstance(nucleation_y, num.ndarray) \
2694 and nucleation_y is not None:
2695 nucleation_y = num.array([nucleation_y])
2697 self.nucleation_y__ = nucleation_y
2699 @property
2700 def nucleation(self):
2701 nucl_x, nucl_y = self.nucleation_x, self.nucleation_y
2703 if (nucl_x is None) or (nucl_y is None):
2704 return None
2706 assert nucl_x.shape[0] == nucl_y.shape[0]
2708 return num.concatenate(
2709 (nucl_x[:, num.newaxis], nucl_y[:, num.newaxis]), axis=1)
2711 @nucleation.setter
2712 def nucleation(self, nucleation):
2713 if isinstance(nucleation, list):
2714 nucleation = num.array(nucleation)
2716 assert nucleation.shape[1] == 2
2718 self.nucleation_x = nucleation[:, 0]
2719 self.nucleation_y = nucleation[:, 1]
2721 @property
2722 def nucleation_time(self):
2723 return self.nucleation_time__
2725 @nucleation_time.setter
2726 def nucleation_time(self, nucleation_time):
2727 if not isinstance(nucleation_time, num.ndarray) \
2728 and nucleation_time is not None:
2729 nucleation_time = num.array([nucleation_time])
2731 self.nucleation_time__ = nucleation_time
2733 @property
2734 def patch_mask(self):
2735 if (self.patch_mask__ is not None and
2736 self.patch_mask__.shape == (self.nx * self.ny,)):
2738 return self.patch_mask__
2739 else:
2740 return num.ones(self.nx * self.ny, dtype=bool)
2742 @patch_mask.setter
2743 def patch_mask(self, patch_mask):
2744 if isinstance(patch_mask, list):
2745 patch_mask = num.array(patch_mask)
2747 self.patch_mask__ = patch_mask
2749 def get_tractions(self):
2750 '''
2751 Get source traction vectors.
2753 If :py:attr:`rake` is given, unit length directed traction vectors
2754 (:py:class:`~pyrocko.gf.tractions.DirectedTractions`) are returned,
2755 else the given :py:attr:`tractions` are used.
2757 :returns:
2758 Traction vectors per patch.
2759 :rtype:
2760 :py:class:`~numpy.ndarray`: ``(n_patches, 3)``.
2761 '''
2763 if self.rake is not None:
2764 if num.isnan(self.rake):
2765 raise ValueError('Rake must be a real number, not NaN.')
2767 logger.warning(
2768 'Tractions are derived based on the given source rake.')
2769 tractions = DirectedTractions(rake=self.rake)
2770 else:
2771 tractions = self.tractions
2772 return tractions.get_tractions(self.nx, self.ny, self.patches)
    def get_scaled_tractions(self, store):
        '''
        Get traction vectors rescaled to given slip.

        Opposing to :py:meth:`get_tractions` traction vectors
        (:py:class:`~pyrocko.gf.tractions.DirectedTractions`) are rescaled to
        the given :py:attr:`slip` before returning. If no :py:attr:`slip` and
        :py:attr:`rake` are provided, the given :py:attr:`tractions` are
        returned without scaling.

        :param store:
            Green's function database (needs to cover whole region of the
            source).
        :type store:
            :py:class:`~pyrocko.gf.store.Store`

        :returns:
            Traction vectors per patch.
        :rtype:
            :py:class:`~numpy.ndarray`: ``(n_patches, 3)``.
        '''
        tractions = self.tractions
        factor = 1.

        if self.rake is not None and self.slip is not None:
            if num.isnan(self.rake):
                raise ValueError('Rake must be a real number, not NaN.')

            # Scale unit tractions such that the maximum slip magnitude of
            # the unscaled solution equals the requested slip.
            self.discretize_patches(store)
            slip_0t = max(num.linalg.norm(
                self.get_slip(scale_slip=False),
                axis=1))

            factor = self.slip / slip_0t
            tractions = DirectedTractions(rake=self.rake)

        return tractions.get_tractions(self.nx, self.ny, self.patches) * factor
2812 def base_key(self):
2813 return SourceWithDerivedMagnitude.base_key(self) + (
2814 self.slip,
2815 self.strike,
2816 self.dip,
2817 self.rake,
2818 self.length,
2819 self.width,
2820 float(self.nucleation_x.mean()),
2821 float(self.nucleation_y.mean()),
2822 self.decimation_factor,
2823 self.anchor,
2824 self.pure_shear,
2825 self.gamma,
2826 tuple(self.patch_mask))
2828 def check_conflicts(self):
2829 if self.tractions and self.rake:
2830 raise AttributeError(
2831 'Tractions and rake are mutually exclusive.')
2832 if self.tractions is None and self.rake is None:
2833 self.rake = 0.
    def get_magnitude(self, store=None, target=None):
        '''
        Get the moment magnitude of the source.

        If slip or tractions are set, the magnitude is derived by summing up
        the moment rate of the discretized source; this requires ``store``.
        Otherwise the magnitude corresponding to a moment of 1 Nm is
        returned.

        :param store:
            Green's function database (needs to cover whole region of the
            source).
        :type store:
            optional, :py:class:`~pyrocko.gf.store.Store`

        :param target:
            Target information.
        :type target:
            optional, :py:class:`~pyrocko.gf.targets.Target`

        :raises DerivedMagnitudeError:
            if slip or tractions are given but ``store`` is not.
        '''
        self.check_conflicts()
        if self.slip is not None or self.tractions is not None:
            if store is None:
                raise DerivedMagnitudeError(
                    'Magnitude for a rectangular source with slip or '
                    'tractions defined can only be derived when earth model '
                    'is set.')

            moment_rate, calc_times = self.discretize_basesource(
                store, target=target).get_moment_rate(store.config.deltat)

            # Integrate moment rate over (possibly non-uniform) time steps;
            # the first step is duplicated to match array lengths.
            deltat = num.concatenate((
                (num.diff(calc_times)[0],),
                num.diff(calc_times)))

            return float(pmt.moment_to_magnitude(
                num.sum(moment_rate * deltat)))

        else:
            return float(pmt.moment_to_magnitude(1.0))
2857 def get_factor(self):
2858 return 1.0
2860 def outline(self, cs='xyz'):
2861 '''
2862 Get source outline corner coordinates.
2864 :param cs:
2865 :ref:`Output coordinate system <coordinate-system-names>`.
2866 :type cs:
2867 optional, str
2869 :returns:
2870 Corner points in desired coordinate system.
2871 :rtype:
2872 :py:class:`~numpy.ndarray`: ``(5, [2, 3])``.
2873 '''
2874 points = outline_rect_source(self.strike, self.dip, self.length,
2875 self.width, self.anchor)
2877 points[:, 0] += self.north_shift
2878 points[:, 1] += self.east_shift
2879 points[:, 2] += self.depth
2880 if cs == 'xyz':
2881 return points
2882 elif cs == 'xy':
2883 return points[:, :2]
2884 elif cs in ('latlon', 'lonlat', 'latlondepth'):
2885 latlon = ne_to_latlon(
2886 self.lat, self.lon, points[:, 0], points[:, 1])
2888 latlon = num.array(latlon).T
2889 if cs == 'latlon':
2890 return latlon
2891 elif cs == 'lonlat':
2892 return latlon[:, ::-1]
2893 else:
2894 return num.concatenate(
2895 (latlon, points[:, 2].reshape((len(points), 1))),
2896 axis=1)
2898 def points_on_source(self, cs='xyz', **kwargs):
2899 '''
2900 Convert relative plane coordinates to geographical coordinates.
2902 Given x and y coordinates (relative source coordinates between -1.
2903 and 1.) are converted to desired geographical coordinates. Coordinates
2904 need to be given as :py:class:`~numpy.ndarray` arguments ``points_x``
2905 and ``points_y``.
2907 :param cs:
2908 :ref:`Output coordinate system <coordinate-system-names>`.
2909 :type cs:
2910 optional, str
2912 :returns:
2913 Point coordinates in desired coordinate system.
2914 :rtype:
2915 :py:class:`~numpy.ndarray`: ``(n_points, [2, 3])``.
2916 '''
2917 points = points_on_rect_source(
2918 self.strike, self.dip, self.length, self.width,
2919 self.anchor, **kwargs)
2921 points[:, 0] += self.north_shift
2922 points[:, 1] += self.east_shift
2923 points[:, 2] += self.depth
2924 if cs == 'xyz':
2925 return points
2926 elif cs == 'xy':
2927 return points[:, :2]
2928 elif cs in ('latlon', 'lonlat', 'latlondepth'):
2929 latlon = ne_to_latlon(
2930 self.lat, self.lon, points[:, 0], points[:, 1])
2932 latlon = num.array(latlon).T
2933 if cs == 'latlon':
2934 return latlon
2935 elif cs == 'lonlat':
2936 return latlon[:, ::-1]
2937 else:
2938 return num.concatenate(
2939 (latlon, points[:, 2].reshape((len(points), 1))),
2940 axis=1)
    def pyrocko_moment_tensor(self, store=None, target=None):
        '''
        Get overall moment tensor of the rupture.

        The rake is derived as the norm-weighted average of the patch-wise
        slip directions (if ``store`` is given) or traction directions.

        :param store:
            Green's function database (needs to cover whole region of the
            source).
        :type store:
            optional, :py:class:`~pyrocko.gf.store.Store`

        :param target:
            Target information.
        :type target:
            optional, :py:class:`~pyrocko.gf.targets.Target`

        :rtype:
            :py:class:`pyrocko.moment_tensor.MomentTensor`
        '''
        if store is not None:
            if not self.patches:
                self.discretize_patches(store)

            data = self.get_slip()
        else:
            data = self.get_tractions()

        # Weight per-patch rakes by their relative vector magnitude.
        weights = num.linalg.norm(data, axis=1)
        weights /= weights.sum()

        rakes = num.arctan2(data[:, 1], data[:, 0]) * r2d
        rake = num.average(rakes, weights=weights)

        return pmt.MomentTensor(
            strike=self.strike,
            dip=self.dip,
            rake=rake,
            scalar_moment=self.get_moment(store, target))
2963 def pyrocko_event(self, store=None, target=None, **kwargs):
2964 return SourceWithDerivedMagnitude.pyrocko_event(
2965 self, store, target,
2966 **kwargs)
2968 @classmethod
2969 def from_pyrocko_event(cls, ev, **kwargs):
2970 d = {}
2971 mt = ev.moment_tensor
2972 if mt:
2973 (strike, dip, rake), _ = mt.both_strike_dip_rake()
2974 d.update(
2975 strike=float(strike),
2976 dip=float(dip),
2977 rake=float(rake))
2979 d.update(kwargs)
2980 return super(PseudoDynamicRupture, cls).from_pyrocko_event(ev, **d)
    def _discretize_points(self, store, *args, **kwargs):
        '''
        Discretize source plane with equal vertical and horizontal spacing.

        Additional ``*args`` and ``**kwargs`` are passed to
        :py:meth:`points_on_source`.

        :param store:
            Green's function database (needs to cover whole region of the
            source).
        :type store:
            :py:class:`~pyrocko.gf.store.Store`

        :returns:
            Number of points in strike and dip direction, distance
            between adjacent points, coordinates (latlondepth) and coordinates
            (xy on fault) for discrete points.
        :rtype:
            (int, int, float, :py:class:`~numpy.ndarray`,
            :py:class:`~numpy.ndarray`).
        '''
        anch_x, anch_y = map_anchor[self.anchor]

        # Sample a down-dip profile (roughly one point per km) to probe the
        # minimum S-wave velocity over the fault's depth range.
        npoints = int(self.width // km) + 1
        points = num.zeros((npoints, 3))
        points[:, 1] = num.linspace(-1., 1., npoints)
        points[:, 1] = (points[:, 1] - anch_y) * self.width/2

        rotmat = pmt.euler_to_matrix(self.dip*d2r, self.strike*d2r, 0.0)
        points = num.dot(rotmat.T, points.T).T
        points[:, 2] += self.depth

        vs_min = store.config.get_vs(
            self.lat, self.lon, points,
            interpolation='nearest_neighbor')
        # Floor vs at 0.5 km/s before scaling to rupture velocity.
        vr_min = max(vs_min.min(), .5*km) * self.gamma

        oversampling = 10.
        delta_l = self.length / (self.nx * oversampling)
        delta_w = self.width / (self.ny * oversampling)

        # Grid spacing: finest of patch oversampling, the distance covered
        # at vr_min per (oversampled) GF time step, and the store spacings.
        delta = self.eikonal_decimation * num.min([
            store.config.deltat * vr_min / oversampling,
            delta_l, delta_w] + [
            deltas for deltas in store.config.deltas])

        # Snap spacing so the width is an integer number of cells.
        delta = delta_w / num.ceil(delta_w / delta)

        nx = int(num.ceil(self.length / delta)) + 1
        ny = int(num.ceil(self.width / delta)) + 1

        # The along-strike grid slightly overshoots the fault length;
        # lim_x extends the normalized coordinate range accordingly.
        rem_l = (nx-1)*delta - self.length
        lim_x = rem_l / self.length

        points_xy = num.zeros((nx * ny, 2))
        points_xy[:, 0] = num.repeat(
            num.linspace(-1.-lim_x, 1.+lim_x, nx), ny)
        points_xy[:, 1] = num.tile(
            num.linspace(-1., 1., ny), nx)

        points = self.points_on_source(
            points_x=points_xy[:, 0],
            points_y=points_xy[:, 1],
            **kwargs)

        return nx, ny, delta, points, points_xy
3049 def _discretize_rupture_v(self, store, interpolation='nearest_neighbor',
3050 points=None):
3051 '''
3052 Get rupture velocity for discrete points on source plane.
3054 :param store:
3055 Green's function database (needs to cover the whole region of the
3056 source)
3057 :type store:
3058 optional, :py:class:`~pyrocko.gf.store.Store`
3060 :param interpolation:
3061 Interpolation method to use (choose between ``'nearest_neighbor'``
3062 and ``'multilinear'``).
3063 :type interpolation:
3064 optional, str
3066 :param points:
3067 Coordinates on fault (-1.:1.) of discrete points.
3068 :type points:
3069 optional, :py:class:`~numpy.ndarray`: ``(n_points, 2)``
3071 :returns:
3072 Rupture velocity assumed as :math:`v_s * \\gamma` for discrete
3073 points.
3074 :rtype:
3075 :py:class:`~numpy.ndarray`: ``(n_points, )``.
3076 '''
3078 if points is None:
3079 _, _, _, points, _ = self._discretize_points(store, cs='xyz')
3081 return store.config.get_vs(
3082 self.lat, self.lon,
3083 points=points,
3084 interpolation=interpolation) * self.gamma
3086 def discretize_time(
3087 self, store, interpolation='nearest_neighbor',
3088 vr=None, times=None, *args, **kwargs):
3089 '''
3090 Get rupture start time for discrete points on source plane.
3092 :param store:
3093 Green's function database (needs to cover whole region of the
3094 source)
3095 :type store:
3096 :py:class:`~pyrocko.gf.store.Store`
3098 :param interpolation:
3099 Interpolation method to use (choose between ``'nearest_neighbor'``
3100 and ``'multilinear'``).
3101 :type interpolation:
3102 optional, str
3104 :param vr:
3105 Array, containing rupture user defined rupture velocity values.
3106 :type vr:
3107 optional, :py:class:`~numpy.ndarray`
3109 :param times:
3110 Array, containing zeros, where rupture is starting, real positive
3111 numbers at later secondary nucleation points and -1, where time
3112 will be calculated. If not given, rupture starts at nucleation_x,
3113 nucleation_y. Times are given for discrete points with equal
3114 horizontal and vertical spacing.
3115 :type times:
3116 optional, :py:class:`~numpy.ndarray`
3118 :returns:
3119 Coordinates (latlondepth), coordinates (xy), rupture velocity,
3120 rupture propagation time of discrete points.
3121 :rtype:
3122 :py:class:`~numpy.ndarray`: ``(n_points, 3)``,
3123 :py:class:`~numpy.ndarray`: ``(n_points, 2)``,
3124 :py:class:`~numpy.ndarray`: ``(n_points_dip, n_points_strike)``,
3125 :py:class:`~numpy.ndarray`: ``(n_points_dip, n_points_strike)``.
3126 '''
3127 nx, ny, delta, points, points_xy = self._discretize_points(
3128 store, cs='xyz')
3130 if vr is None or vr.shape != tuple((nx, ny)):
3131 if vr:
3132 logger.warning(
3133 'Given rupture velocities are not in right shape: '
3134 '(%i, %i), but needed is (%i, %i).', *vr.shape + (nx, ny))
3135 vr = self._discretize_rupture_v(store, interpolation, points)\
3136 .reshape(nx, ny)
3138 if vr.shape != tuple((nx, ny)):
3139 logger.warning(
3140 'Given rupture velocities are not in right shape. Therefore'
3141 ' standard rupture velocity array is used.')
3143 def initialize_times():
3144 nucl_x, nucl_y = self.nucleation_x, self.nucleation_y
3146 if nucl_x.shape != nucl_y.shape:
3147 raise ValueError(
3148 'Nucleation coordinates have different shape.')
3150 dist_points = num.array([
3151 num.linalg.norm(points_xy - num.array([x, y]).ravel(), axis=1)
3152 for x, y in zip(nucl_x, nucl_y)])
3153 nucl_indices = num.argmin(dist_points, axis=1)
3155 if self.nucleation_time is None:
3156 nucl_times = num.zeros_like(nucl_indices)
3157 else:
3158 if self.nucleation_time.shape == nucl_x.shape:
3159 nucl_times = self.nucleation_time
3160 else:
3161 raise ValueError(
3162 'Nucleation coordinates and times have different '
3163 'shapes')
3165 t = num.full(nx * ny, -1.)
3166 t[nucl_indices] = nucl_times
3167 return t.reshape(nx, ny)
3169 if times is None:
3170 times = initialize_times()
3171 elif times.shape != tuple((nx, ny)):
3172 times = initialize_times()
3173 logger.warning(
3174 'Given times are not in right shape. Therefore standard time'
3175 ' array is used.')
3177 eikonal_ext.eikonal_solver_fmm_cartesian(
3178 speeds=vr, times=times, delta=delta)
3180 return points, points_xy, vr, times
3182 def get_vr_time_interpolators(
3183 self, store, interpolation='nearest_neighbor', force=False,
3184 **kwargs):
3185 '''
3186 Get interpolators for rupture velocity and rupture time.
3188 Additional ``**kwargs`` are passed to :py:meth:`discretize_time`.
3190 :param store:
3191 Green's function database (needs to cover whole region of the
3192 source).
3193 :type store:
3194 :py:class:`~pyrocko.gf.store.Store`
3196 :param interpolation:
3197 Interpolation method to use (choose between ``'nearest_neighbor'``
3198 and ``'multilinear'``).
3199 :type interpolation:
3200 optional, str
3202 :param force:
3203 Force recalculation of the interpolators (e.g. after change of
3204 nucleation point locations/times). Default is ``False``.
3205 :type force:
3206 optional, bool
3207 '''
3208 interp_map = {'multilinear': 'linear', 'nearest_neighbor': 'nearest'}
3209 if interpolation not in interp_map:
3210 raise TypeError(
3211 'Interpolation method %s not available' % interpolation)
3213 if not self._interpolators.get(interpolation, False) or force:
3214 _, points_xy, vr, times = self.discretize_time(
3215 store, **kwargs)
3217 if self.length <= 0.:
3218 raise ValueError(
3219 'length must be larger then 0. not %g' % self.length)
3221 if self.width <= 0.:
3222 raise ValueError(
3223 'width must be larger then 0. not %g' % self.width)
3225 nx, ny = times.shape
3226 anch_x, anch_y = map_anchor[self.anchor]
3228 points_xy[:, 0] = (points_xy[:, 0] - anch_x) * self.length / 2.
3229 points_xy[:, 1] = (points_xy[:, 1] - anch_y) * self.width / 2.
3231 ascont = num.ascontiguousarray
3233 self._interpolators[interpolation] = (
3234 nx, ny, times, vr,
3235 RegularGridInterpolator(
3236 (ascont(points_xy[::ny, 0]), ascont(points_xy[:ny, 1])),
3237 times,
3238 method=interp_map[interpolation]),
3239 RegularGridInterpolator(
3240 (ascont(points_xy[::ny, 0]), ascont(points_xy[:ny, 1])),
3241 vr,
3242 method=interp_map[interpolation]))
3244 return self._interpolators[interpolation]
    def discretize_patches(
            self, store, interpolation='nearest_neighbor', force=False,
            grid_shape=(),
            **kwargs):
        '''
        Get rupture start time and OkadaSource elements for points on rupture.

        All source elements and their corresponding center points are
        calculated and stored in the :py:attr:`patches` attribute.

        Additional ``**kwargs`` are passed to :py:meth:`discretize_time`.

        :param store:
            Green's function database (needs to cover whole region of the
            source).
        :type store:
            :py:class:`~pyrocko.gf.store.Store`

        :param interpolation:
            Interpolation method to use (choose between ``'nearest_neighbor'``
            and ``'multilinear'``).
        :type interpolation:
            optional, str

        :param force:
            Force recalculation of the vr and time interpolators ( e.g. after
            change of nucleation point locations/times). Default is ``False``.
        :type force:
            optional, bool

        :param grid_shape:
            Desired sub fault patch grid size (nlength, nwidth). Either factor
            or grid_shape should be set.
        :type grid_shape:
            optional, tuple of int
        '''
        nx, ny, times, vr, time_interpolator, vr_interpolator = \
            self.get_vr_time_interpolators(
                store,
                interpolation=interpolation, force=force, **kwargs)
        anch_x, anch_y = map_anchor[self.anchor]

        # Plane extents relative to the anchor point, as expected by
        # OkadaSource (al: along strike, aw: along dip).
        al = self.length / 2.
        aw = self.width / 2.
        al1 = -(al + anch_x * al)
        al2 = al - anch_x * al
        aw1 = -aw + anch_y * aw
        aw2 = aw + anch_y * aw
        assert num.abs([al1, al2]).sum() == self.length
        assert num.abs([aw1, aw2]).sum() == self.width

        def get_lame(*a, **kw):
            # Shear modulus and Poisson ratio from the store's earth model.
            shear_mod = store.config.get_shear_moduli(*a, **kw)
            lamb = store.config.get_vp(*a, **kw)**2 \
                * store.config.get_rho(*a, **kw) - 2. * shear_mod
            return shear_mod, lamb / (2. * (lamb + shear_mod))

        shear_mod, poisson = get_lame(
            self.lat, self.lon,
            num.array([[self.north_shift, self.east_shift, self.depth]]),
            interpolation=interpolation)

        okada_src = OkadaSource(
            lat=self.lat, lon=self.lon,
            strike=self.strike, dip=self.dip,
            north_shift=self.north_shift, east_shift=self.east_shift,
            depth=self.depth,
            al1=al1, al2=al2, aw1=aw1, aw2=aw2,
            poisson=poisson.mean(),
            shearmod=shear_mod.mean(),
            opening=kwargs.get('opening', 0.))

        # Patch grid size: explicit nx/ny win, then grid_shape, then the
        # eikonal discretization of the interpolators.
        if not (self.nx and self.ny):
            if grid_shape:
                self.nx, self.ny = grid_shape
            else:
                self.nx = nx
                self.ny = ny

        source_disc, source_points = okada_src.discretize(self.nx, self.ny)

        # NOTE(review): re-evaluated at patch centers but not used below in
        # this method - looks redundant here; verify before removing.
        shear_mod, poisson = get_lame(
            self.lat, self.lon,
            num.array([src.source_patch()[:3] for src in source_disc]),
            interpolation=interpolation)

        # Interpolate rupture times/velocities onto patch centers unless
        # the patch grid matches the eikonal grid exactly.
        if (self.nx, self.ny) != (nx, ny):
            times_interp = time_interpolator(
                num.ascontiguousarray(source_points[:, :2]))
            vr_interp = vr_interpolator(
                num.ascontiguousarray(source_points[:, :2]))
        else:
            times_interp = times.T.ravel()
            vr_interp = vr.T.ravel()

        for isrc, src in enumerate(source_disc):
            src.vr = vr_interp[isrc]
            src.time = times_interp[isrc] + self.time

        self.patches = source_disc
    def discretize_basesource(self, store, target=None):
        '''
        Prepare source for synthetic waveform calculation.

        :param store:
            Green's function database (needs to cover whole region of the
            source).
        :type store:
            :py:class:`~pyrocko.gf.store.Store`

        :param target:
            Target information.
        :type target:
            optional, :py:class:`~pyrocko.gf.targets.Target`

        :returns:
            Source discretized by a set of moment tensors and times.
        :rtype:
            :py:class:`~pyrocko.gf.meta.DiscretizedMTSource`
        '''
        if not target:
            interpolation = 'nearest_neighbor'
        else:
            interpolation = target.interpolation

        # Lazily build patches, the traction-dislocation coefficient matrix
        # and the incremental slip history if not computed yet.
        if not self.patches:
            self.discretize_patches(store, interpolation)

        if self.coef_mat is None:
            self.calc_coef_mat()

        delta_slip, slip_times = self.get_delta_slip(store)
        npatches = self.nx * self.ny
        ntimes = slip_times.size

        anch_x, anch_y = map_anchor[self.anchor]

        # Patch dimensions along strike (pln) and down dip (pwd).
        pln = self.length / self.nx
        pwd = self.width / self.ny

        patch_coords = num.array([
            (p.ix, p.iy)
            for p in self.patches]).reshape(self.nx, self.ny, 2)

        # boundary condition is zero-slip
        # is not valid to avoid unwished interpolation effects
        # Pad the patch-wise slip with one ghost cell on each side,
        # replicating edge and corner values, so nearest-neighbor
        # interpolation stays flat at the fault boundary.
        slip_grid = num.zeros((self.nx + 2, self.ny + 2, ntimes, 3))
        slip_grid[1:-1, 1:-1, :, :] = \
            delta_slip.reshape(self.nx, self.ny, ntimes, 3)

        slip_grid[0, 0, :, :] = slip_grid[1, 1, :, :]
        slip_grid[0, -1, :, :] = slip_grid[1, -2, :, :]
        slip_grid[-1, 0, :, :] = slip_grid[-2, 1, :, :]
        slip_grid[-1, -1, :, :] = slip_grid[-2, -2, :, :]

        slip_grid[1:-1, 0, :, :] = slip_grid[1:-1, 1, :, :]
        slip_grid[1:-1, -1, :, :] = slip_grid[1:-1, -2, :, :]
        slip_grid[0, 1:-1, :, :] = slip_grid[1, 1:-1, :, :]
        slip_grid[-1, 1:-1, :, :] = slip_grid[-2, 1:-1, :, :]

        def make_grid(patch_parameter):
            # Same ghost-cell padding as for the slip grid, applied to a
            # scalar per-patch parameter.
            grid = num.zeros((self.nx + 2, self.ny + 2))
            grid[1:-1, 1:-1] = patch_parameter.reshape(self.nx, self.ny)

            grid[0, 0] = grid[1, 1]
            grid[0, -1] = grid[1, -2]
            grid[-1, 0] = grid[-2, 1]
            grid[-1, -1] = grid[-2, -2]

            grid[1:-1, 0] = grid[1:-1, 1]
            grid[1:-1, -1] = grid[1:-1, -2]
            grid[0, 1:-1] = grid[1, 1:-1]
            grid[-1, 1:-1] = grid[-2, 1:-1]

            return grid

        lamb = self.get_patch_attribute('lamb')
        mu = self.get_patch_attribute('shearmod')

        lamb_grid = make_grid(lamb)
        mu_grid = make_grid(mu)

        # Grid node coordinates including the ghost cells, which sit half a
        # patch length/width outside the fault plane.
        coords_x = num.zeros(self.nx + 2)
        coords_x[1:-1] = patch_coords[:, 0, 0]
        coords_x[0] = coords_x[1] - pln / 2
        coords_x[-1] = coords_x[-2] + pln / 2

        coords_y = num.zeros(self.ny + 2)
        coords_y[1:-1] = patch_coords[0, :, 1]
        coords_y[0] = coords_y[1] - pwd / 2
        coords_y[-1] = coords_y[-2] + pwd / 2

        slip_interp = RegularGridInterpolator(
            (coords_x, coords_y, slip_times),
            slip_grid, method='nearest')

        lamb_interp = RegularGridInterpolator(
            (coords_x, coords_y),
            lamb_grid, method='nearest')

        mu_interp = RegularGridInterpolator(
            (coords_x, coords_y),
            mu_grid, method='nearest')

        # discretize basesources
        # Subsampling of each patch: chosen so that point sources are no
        # coarser than the GF store spacing, relaxed by decimation_factor.
        mindeltagf = min(tuple(
            (self.length / self.nx, self.width / self.ny) +
            tuple(store.config.deltas)))

        nl = int((1. / self.decimation_factor) *
                 num.ceil(pln / mindeltagf)) + 1
        nw = int((1. / self.decimation_factor) *
                 num.ceil(pwd / mindeltagf)) + 1
        nsrc_patch = int(nl * nw)
        dl = pln / nl
        dw = pwd / nw

        patch_area = dl * dw

        xl = num.linspace(-0.5 * (pln - dl), 0.5 * (pln - dl), nl)
        xw = num.linspace(-0.5 * (pwd - dw), 0.5 * (pwd - dw), nw)

        # Point source positions relative to each patch center, tiled over
        # all patches, then shifted to absolute in-plane coordinates.
        base_coords = num.zeros((nsrc_patch, 3))
        base_coords[:, 0] = num.tile(xl, nw)
        base_coords[:, 1] = num.repeat(xw, nl)
        base_coords = num.tile(base_coords, (npatches, 1))

        center_coords = num.zeros((npatches, 3))
        center_coords[:, 0] = num.repeat(
            num.arange(self.nx) * pln + pln / 2, self.ny) - self.length / 2
        center_coords[:, 1] = num.tile(
            num.arange(self.ny) * pwd + pwd / 2, self.nx) - self.width / 2

        base_coords -= center_coords.repeat(nsrc_patch, axis=0)
        nbaselocs = base_coords.shape[0]

        # One interpolation query per location and time snapshot; the third
        # column temporarily holds the query time.
        base_interp = base_coords.repeat(ntimes, axis=0)

        base_times = num.tile(slip_times, nbaselocs)
        base_interp[:, 0] -= anch_x * self.length / 2
        base_interp[:, 1] -= anch_y * self.width / 2
        base_interp[:, 2] = base_times

        _, _, _, _, time_interpolator, _ = self.get_vr_time_interpolators(
            store, interpolation=interpolation)

        time_eikonal_max = time_interpolator.values.max()

        nbasesrcs = base_interp.shape[0]
        delta_slip = slip_interp(base_interp).reshape(nbaselocs, ntimes, 3)
        lamb = lamb_interp(base_interp[:, :2]).ravel()
        mu = mu_interp(base_interp[:, :2]).ravel()

        # Debug visualization, intentionally disabled.
        if False:
            try:
                import matplotlib.pyplot as plt
                coords = base_coords.copy()
                norm = num.sum(num.linalg.norm(delta_slip, axis=2), axis=1)
                plt.scatter(coords[:, 0], coords[:, 1], c=norm)
                plt.show()
            except AttributeError:
                pass

        # Rotate the in-plane coordinates into NED and shift to the absolute
        # source position; the time column is zeroed before rotation.
        base_interp[:, 2] = 0.
        rotmat = pmt.euler_to_matrix(self.dip * d2r, self.strike * d2r, 0.0)
        base_interp = num.dot(rotmat.T, base_interp.T).T
        base_interp[:, 0] += self.north_shift
        base_interp[:, 1] += self.east_shift
        base_interp[:, 2] += self.depth

        slip_strike = delta_slip[:, :, 0].ravel()
        slip_dip = delta_slip[:, :, 1].ravel()
        slip_norm = delta_slip[:, :, 2].ravel()

        slip_shear = num.linalg.norm([slip_strike, slip_dip], axis=0)
        slip_rake = r2d * num.arctan2(slip_dip, slip_strike)

        # Convert per-point dislocations to moment tensor components.
        m6s = okada_ext.patch2m6(
            strikes=num.full(nbasesrcs, self.strike, dtype=float),
            dips=num.full(nbasesrcs, self.dip, dtype=float),
            rakes=slip_rake,
            disl_shear=slip_shear,
            disl_norm=slip_norm,
            lamb=lamb,
            mu=mu,
            nthreads=self.nthreads)

        m6s *= patch_area

        dl = -self.patches[0].al1 + self.patches[0].al2
        dw = -self.patches[0].aw1 + self.patches[0].aw2

        # Clip times beyond the eikonal solution to its maximum.
        base_times[base_times > time_eikonal_max] = time_eikonal_max

        ds = meta.DiscretizedMTSource(
            lat=self.lat,
            lon=self.lon,
            times=base_times + self.time,
            north_shifts=base_interp[:, 0],
            east_shifts=base_interp[:, 1],
            depths=base_interp[:, 2],
            m6s=m6s,
            dl=dl,
            dw=dw,
            nl=self.nx,
            nw=self.ny)

        return ds
3556 def calc_coef_mat(self):
3557 '''
3558 Calculate coefficients connecting tractions and dislocations.
3559 '''
3560 if not self.patches:
3561 raise ValueError(
3562 'Patches are needed. Please calculate them first.')
3564 self.coef_mat = make_okada_coefficient_matrix(
3565 self.patches, nthreads=self.nthreads, pure_shear=self.pure_shear)
3567 def get_patch_attribute(self, attr):
3568 '''
3569 Get patch attributes.
3571 :param attr:
3572 Name of selected attribute (see
3573 :py:class`pyrocko.modelling.okada.OkadaSource`).
3574 :type attr:
3575 str
3577 :returns:
3578 Array with attribute value for each fault patch.
3579 :rtype:
3580 :py:class:`~numpy.ndarray`
3582 '''
3583 if not self.patches:
3584 raise ValueError(
3585 'Patches are needed. Please calculate them first.')
3586 return num.array([getattr(p, attr) for p in self.patches])
3588 def get_slip(
3589 self,
3590 time=None,
3591 scale_slip=True,
3592 interpolation='nearest_neighbor',
3593 **kwargs):
3594 '''
3595 Get slip per subfault patch for given time after rupture start.
3597 :param time:
3598 Time after origin [s], for which slip is computed. If not
3599 given, final static slip is returned.
3600 :type time:
3601 optional, float > 0.
3603 :param scale_slip:
3604 If ``True`` and :py:attr:`slip` given, all slip values are scaled
3605 to fit the given maximum slip.
3606 :type scale_slip:
3607 optional, bool
3609 :param interpolation:
3610 Interpolation method to use (choose between ``'nearest_neighbor'``
3611 and ``'multilinear'``).
3612 :type interpolation:
3613 optional, str
3615 :returns:
3616 Inverted dislocations (:math:`u_{strike}, u_{dip}, u_{tensile}`)
3617 for each source patch.
3618 :rtype:
3619 :py:class:`~numpy.ndarray`: ``(n_sources, 3)``
3620 '''
3622 if self.patches is None:
3623 raise ValueError(
3624 'Please discretize the source first (discretize_patches())')
3625 npatches = len(self.patches)
3626 tractions = self.get_tractions()
3627 time_patch_max = self.get_patch_attribute('time').max() - self.time
3629 time_patch = time
3630 if time is None:
3631 time_patch = time_patch_max
3633 if self.coef_mat is None:
3634 self.calc_coef_mat()
3636 if tractions.shape != (npatches, 3):
3637 raise AttributeError(
3638 'The traction vector is of invalid shape.'
3639 ' Required shape is (npatches, 3)')
3641 patch_mask = num.ones(npatches, dtype=bool)
3642 if self.patch_mask is not None:
3643 patch_mask = self.patch_mask
3645 times = self.get_patch_attribute('time') - self.time
3646 times[~patch_mask] = time_patch + 1. # exlcude unmasked patches
3647 relevant_sources = num.nonzero(times <= time_patch)[0]
3648 disloc_est = num.zeros_like(tractions)
3650 if self.smooth_rupture:
3651 patch_activation = num.zeros(npatches)
3653 nx, ny, times, vr, time_interpolator, vr_interpolator = \
3654 self.get_vr_time_interpolators(
3655 store, interpolation=interpolation)
3657 # Getting the native Eikonal grid, bit hackish
3658 points_x = num.round(time_interpolator.grid[0], decimals=2)
3659 points_y = num.round(time_interpolator.grid[1], decimals=2)
3660 times_eikonal = time_interpolator.values
3662 time_max = time
3663 if time is None:
3664 time_max = times_eikonal.max()
3666 for ip, p in enumerate(self.patches):
3667 ul = num.round((p.ix + p.al1, p.iy + p.aw1), decimals=2)
3668 lr = num.round((p.ix + p.al2, p.iy + p.aw2), decimals=2)
3670 idx_length = num.logical_and(
3671 points_x >= ul[0], points_x <= lr[0])
3672 idx_width = num.logical_and(
3673 points_y >= ul[1], points_y <= lr[1])
3675 times_patch = times_eikonal[num.ix_(idx_length, idx_width)]
3676 if times_patch.size == 0:
3677 raise AttributeError('could not use smooth_rupture')
3679 patch_activation[ip] = \
3680 (times_patch <= time_max).sum() / times_patch.size
3682 if time_patch == 0 and time_patch != time_patch_max:
3683 patch_activation[ip] = 0.
3685 patch_activation[~patch_mask] = 0. # exlcude unmasked patches
3687 relevant_sources = num.nonzero(patch_activation > 0.)[0]
3689 if relevant_sources.size == 0:
3690 return disloc_est
3692 indices_disl = num.repeat(relevant_sources * 3, 3)
3693 indices_disl[1::3] += 1
3694 indices_disl[2::3] += 2
3696 disloc_est[relevant_sources] = invert_fault_dislocations_bem(
3697 stress_field=tractions[relevant_sources, :].ravel(),
3698 coef_mat=self.coef_mat[indices_disl, :][:, indices_disl],
3699 pure_shear=self.pure_shear, nthreads=self.nthreads,
3700 epsilon=None,
3701 **kwargs)
3703 if self.smooth_rupture:
3704 disloc_est *= patch_activation[:, num.newaxis]
3706 if scale_slip and self.slip is not None:
3707 disloc_tmax = num.zeros(npatches)
3709 indices_disl = num.repeat(num.nonzero(patch_mask)[0] * 3, 3)
3710 indices_disl[1::3] += 1
3711 indices_disl[2::3] += 2
3713 disloc_tmax[patch_mask] = num.linalg.norm(
3714 invert_fault_dislocations_bem(
3715 stress_field=tractions[patch_mask, :].ravel(),
3716 coef_mat=self.coef_mat[indices_disl, :][:, indices_disl],
3717 pure_shear=self.pure_shear, nthreads=self.nthreads,
3718 epsilon=None,
3719 **kwargs), axis=1)
3721 disloc_tmax_max = disloc_tmax.max()
3722 if disloc_tmax_max == 0.:
3723 logger.warning(
3724 'slip scaling not performed. Maximum slip is 0.')
3726 disloc_est *= self.slip / disloc_tmax_max
3728 return disloc_est
3730 def get_delta_slip(
3731 self,
3732 store=None,
3733 deltat=None,
3734 delta=True,
3735 interpolation='nearest_neighbor',
3736 **kwargs):
3737 '''
3738 Get slip change snapshots.
3740 The time interval, within which the slip changes are computed is
3741 determined by the sampling rate of the Green's function database or
3742 ``deltat``. Additional ``**kwargs`` are passed to :py:meth:`get_slip`.
3744 :param store:
3745 Green's function database (needs to cover whole region of of the
3746 source). Its sampling interval is used as time increment for slip
3747 difference calculation. Either ``deltat`` or ``store`` should be
3748 given.
3749 :type store:
3750 optional, :py:class:`~pyrocko.gf.store.Store`
3752 :param deltat:
3753 Time interval for slip difference calculation [s]. Either
3754 ``deltat`` or ``store`` should be given.
3755 :type deltat:
3756 optional, float
3758 :param delta:
3759 If ``True``, slip differences between two time steps are given. If
3760 ``False``, cumulative slip for all time steps.
3761 :type delta:
3762 optional, bool
3764 :param interpolation:
3765 Interpolation method to use (choose between ``'nearest_neighbor'``
3766 and ``'multilinear'``).
3767 :type interpolation:
3768 optional, str
3770 :returns:
3771 Displacement changes(:math:`\\Delta u_{strike},
3772 \\Delta u_{dip} , \\Delta u_{tensile}`) for each source patch and
3773 time; corner times, for which delta slip is computed. The order of
3774 displacement changes array is:
3776 .. math::
3778 &[[\\\\
3779 &[\\Delta u_{strike, patch1, t1},
3780 \\Delta u_{dip, patch1, t1},
3781 \\Delta u_{tensile, patch1, t1}],\\\\
3782 &[\\Delta u_{strike, patch1, t2},
3783 \\Delta u_{dip, patch1, t2},
3784 \\Delta u_{tensile, patch1, t2}]\\\\
3785 &], [\\\\
3786 &[\\Delta u_{strike, patch2, t1}, ...],\\\\
3787 &[\\Delta u_{strike, patch2, t2}, ...]]]\\\\
3789 :rtype: :py:class:`~numpy.ndarray`: ``(n_sources, n_times, 3)``,
3790 :py:class:`~numpy.ndarray`: ``(n_times, )``
3791 '''
3792 if store and deltat:
3793 raise AttributeError(
3794 'Argument collision. '
3795 'Please define only the store or the deltat argument.')
3797 if store:
3798 deltat = store.config.deltat
3800 if not deltat:
3801 raise AttributeError('Please give a GF store or set deltat.')
3803 npatches = len(self.patches)
3805 _, _, _, _, time_interpolator, _ = self.get_vr_time_interpolators(
3806 store, interpolation=interpolation)
3807 tmax = time_interpolator.values.max()
3809 calc_times = num.arange(0., tmax + deltat, deltat)
3810 calc_times[calc_times > tmax] = tmax
3812 disloc_est = num.zeros((npatches, calc_times.size, 3))
3814 for itime, t in enumerate(calc_times):
3815 disloc_est[:, itime, :] = self.get_slip(
3816 time=t, scale_slip=False, **kwargs)
3818 if self.slip:
3819 disloc_tmax = num.linalg.norm(
3820 self.get_slip(scale_slip=False, time=tmax),
3821 axis=1)
3823 disloc_tmax_max = disloc_tmax.max()
3824 if disloc_tmax_max == 0.:
3825 logger.warning(
3826 'Slip scaling not performed. Maximum slip is 0.')
3827 else:
3828 disloc_est *= self.slip / disloc_tmax_max
3830 if not delta:
3831 return disloc_est, calc_times
3833 # if we have only one timestep there is no gradient
3834 if calc_times.size > 1:
3835 disloc_init = disloc_est[:, 0, :]
3836 disloc_est = num.diff(disloc_est, axis=1)
3837 disloc_est = num.concatenate((
3838 disloc_init[:, num.newaxis, :], disloc_est), axis=1)
3840 calc_times = calc_times
3842 return disloc_est, calc_times
3844 def get_slip_rate(self, *args, **kwargs):
3845 '''
3846 Get slip rate inverted from patches.
3848 The time interval, within which the slip rates are computed is
3849 determined by the sampling rate of the Green's function database or
3850 ``deltat``. Additional ``*args`` and ``**kwargs`` are passed to
3851 :py:meth:`get_delta_slip`.
3853 :returns:
3854 Slip rates (:math:`\\Delta u_{strike}/\\Delta t`,
3855 :math:`\\Delta u_{dip}/\\Delta t, \\Delta u_{tensile}/\\Delta t`)
3856 for each source patch and time; corner times, for which slip rate
3857 is computed. The order of sliprate array is:
3859 .. math::
3861 &[[\\\\
3862 &[\\Delta u_{strike, patch1, t1}/\\Delta t,
3863 \\Delta u_{dip, patch1, t1}/\\Delta t,
3864 \\Delta u_{tensile, patch1, t1}/\\Delta t],\\\\
3865 &[\\Delta u_{strike, patch1, t2}/\\Delta t,
3866 \\Delta u_{dip, patch1, t2}/\\Delta t,
3867 \\Delta u_{tensile, patch1, t2}/\\Delta t]], [\\\\
3868 &[\\Delta u_{strike, patch2, t1}/\\Delta t, ...],\\\\
3869 &[\\Delta u_{strike, patch2, t2}/\\Delta t, ...]]]\\\\
3871 :rtype: :py:class:`~numpy.ndarray`: ``(n_sources, n_times, 3)``,
3872 :py:class:`~numpy.ndarray`: ``(n_times, )``
3873 '''
3874 ddisloc_est, calc_times = self.get_delta_slip(
3875 *args, delta=True, **kwargs)
3877 dt = num.concatenate(
3878 [(num.diff(calc_times)[0], ), num.diff(calc_times)])
3879 slip_rate = num.linalg.norm(ddisloc_est, axis=2) / dt
3881 return slip_rate, calc_times
3883 def get_moment_rate_patches(self, *args, **kwargs):
3884 '''
3885 Get scalar seismic moment rate for each patch individually.
3887 Additional ``*args`` and ``**kwargs`` are passed to
3888 :py:meth:`get_slip_rate`.
3890 :returns:
3891 Seismic moment rate for each source patch and time; corner times,
3892 for which patch moment rate is computed based on slip rate. The
3893 order of the moment rate array is:
3895 .. math::
3897 &[\\\\
3898 &[(\\Delta M / \\Delta t)_{patch1, t1},
3899 (\\Delta M / \\Delta t)_{patch1, t2}, ...],\\\\
3900 &[(\\Delta M / \\Delta t)_{patch2, t1},
3901 (\\Delta M / \\Delta t)_{patch, t2}, ...],\\\\
3902 &[...]]\\\\
3904 :rtype: :py:class:`~numpy.ndarray`: ``(n_sources, n_times)``,
3905 :py:class:`~numpy.ndarray`: ``(n_times, )``
3906 '''
3907 slip_rate, calc_times = self.get_slip_rate(*args, **kwargs)
3909 shear_mod = self.get_patch_attribute('shearmod')
3910 p_length = self.get_patch_attribute('length')
3911 p_width = self.get_patch_attribute('width')
3913 dA = p_length * p_width
3915 mom_rate = shear_mod[:, num.newaxis] * slip_rate * dA[:, num.newaxis]
3917 return mom_rate, calc_times
3919 def get_moment_rate(self, store, target=None, deltat=None):
3920 '''
3921 Get seismic source moment rate for the total source (STF).
3923 :param store:
3924 Green's function database (needs to cover whole region of of the
3925 source). Its ``deltat`` [s] is used as time increment for slip
3926 difference calculation. Either ``deltat`` or ``store`` should be
3927 given.
3928 :type store:
3929 :py:class:`~pyrocko.gf.store.Store`
3931 :param target:
3932 Target information, needed for interpolation method.
3933 :type target:
3934 optional, :py:class:`~pyrocko.gf.targets.Target`
3936 :param deltat:
3937 Time increment for slip difference calculation [s]. If not given
3938 ``store.deltat`` is used.
3939 :type deltat:
3940 optional, float
3942 :return:
3943 Seismic moment rate [Nm/s] for each time; corner times, for which
3944 moment rate is computed. The order of the moment rate array is:
3946 .. math::
3948 &[\\\\
3949 &(\\Delta M / \\Delta t)_{t1},\\\\
3950 &(\\Delta M / \\Delta t)_{t2},\\\\
3951 &...]\\\\
3953 :rtype:
3954 :py:class:`~numpy.ndarray`: ``(n_times, )``,
3955 :py:class:`~numpy.ndarray`: ``(n_times, )``
3956 '''
3957 if not deltat:
3958 deltat = store.config.deltat
3959 return self.discretize_basesource(
3960 store, target=target).get_moment_rate(deltat)
3962 def get_moment(self, *args, **kwargs):
3963 '''
3964 Get seismic cumulative moment.
3966 Additional ``*args`` and ``**kwargs`` are passed to
3967 :py:meth:`get_magnitude`.
3969 :returns:
3970 Cumulative seismic moment in [Nm].
3971 :rtype:
3972 float
3973 '''
3974 return float(pmt.magnitude_to_moment(self.get_magnitude(
3975 *args, **kwargs)))
3977 def rescale_slip(self, magnitude=None, moment=None, **kwargs):
3978 '''
3979 Rescale source slip based on given target magnitude or seismic moment.
3981 Rescale the maximum source slip to fit the source moment magnitude or
3982 seismic moment to the given target values. Either ``magnitude`` or
3983 ``moment`` need to be given. Additional ``**kwargs`` are passed to
3984 :py:meth:`get_moment`.
3986 :param magnitude:
3987 Target moment magnitude :math:`M_\\mathrm{w}` as in
3988 [Hanks and Kanamori, 1979]
3989 :type magnitude:
3990 optional, float
3992 :param moment:
3993 Target seismic moment :math:`M_0` [Nm].
3994 :type moment:
3995 optional, float
3996 '''
3997 if self.slip is None:
3998 self.slip = 1.
3999 logger.warning('No slip found for rescaling. '
4000 'An initial slip of 1 m is assumed.')
4002 if magnitude is None and moment is None:
4003 raise ValueError(
4004 'Either target magnitude or moment need to be given.')
4006 moment_init = self.get_moment(**kwargs)
4008 if magnitude is not None:
4009 moment = pmt.magnitude_to_moment(magnitude)
4011 self.slip *= moment / moment_init
4013 def get_centroid(self, store, *args, **kwargs):
4014 '''
4015 Centroid of the pseudo dynamic rupture model.
4017 The centroid location and time are derived from the locations and times
4018 of the individual patches weighted with their moment contribution.
4019 Additional ``**kwargs`` are passed to :py:meth:`pyrocko_moment_tensor`.
4021 :param store:
4022 Green's function database (needs to cover whole region of of the
4023 source). Its ``deltat`` [s] is used as time increment for slip
4024 difference calculation. Either ``deltat`` or ``store`` should be
4025 given.
4026 :type store:
4027 :py:class:`~pyrocko.gf.store.Store`
4029 :returns:
4030 The centroid location and associated moment tensor.
4031 :rtype:
4032 :py:class:`pyrocko.model.Event`
4033 '''
4034 _, _, _, _, time, _ = self.get_vr_time_interpolators(store)
4035 t_max = time.values.max()
4037 moment_rate, times = self.get_moment_rate_patches(deltat=t_max)
4039 moment = num.sum(moment_rate * times, axis=1)
4040 weights = moment / moment.sum()
4042 norths = self.get_patch_attribute('north_shift')
4043 easts = self.get_patch_attribute('east_shift')
4044 depths = self.get_patch_attribute('depth')
4046 centroid_n = num.sum(weights * norths)
4047 centroid_e = num.sum(weights * easts)
4048 centroid_d = num.sum(weights * depths)
4050 centroid_lat, centroid_lon = ne_to_latlon(
4051 self.lat, self.lon, centroid_n, centroid_e)
4053 moment_rate_, times = self.get_moment_rate(store)
4054 delta_times = num.concatenate((
4055 [times[1] - times[0]],
4056 num.diff(times)))
4057 moment_src = delta_times * moment_rate
4059 centroid_t = num.sum(
4060 moment_src / num.sum(moment_src) * times) + self.time
4062 mt = self.pyrocko_moment_tensor(store, *args, **kwargs)
4064 return model.Event(
4065 lat=centroid_lat,
4066 lon=centroid_lon,
4067 depth=centroid_d,
4068 time=centroid_t,
4069 moment_tensor=mt,
4070 magnitude=mt.magnitude,
4071 duration=t_max)
    def get_coulomb_failure_stress(
            self,
            receiver_points,
            friction,
            pressure,
            strike,
            dip,
            rake,
            time=None,
            *args,
            **kwargs):
        '''
        Calculate Coulomb failure stress change CFS.

        The function obtains the Coulomb failure stress change :math:`\\Delta
        \\sigma_C` at arbitrary receiver points with a commonly oriented
        receiver plane assuming:

        .. math::

            \\Delta \\sigma_C = \\sigma_S - \\mu (\\sigma_N - \\Delta p)

        with the shear stress :math:`\\sigma_S`, the coefficient of friction
        :math:`\\mu`, the normal stress :math:`\\sigma_N`, and the pore fluid
        pressure change :math:`\\Delta p`. Each receiver point is characterized
        by its geographical coordinates, and depth. The required receiver plane
        orientation is defined by ``strike``, ``dip``, and ``rake``. The
        Coulomb failure stress change is calculated for a given time after
        rupture origin time.

        :param receiver_points:
            Location of the receiver points in Northing, Easting, and depth in
            [m].
        :type receiver_points:
            :py:class:`~numpy.ndarray`: ``(n_receiver, 3)``

        :param friction:
            Coefficient of friction.
        :type friction:
            float

        :param pressure:
            Pore pressure change in [Pa].
        :type pressure:
            float

        :param strike:
            Strike of the receiver plane in [deg].
        :type strike:
            float

        :param dip:
            Dip of the receiver plane in [deg].
        :type dip:
            float

        :param rake:
            Rake of the receiver plane in [deg].
        :type rake:
            float

        :param time:
            Time after origin [s], for which the resulting :math:`\\Delta
            \\Sigma_c` is computed. If not given, :math:`\\Delta \\Sigma_c` is
            derived based on the final static slip.
        :type time:
            optional, float > 0.

        :returns:
            The Coulomb failure stress change :math:`\\Delta \\Sigma_c` at each
            receiver point in [Pa].
        :rtype:
            :py:class:`~numpy.ndarray`: ``(n_receiver,)``
        '''
        # dislocation at given time
        source_slip = self.get_slip(time=time, scale_slip=True)

        # source planes
        source_patches = num.array([
            src.source_patch() for src in self.patches])

        # earth model
        # NOTE(review): mean Lame parameters over all patches are used for
        # the receiver-side stress computation, even if the medium varies.
        lambda_mean = num.mean([src.lamb for src in self.patches])
        mu_mean = num.mean([src.shearmod for src in self.patches])

        # Dislocation and spatial derivatives from okada in NED
        results = okada_ext.okada(
            source_patches,
            source_slip,
            receiver_points,
            lambda_mean,
            mu_mean,
            rotate_sdn=False,  # TODO Check
            stack_sources=0,  # TODO Check
            *args, **kwargs)

        # resolve stress tensor (sum!)
        # Columns 3..11 of ``results`` hold the displacement gradient; the
        # strain is its symmetric part, built by averaging with the
        # transposed component order.
        diag_ind = [0, 4, 8]
        kron = num.zeros(9)
        kron[diag_ind] = 1.
        kron = kron[num.newaxis, num.newaxis, :]

        eps = 0.5 * (
            results[:, :, 3:] +
            results[:, :, (3, 6, 9, 4, 7, 10, 5, 8, 11)])

        dilatation \
            = eps[:, :, diag_ind].sum(axis=-1)[:, :, num.newaxis]

        # Isotropic linear elasticity: sigma = lambda*tr(eps)*I + 2*mu*eps.
        stress = kron*lambda_mean*dilatation + 2.*mu_mean*eps

        # superposed stress of all sources at receiver locations
        stress_sum = num.sum(stress, axis=0)

        # get shear and normal stress from stress tensor
        strike_rad = d2r * strike
        dip_rad = d2r * dip
        rake_rad = d2r * rake

        n_rec = receiver_points.shape[0]
        stress_normal = num.zeros(n_rec)
        tau = num.zeros(n_rec)

        # Get vectors in receiver fault normal (ns), strike (rst) and
        # dip (rdi) direction
        ns = num.zeros(3)
        rst = num.zeros(3)
        rdi = num.zeros(3)

        ns[0] = num.sin(dip_rad) * num.cos(strike_rad + 0.5 * num.pi)
        ns[1] = num.sin(dip_rad) * num.sin(strike_rad + 0.5 * num.pi)
        ns[2] = -num.cos(dip_rad)

        rst[0] = num.cos(strike_rad)
        rst[1] = num.sin(strike_rad)
        rst[2] = 0.0

        rdi[0] = num.cos(dip_rad) * num.cos(strike_rad + 0.5 * num.pi)
        rdi[1] = num.cos(dip_rad) * num.sin(strike_rad + 0.5 * num.pi)
        rdi[2] = num.sin(dip_rad)

        # Slip direction on the receiver plane from strike and rake.
        ts = rst * num.cos(rake_rad) - rdi * num.sin(rake_rad)

        # Project the stress tensor: sigma_N = n.S.n, tau = t.S.n.
        stress_normal = num.sum(
            num.tile(ns, 3) * stress_sum * num.repeat(ns, 3), axis=1)

        tau = num.sum(
            num.tile(ts, 3) * stress_sum * num.repeat(ns, 3), axis=1)

        # calculate cfs using formula above and return
        # NOTE(review): signs follow the convention that compressive normal
        # stress is negative, hence the ``+`` terms here — confirm against
        # the formula in the docstring.
        return tau + friction * (stress_normal + pressure)
class DoubleDCSource(SourceWithMagnitude):
    '''
    Two double-couple point sources separated in space and time.
    Moment share between the sub-sources is controlled by the
    parameter mix.
    The position of the subsources is dependent on the moment
    distribution between the two sources. Depth, east and north
    shift are given for the centroid between the two double-couples.
    The subsources will positioned according to their moment shares
    around this centroid position.
    This is done according to their delta parameters, which are
    therefore in relation to that centroid.
    Note that depth of the subsources therefore can be
    depth+/-delta_depth. For shallow earthquakes therefore
    the depth has to be chosen deeper to avoid sampling
    above surface.
    '''

    strike1 = Float.T(
        default=0.0,
        help='strike direction in [deg], measured clockwise from north')

    dip1 = Float.T(
        default=90.0,
        help='dip angle in [deg], measured downward from horizontal')

    azimuth = Float.T(
        default=0.0,
        help='azimuth to second double-couple [deg], '
             'measured at first, clockwise from north')

    rake1 = Float.T(
        default=0.0,
        help='rake angle in [deg], '
             'measured counter-clockwise from right-horizontal '
             'in on-plane view')

    strike2 = Float.T(
        default=0.0,
        help='strike direction in [deg], measured clockwise from north')

    dip2 = Float.T(
        default=90.0,
        help='dip angle in [deg], measured downward from horizontal')

    rake2 = Float.T(
        default=0.0,
        help='rake angle in [deg], '
             'measured counter-clockwise from right-horizontal '
             'in on-plane view')

    delta_time = Float.T(
        default=0.0,
        help='separation of double-couples in time (t2-t1) [s]')

    delta_depth = Float.T(
        default=0.0,
        help='difference in depth (z2-z1) [m]')

    distance = Float.T(
        default=0.0,
        help='distance between the two double-couples [m]')

    mix = Float.T(
        default=0.5,
        help='how to distribute the moment to the two doublecouples '
             'mix=0 -> m1=1 and m2=0; mix=1 -> m1=0, m2=1')

    stf1 = STF.T(
        optional=True,
        help='Source time function of subsource 1 '
             '(if given, overrides STF from attribute :py:gattr:`Source.stf`)')

    stf2 = STF.T(
        optional=True,
        help='Source time function of subsource 2 '
             '(if given, overrides STF from attribute :py:gattr:`Source.stf`)')

    discretized_source_class = meta.DiscretizedMTSource

    def base_key(self):
        # Cache key includes both sub-source orientations, their STFs and
        # the relative placement parameters.
        return (
            self.time, self.depth, self.lat, self.north_shift,
            self.lon, self.east_shift, type(self).__name__) + \
            self.effective_stf1_pre().base_key() + \
            self.effective_stf2_pre().base_key() + (
            self.strike1, self.dip1, self.rake1,
            self.strike2, self.dip2, self.rake2,
            self.delta_time, self.delta_depth,
            self.azimuth, self.distance, self.mix)

    def get_factor(self):
        # Unit moment tensors are scaled by the total moment.
        return self.moment

    def effective_stf1_pre(self):
        # STF of subsource 1: per-subsource STF wins over the common one.
        return self.stf1 or self.stf or g_unit_pulse

    def effective_stf2_pre(self):
        # STF of subsource 2: per-subsource STF wins over the common one.
        return self.stf2 or self.stf or g_unit_pulse

    def effective_stf_post(self):
        # STFs are applied pre-stacking; nothing is convolved afterwards.
        return g_unit_pulse

    def split(self):
        '''
        Split into the two individual :py:class:`DCSource` objects.
        '''
        # a1/a2 are the moment shares; each subsource is offset from the
        # centroid proportionally to the *other* source's share.
        a1 = 1.0 - self.mix
        a2 = self.mix
        delta_north = math.cos(self.azimuth * d2r) * self.distance
        delta_east = math.sin(self.azimuth * d2r) * self.distance

        dc1 = DCSource(
            lat=self.lat,
            lon=self.lon,
            time=self.time - self.delta_time * a2,
            north_shift=self.north_shift - delta_north * a2,
            east_shift=self.east_shift - delta_east * a2,
            depth=self.depth - self.delta_depth * a2,
            moment=self.moment * a1,
            strike=self.strike1,
            dip=self.dip1,
            rake=self.rake1,
            stf=self.stf1 or self.stf)

        dc2 = DCSource(
            lat=self.lat,
            lon=self.lon,
            time=self.time + self.delta_time * a1,
            north_shift=self.north_shift + delta_north * a1,
            east_shift=self.east_shift + delta_east * a1,
            depth=self.depth + self.delta_depth * a1,
            moment=self.moment * a2,
            strike=self.strike2,
            dip=self.dip2,
            rake=self.rake2,
            stf=self.stf2 or self.stf)

        return [dc1, dc2]

    def discretize_basesource(self, store, target=None):
        # Discretize both sub-sources' STFs and stack their weighted moment
        # tensor contributions into one DiscretizedMTSource.
        a1 = 1.0 - self.mix
        a2 = self.mix
        mot1 = pmt.MomentTensor(strike=self.strike1, dip=self.dip1,
                                rake=self.rake1, scalar_moment=a1)
        mot2 = pmt.MomentTensor(strike=self.strike2, dip=self.dip2,
                                rake=self.rake2, scalar_moment=a2)

        delta_north = math.cos(self.azimuth * d2r) * self.distance
        delta_east = math.sin(self.azimuth * d2r) * self.distance

        times1, amplitudes1 = self.effective_stf1_pre().discretize_t(
            store.config.deltat, self.time - self.delta_time * a2)

        times2, amplitudes2 = self.effective_stf2_pre().discretize_t(
            store.config.deltat, self.time + self.delta_time * a1)

        nt1 = times1.size
        nt2 = times2.size

        ds = meta.DiscretizedMTSource(
            lat=self.lat,
            lon=self.lon,
            times=num.concatenate((times1, times2)),
            north_shifts=num.concatenate((
                num.repeat(self.north_shift - delta_north * a2, nt1),
                num.repeat(self.north_shift + delta_north * a1, nt2))),
            east_shifts=num.concatenate((
                num.repeat(self.east_shift - delta_east * a2, nt1),
                num.repeat(self.east_shift + delta_east * a1, nt2))),
            depths=num.concatenate((
                num.repeat(self.depth - self.delta_depth * a2, nt1),
                num.repeat(self.depth + self.delta_depth * a1, nt2))),
            m6s=num.vstack((
                mot1.m6()[num.newaxis, :] * amplitudes1[:, num.newaxis],
                mot2.m6()[num.newaxis, :] * amplitudes2[:, num.newaxis])))

        return ds

    def pyrocko_moment_tensor(self, store=None, target=None):
        # Combined moment tensor: moment-weighted sum of both sub-sources.
        a1 = 1.0 - self.mix
        a2 = self.mix
        mot1 = pmt.MomentTensor(strike=self.strike1, dip=self.dip1,
                                rake=self.rake1,
                                scalar_moment=a1 * self.moment)
        mot2 = pmt.MomentTensor(strike=self.strike2, dip=self.dip2,
                                rake=self.rake2,
                                scalar_moment=a2 * self.moment)
        return pmt.MomentTensor(m=mot1.m() + mot2.m())

    def pyrocko_event(self, store=None, target=None, **kwargs):
        return SourceWithMagnitude.pyrocko_event(
            self, store, target,
            moment_tensor=self.pyrocko_moment_tensor(store, target),
            **kwargs)

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        # Initialize both sub-sources with the event's (single) mechanism;
        # mix=0.0 puts the whole moment on subsource 1.
        d = {}
        mt = ev.moment_tensor
        if mt:
            (strike, dip, rake), _ = mt.both_strike_dip_rake()
            d.update(
                strike1=float(strike),
                dip1=float(dip),
                rake1=float(rake),
                strike2=float(strike),
                dip2=float(dip),
                rake2=float(rake),
                mix=0.0,
                magnitude=float(mt.moment_magnitude()))

        d.update(kwargs)
        source = super(DoubleDCSource, cls).from_pyrocko_event(ev, **d)
        source.stf1 = source.stf
        source.stf2 = HalfSinusoidSTF(effective_duration=0.)
        source.stf = None
        return source
class RingfaultSource(SourceWithMagnitude):
    '''
    A ring fault with vertical doublecouples.

    The ring is discretized into ``npointsources`` moment tensor point
    sources placed on a circle of the given diameter, rotated by strike
    and dip.
    '''

    diameter = Float.T(
        default=1.0,
        help='diameter of the ring in [m]')

    sign = Float.T(
        default=1.0,
        help='inside of the ring moves up (+1) or down (-1)')

    strike = Float.T(
        default=0.0,
        help='strike direction of the ring plane, clockwise from north,'
             ' in [deg]')

    dip = Float.T(
        default=0.0,
        help='dip angle of the ring plane from horizontal in [deg]')

    npointsources = Int.T(
        default=360,
        help='number of point sources to use')

    discretized_source_class = meta.DiscretizedMTSource

    def base_key(self):
        # Geometry parameters take part in the GF lookup key.
        return Source.base_key(self) + (
            self.strike, self.dip, self.diameter, self.npointsources)

    def get_factor(self):
        # Overall scale: moment with polarity given by ``sign``.
        return self.sign * self.moment

    def discretize_basesource(self, store=None, target=None):
        '''
        Discretize the ring into ``n`` point sources with per-point
        rotated moment tensors, each convolved with the pre-STF.
        '''
        n = self.npointsources
        phi = num.linspace(0, 2.0 * num.pi, n, endpoint=False)

        # Point positions on the ring, in the ring-plane frame.
        points = num.zeros((n, 3))
        points[:, 0] = num.cos(phi) * 0.5 * self.diameter
        points[:, 1] = num.sin(phi) * 0.5 * self.diameter

        # Rotate ring plane by dip and strike.
        rotmat = pmt.euler_to_matrix(self.dip * d2r, self.strike * d2r, 0.0)
        # NOTE(review): the original author left a doubt marker here
        # regarding the transpose convention.
        points = num.dot(rotmat.T, points.T).T  # !!! ?

        points[:, 0] += self.north_shift
        points[:, 1] += self.east_shift
        points[:, 2] += self.depth

        # Template mechanism; 1/n so the n points sum to unit moment.
        m = num.array(pmt.MomentTensor(strike=90., dip=90., rake=-90.,
                                       scalar_moment=1.0 / n).m())

        # Per-point rotation about the vertical axis by angle phi.
        rotmats = num.transpose(
            [[num.cos(phi), num.sin(phi), num.zeros(n)],
             [-num.sin(phi), num.cos(phi), num.zeros(n)],
             [num.zeros(n), num.zeros(n), num.ones(n)]], (2, 0, 1))

        # Rotate the template tensor to each point, then into the
        # strike/dip frame of the ring plane.
        ms = num.zeros((n, 3, 3))
        for i in range(n):
            mtemp = num.dot(rotmats[i].T, num.dot(m, rotmats[i]))
            ms[i, :, :] = num.dot(rotmat.T, num.dot(mtemp, rotmat))

        # Pack symmetric tensors into 6-component rows.
        m6s = num.vstack((ms[:, 0, 0], ms[:, 1, 1], ms[:, 2, 2],
                          ms[:, 0, 1], ms[:, 0, 2], ms[:, 1, 2])).T

        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)

        nt = times.size

        # Repeat each point nt times (one entry per STF sample).
        return meta.DiscretizedMTSource(
            times=num.tile(times, n),
            lat=self.lat,
            lon=self.lon,
            north_shifts=num.repeat(points[:, 0], nt),
            east_shifts=num.repeat(points[:, 1], nt),
            depths=num.repeat(points[:, 2], nt),
            m6s=num.repeat(m6s, nt, axis=0) * num.tile(
                amplitudes, n)[:, num.newaxis])
class CombiSource(Source):
    '''
    Composite source model.

    Wraps a list of sub-sources and discretizes them into a single
    combined :py:class:`~pyrocko.gf.meta.DiscretizedMTSource`. The
    reference coordinates are derived from the sub-sources: origin of
    the first one, mean of time, depth and shifts.
    '''

    discretized_source_class = meta.DiscretizedMTSource

    subsources = List.T(Source.T())

    def __init__(self, subsources=(), **kwargs):
        '''
        :param subsources: non-empty sequence of :py:class:`Source`
            objects
        :raises BadRequest: if ``subsources`` is empty

        Note: the default was changed from a mutable ``[]`` to an
        immutable ``()`` (same observable behavior: empty raises).
        '''
        if not subsources:
            raise BadRequest(
                'Need at least one sub-source to create a CombiSource object.')

        subsources = list(subsources)

        lats = num.array(
            [subsource.lat for subsource in subsources], dtype=float)
        lons = num.array(
            [subsource.lon for subsource in subsources], dtype=float)

        lat, lon = lats[0], lons[0]

        # Re-origin all sub-sources to the first one's origin if they do
        # not already share a common (lat, lon). Bug fix: the negation
        # must cover both conditions; previously ``not`` bound only to
        # the latitude test, so differing longitudes alone never
        # triggered the re-origin.
        if not (num.all(lats == lat) and num.all(lons == lon)):
            subsources = [s.clone() for s in subsources]
            for subsource in subsources[1:]:
                subsource.set_origin(lat, lon)

        depth = float(num.mean([p.depth for p in subsources]))
        time = float(num.mean([p.time for p in subsources]))
        north_shift = float(num.mean([p.north_shift for p in subsources]))
        east_shift = float(num.mean([p.east_shift for p in subsources]))
        kwargs.update(
            time=time,
            lat=float(lat),
            lon=float(lon),
            north_shift=north_shift,
            east_shift=east_shift,
            depth=depth)

        Source.__init__(self, subsources=subsources, **kwargs)

    def get_factor(self):
        # Scaling is applied per sub-source in discretize_basesource.
        return 1.0

    def discretize_basesource(self, store, target=None):
        '''
        Discretize all sub-sources, apply their individual factors and
        combine them into one discretized source.
        '''
        dsources = []
        for sf in self.subsources:
            ds = sf.discretize_basesource(store, target)
            ds.m6s *= sf.get_factor()
            dsources.append(ds)

        return meta.DiscretizedMTSource.combine(dsources)
class SFSource(Source):
    '''
    A single force point source.

    Supported GF schemes: `'elastic5'`.
    '''

    discretized_source_class = meta.DiscretizedSFSource

    fn = Float.T(
        default=0.,
        help='northward component of single force [N]')

    fe = Float.T(
        default=0.,
        help='eastward component of single force [N]')

    fd = Float.T(
        default=0.,
        help='downward component of single force [N]')

    def __init__(self, **kwargs):
        Source.__init__(self, **kwargs)

    def base_key(self):
        # The force vector takes part in the GF lookup key.
        return Source.base_key(self) + (self.fn, self.fe, self.fd)

    def get_factor(self):
        return 1.0

    def discretize_basesource(self, store, target=None):
        '''
        Sample the pre-STF at the store's sampling interval and scale
        the force vector with the resulting amplitudes.
        '''
        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)

        force_vector = num.array(
            [[self.fn, self.fe, self.fd]], dtype=float)

        forces = amplitudes[:, num.newaxis] * force_vector

        return meta.DiscretizedSFSource(
            forces=forces,
            **self._dparams_base_repeated(times))

    def pyrocko_event(self, store=None, target=None, **kwargs):
        return Source.pyrocko_event(self, store, target, **kwargs)

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        return super(SFSource, cls).from_pyrocko_event(ev, **dict(kwargs))
class PorePressurePointSource(Source):
    '''
    Excess pore pressure point source.

    For poro-elastic initial value problem where an excess pore pressure is
    brought into a small source volume.
    '''

    discretized_source_class = meta.DiscretizedPorePressureSource

    pp = Float.T(
        default=1.0,
        help='initial excess pore pressure in [Pa]')

    def base_key(self):
        # No extra key parameters; the pressure amplitude only enters
        # through get_factor().
        return Source.base_key(self)

    def get_factor(self):
        # Stacked results are scaled by the excess pore pressure.
        return self.pp

    def discretize_basesource(self, store, target=None):
        # Unit pressure here; the actual amplitude is applied via
        # get_factor(). NOTE(review): ``arr`` is a helper defined
        # elsewhere in this file, presumably wrapping the scalar into a
        # numpy array -- confirm.
        return meta.DiscretizedPorePressureSource(pp=arr(1.0),
                                                  **self._dparams_base())
class PorePressureLineSource(Source):
    '''
    Excess pore pressure line source.

    The line source is centered at (north_shift, east_shift, depth).
    '''

    discretized_source_class = meta.DiscretizedPorePressureSource

    pp = Float.T(
        default=1.0,
        help='initial excess pore pressure in [Pa]')

    length = Float.T(
        default=0.0,
        help='length of the line source [m]')

    azimuth = Float.T(
        default=0.0,
        help='azimuth direction, clockwise from north [deg]')

    dip = Float.T(
        default=90.,
        help='dip direction, downward from horizontal [deg]')

    def base_key(self):
        # Geometry parameters take part in the GF lookup key.
        return Source.base_key(self) + (self.azimuth, self.dip, self.length)

    def get_factor(self):
        return self.pp

    def discretize_basesource(self, store, target=None):
        '''
        Discretize the line into an odd number of equally spaced point
        sources, fine enough for the store's smallest grid spacing.
        '''
        npoints = 2 * int(
            math.ceil(self.length / num.min(store.config.deltas))) + 1

        offsets = num.linspace(
            -0.5 * self.length, 0.5 * self.length, npoints)

        sa = math.sin(self.azimuth * d2r)
        ca = math.cos(self.azimuth * d2r)
        sd = math.sin(self.dip * d2r)
        cd = math.cos(self.dip * d2r)

        # Spread the points along the line direction in (n, e, d).
        coords = num.zeros((npoints, 3))
        coords[:, 0] = self.north_shift + offsets * ca * cd
        coords[:, 1] = self.east_shift + offsets * sa * cd
        coords[:, 2] = self.depth + offsets * sd

        # Unit total pressure, equally distributed over the points.
        return meta.DiscretizedPorePressureSource(
            times=util.num_full(npoints, self.time),
            lat=self.lat,
            lon=self.lon,
            north_shifts=coords[:, 0],
            east_shifts=coords[:, 1],
            depths=coords[:, 2],
            pp=num.ones(npoints) / npoints)
class Request(Object):
    '''
    Synthetic seismogram computation request.

    ::

        Request(**kwargs)
        Request(sources, targets, **kwargs)
    '''

    sources = List.T(
        Source.T(),
        help='list of sources for which to produce synthetics.')

    targets = List.T(
        Target.T(),
        help='list of targets for which to produce synthetics.')

    @classmethod
    def args2kwargs(cls, args):
        '''
        Translate positional ``(sources, targets)`` args into kwargs.

        :raises BadRequest: if the number of arguments is unsupported
        '''
        if len(args) not in (0, 2, 3):
            raise BadRequest('Invalid arguments.')

        if len(args) == 2:
            return dict(sources=args[0], targets=args[1])
        else:
            return {}

    def __init__(self, *args, **kwargs):
        kwargs.update(self.args2kwargs(args))
        sources = kwargs.pop('sources', [])
        targets = kwargs.pop('targets', [])

        # Accept single source/target objects as a convenience.
        if isinstance(sources, Source):
            sources = [sources]

        # Idiom: single isinstance call with a tuple of types.
        if isinstance(targets, (Target, StaticTarget)):
            targets = [targets]

        Object.__init__(self, sources=sources, targets=targets, **kwargs)

    @property
    def targets_dynamic(self):
        '''Subset of targets which are dynamic (waveform) targets.'''
        return [t for t in self.targets if isinstance(t, Target)]

    @property
    def targets_static(self):
        '''Subset of targets which are static targets.'''
        return [t for t in self.targets if isinstance(t, StaticTarget)]

    @property
    def has_dynamic(self):
        # Idiom: truthiness of the list instead of len(...) > 0.
        return bool(self.targets_dynamic)

    @property
    def has_statics(self):
        return bool(self.targets_static)

    def subsources_map(self):
        '''Group sources by their ``base_key()``.'''
        m = defaultdict(list)
        for source in self.sources:
            m[source.base_key()].append(source)

        return m

    def subtargets_map(self):
        '''Group targets by their ``base_key()``.'''
        m = defaultdict(list)
        for target in self.targets:
            m[target.base_key()].append(target)

        return m

    def subrequest_map(self):
        '''
        Build the cartesian product of source and target groups.

        :returns: dict mapping ``(source_key, target_key)`` to
            ``(sources, targets)`` list pairs
        '''
        ms = self.subsources_map()
        mt = self.subtargets_map()
        m = {}
        for (ks, ls) in ms.items():
            for (kt, lt) in mt.items():
                m[ks, kt] = (ls, lt)

        return m
class ProcessingStats(Object):
    '''
    Timing and counting statistics collected while processing a request.

    The ``t_perc_*`` attributes presumably hold percentages of the total
    time spent in the respective processing stage (filled in elsewhere).
    '''

    # dynamic (waveform) processing stages
    t_perc_get_store_and_receiver = Float.T(default=0.)
    t_perc_discretize_source = Float.T(default=0.)
    t_perc_make_base_seismogram = Float.T(default=0.)
    t_perc_make_same_span = Float.T(default=0.)
    t_perc_post_process = Float.T(default=0.)
    t_perc_optimize = Float.T(default=0.)
    t_perc_stack = Float.T(default=0.)

    # static (displacement) processing stages
    t_perc_static_get_store = Float.T(default=0.)
    t_perc_static_discretize_basesource = Float.T(default=0.)
    t_perc_static_sum_statics = Float.T(default=0.)
    t_perc_static_post_process = Float.T(default=0.)

    # totals and counters
    t_wallclock = Float.T(default=0.)
    t_cpu = Float.T(default=0.)
    n_read_blocks = Int.T(default=0)
    n_results = Int.T(default=0)
    n_subrequests = Int.T(default=0)
    n_stores = Int.T(default=0)
    n_records_stacked = Int.T(default=0)
class Response(Object):
    '''
    Response object to a synthetic seismogram computation request.
    '''

    request = Request.T()
    # One inner list of results per source; one entry per target.
    results_list = List.T(List.T(meta.SeismosizerResult.T()))
    stats = ProcessingStats.T()

    def pyrocko_traces(self):
        '''
        Return a list of requested
        :class:`~pyrocko.trace.Trace` instances.
        '''

        traces = []
        for results in self.results_list:
            for result in results:
                # Skip entries which are not proper results (e.g.
                # captured error objects).
                if not isinstance(result, meta.Result):
                    continue
                traces.append(result.trace.pyrocko_trace())

        return traces

    def kite_scenes(self):
        '''
        Return a list of requested
        :class:`~kite.scenes` instances.
        '''
        kite_scenes = []
        for results in self.results_list:
            for result in results:
                if isinstance(result, meta.KiteSceneResult):
                    sc = result.get_scene()
                    kite_scenes.append(sc)

        return kite_scenes

    def static_results(self):
        '''
        Return a list of requested
        :class:`~pyrocko.gf.meta.StaticResult` instances.
        '''
        statics = []
        for results in self.results_list:
            for result in results:
                if not isinstance(result, meta.StaticResult):
                    continue
                statics.append(result)

        return statics

    def iter_results(self, get='pyrocko_traces'):
        '''
        Generator function to iterate over results of request.

        :param get: ``'pyrocko_traces'`` to yield
            ``(source, target, trace)`` or ``'results'`` to yield
            ``(source, target, result)``

        Yields associated :py:class:`Source`,
        :class:`~pyrocko.gf.targets.Target`,
        :class:`~pyrocko.trace.Trace` instances in each iteration.
        '''

        for isource, source in enumerate(self.request.sources):
            for itarget, target in enumerate(self.request.targets):
                result = self.results_list[isource][itarget]
                if get == 'pyrocko_traces':
                    yield source, target, result.trace.pyrocko_trace()
                elif get == 'results':
                    yield source, target, result

    def snuffle(self, **kwargs):
        '''
        Open *snuffler* with requested traces.
        '''

        trace.snuffle(self.pyrocko_traces(), **kwargs)
class Engine(Object):
    '''
    Base class for synthetic seismogram calculators.
    '''

    def get_store_ids(self):
        '''
        Get list of available GF store IDs

        The base implementation knows no stores; subclasses override.
        '''

        return []
class Rule(object):
    '''
    Base class for rules mapping stored GF components to output
    quantities (see ``required_components`` / ``apply_`` in subclasses).
    '''
    pass
class VectorRule(Rule):
    '''
    Rule to assemble a vector quantity from north/east/down GF
    components, projected onto the target's measurement direction.
    '''

    def __init__(self, quantity, differentiate=0, integrate=0):
        # Component channel keys, e.g. 'displacement.n', ...
        self.components = [quantity + '.' + c for c in 'ned']
        self.differentiate = differentiate
        self.integrate = integrate

    def required_components(self, target):
        '''
        Return only the components whose projection factor does not
        vanish for the given target orientation.
        '''
        n, e, d = self.components
        sa, ca, sd, cd = target.get_sin_cos_factors()

        comps = []
        if nonzero(ca * cd):
            comps.append(n)

        if nonzero(sa * cd):
            comps.append(e)

        if nonzero(sd):
            comps.append(d)

        return tuple(comps)

    def apply_(self, target, base_seismogram):
        '''
        Project available components onto the target direction and
        optionally differentiate the result.
        '''
        n, e, d = self.components
        sa, ca, sd, cd = target.get_sin_cos_factors()

        if nonzero(ca * cd):
            data = base_seismogram[n].data * (ca * cd)
            deltat = base_seismogram[n].deltat
        else:
            data = 0.0

        if nonzero(sa * cd):
            data = data + base_seismogram[e].data * (sa * cd)
            deltat = base_seismogram[e].deltat

        if nonzero(sd):
            data = data + base_seismogram[d].data * sd
            deltat = base_seismogram[d].deltat

        # NOTE(review): if all projection factors vanish, ``deltat`` is
        # never bound and differentiation would raise UnboundLocalError
        # -- presumably that combination cannot occur; confirm.
        if self.differentiate:
            data = util.diff_fd(self.differentiate, 4, deltat, data)

        if self.integrate:
            raise NotImplementedError('Integration is not implemented yet.')

        return data
class HorizontalVectorRule(Rule):
    '''
    Rule to assemble a horizontal vector quantity from north/east GF
    components, projected onto the target's azimuth.
    '''

    def __init__(self, quantity, differentiate=0, integrate=0):
        # Component channel keys, e.g. 'vertical_tilt.n', ...
        self.components = [quantity + '.' + c for c in 'ne']
        self.differentiate = differentiate
        self.integrate = integrate

    def required_components(self, target):
        '''
        Return only the components whose projection factor does not
        vanish for the given target orientation.
        '''
        n, e = self.components
        sa, ca, _, _ = target.get_sin_cos_factors()

        comps = []
        if nonzero(ca):
            comps.append(n)

        if nonzero(sa):
            comps.append(e)

        return tuple(comps)

    def apply_(self, target, base_seismogram):
        '''
        Project available components onto the target azimuth and
        optionally differentiate the result.
        '''
        n, e = self.components
        sa, ca, _, _ = target.get_sin_cos_factors()

        # Bug fix: the differentiation step previously always read
        # base_seismogram[e].deltat, but the east component is not
        # required (and may be absent) when its projection factor
        # vanishes. Track deltat from whichever component actually
        # contributed, mirroring VectorRule.apply_.
        deltat = None
        if nonzero(ca):
            data = base_seismogram[n].data * ca
            deltat = base_seismogram[n].deltat
        else:
            data = 0.0

        if nonzero(sa):
            data = data + base_seismogram[e].data * sa
            deltat = base_seismogram[e].deltat

        if self.differentiate:
            data = util.diff_fd(self.differentiate, 4, deltat, data)

        if self.integrate:
            raise NotImplementedError('Integration is not implemented yet.')

        return data
class ScalarRule(Rule):
    '''
    Rule to extract a scalar quantity directly from a single GF
    component.
    '''

    def __init__(self, quantity, differentiate=0):
        self.c = quantity
        # Bug fix: ``differentiate`` was accepted but never stored, so
        # ``apply_`` failed with AttributeError when reading
        # ``self.differentiate``.
        self.differentiate = differentiate

    def required_components(self, target):
        return (self.c, )

    def apply_(self, target, base_seismogram):
        '''
        Copy the component's data, optionally differentiating it.
        '''
        data = base_seismogram[self.c].data.copy()
        deltat = base_seismogram[self.c].deltat
        if self.differentiate:
            data = util.diff_fd(self.differentiate, 4, deltat, data)

        return data
class StaticDisplacement(Rule):
    '''
    Rule for static displacement targets; adds a line-of-sight
    component for satellite targets.
    '''

    def required_components(self, target):
        # Always needs all three components.
        return tuple(['displacement.%s' % c for c in list('ned')])

    def apply_(self, target, base_statics):
        if isinstance(target, SatelliteTarget):
            # Combine (d, e, n) into a line-of-sight displacement.
            # NOTE(review): factor columns appear to map to (d, e, n)
            # with the down component negated -- confirm against
            # SatelliteTarget.get_los_factors.
            los_fac = target.get_los_factors()
            base_statics['displacement.los'] =\
                (los_fac[:, 0] * -base_statics['displacement.d'] +
                 los_fac[:, 1] * base_statics['displacement.e'] +
                 los_fac[:, 2] * base_statics['displacement.n'])
        return base_statics
# Mapping of output quantity -> list of candidate rules, in order of
# preference. The first rule whose required components are all provided
# by the GF store is used (see LocalEngine.get_rule). Entries with
# ``differentiate`` allow deriving e.g. velocity from displacement GFs.
channel_rules = {
    'displacement': [VectorRule('displacement')],
    'rotation': [VectorRule('rotation')],
    'velocity': [
        VectorRule('velocity'),
        VectorRule('displacement', differentiate=1)],
    'acceleration': [
        VectorRule('acceleration'),
        VectorRule('velocity', differentiate=1),
        VectorRule('displacement', differentiate=2)],
    'pore_pressure': [ScalarRule('pore_pressure')],
    'vertical_tilt': [HorizontalVectorRule('vertical_tilt')],
    'darcy_velocity': [VectorRule('darcy_velocity')],
}

# Same lookup for static targets.
static_rules = {
    'displacement': [StaticDisplacement()]
}
class OutOfBoundsContext(Object):
    '''
    Context information attached to out-of-bounds errors for reporting
    which source/target combination failed.
    '''
    source = Source.T()
    target = Target.T()
    distance = Float.T()
    components = List.T(String.T())
def process_dynamic_timeseries(work, psources, ptargets, engine, nthreads=0):
    '''
    Process dynamic (waveform) subrequests using batched seismogram
    computation (``engine.base_seismograms``).

    Yields ``(isource, itarget, result)`` tuples, each paired with a
    list of time counters.

    :param work: list of work units ``(_, _, isources, itargets)``
    :param psources: all sources of the request
    :param ptargets: all targets of the request
    :param engine: engine performing the computation
    :param nthreads: number of threads passed through to the engine
    '''
    dsource_cache = {}

    # NOTE(review): placeholder counters -- unlike ``process_dynamic``,
    # this code path does not take per-stage timestamps.
    tcounters = list(range(6))

    store_ids = set()
    sources = set()
    targets = set()

    # Remember each target's position in the original request so results
    # can be attributed after the per-store regrouping below.
    for itarget, target in enumerate(ptargets):
        target._id = itarget

    for w in work:
        _, _, isources, itargets = w

        sources.update([psources[isource] for isource in isources])
        targets.update([ptargets[itarget] for itarget in itargets])

    store_ids = set([t.store_id for t in targets])

    for isource, source in enumerate(psources):

        # Union of components required over all targets for this source.
        components = set()
        for itarget, target in enumerate(targets):
            rule = engine.get_rule(source, target)
            components.update(rule.required_components(target))

        for store_id in store_ids:
            store_targets = [t for t in targets if t.store_id == store_id]

            sample_rates = set([t.sample_rate for t in store_targets])
            interpolations = set([t.interpolation for t in store_targets])

            base_seismograms = []
            store_targets_out = []

            # Batch targets by (sample_rate, interpolation) so each
            # batch can be computed in one engine call.
            for samp_rate in sample_rates:
                for interp in interpolations:
                    engine_targets = [
                        t for t in store_targets if t.sample_rate == samp_rate
                        and t.interpolation == interp]

                    if not engine_targets:
                        continue

                    store_targets_out += engine_targets

                    base_seismograms += engine.base_seismograms(
                        source,
                        engine_targets,
                        components,
                        dsource_cache,
                        nthreads)

            for iseis, seismogram in enumerate(base_seismograms):
                for tr in seismogram.values():
                    if tr.err != store.SeismosizerErrorEnum.SUCCESS:
                        # NOTE(review): ``iseis`` indexes
                        # ``base_seismograms``, which is ordered like
                        # ``store_targets_out``, not ``store_targets`` --
                        # the target reported here may be wrong when the
                        # orderings differ; confirm and align.
                        e = SeismosizerError(
                            'Seismosizer failed with return code %i\n%s' % (
                                tr.err, str(
                                    OutOfBoundsContext(
                                        source=source,
                                        target=store_targets[iseis],
                                        distance=source.distance_to(
                                            store_targets[iseis]),
                                        components=components))))
                        raise e

            for seismogram, target in zip(base_seismograms,
                                          store_targets_out):

                try:
                    result = engine._post_process_dynamic(
                        seismogram, source, target)
                except SeismosizerError as e:
                    result = e

                yield (isource, target._id, result), tcounters
def process_dynamic(work, psources, ptargets, engine, nthreads=0):
    '''
    Process dynamic (waveform) subrequests one source-target pair at a
    time (``engine.base_seismogram``).

    Yields ``(isource, itarget, result)`` tuples, each paired with
    per-stage time counters.
    '''
    dsource_cache = {}

    for w in work:
        _, _, isources, itargets = w

        sources = [psources[isource] for isource in isources]
        targets = [ptargets[itarget] for itarget in itargets]

        # Components needed for this work unit; presumably all sources
        # in a unit share a base_key, so the first one is representative.
        components = set()
        for target in targets:
            rule = engine.get_rule(sources[0], target)
            components.update(rule.required_components(target))

        for isource, source in zip(isources, sources):
            for itarget, target in zip(itargets, targets):

                try:
                    base_seismogram, tcounters = engine.base_seismogram(
                        source, target, components, dsource_cache, nthreads)
                except meta.OutOfBounds as e:
                    # Attach context for the error report; first
                    # source/target stand in for the whole group.
                    e.context = OutOfBoundsContext(
                        source=sources[0],
                        target=targets[0],
                        distance=sources[0].distance_to(targets[0]),
                        components=components)
                    raise

                n_records_stacked = 0
                t_optimize = 0.0
                t_stack = 0.0

                # Accumulate stacking statistics over all components.
                for _, tr in base_seismogram.items():
                    n_records_stacked += tr.n_records_stacked
                    t_optimize += tr.t_optimize
                    t_stack += tr.t_stack

                try:
                    result = engine._post_process_dynamic(
                        base_seismogram, source, target)
                    result.n_records_stacked = n_records_stacked
                    result.n_shared_stacking = len(sources) *\
                        len(targets)
                    result.t_optimize = t_optimize
                    result.t_stack = t_stack
                except SeismosizerError as e:
                    result = e

                tcounters.append(xtime())
                yield (isource, itarget, result), tcounters
def process_static(work, psources, ptargets, engine, nthreads=0):
    '''
    Process static (displacement) subrequests one source-target pair at
    a time (``engine.base_statics``).

    Yields ``(isource, itarget, result)`` tuples, each paired with
    per-stage time counters.
    '''
    for w in work:
        _, _, isources, itargets = w

        sources = [psources[isource] for isource in isources]
        targets = [ptargets[itarget] for itarget in itargets]

        for isource, source in zip(isources, sources):
            for itarget, target in zip(itargets, targets):
                components = engine.get_rule(source, target)\
                    .required_components(target)

                try:
                    base_statics, tcounters = engine.base_statics(
                        source, target, components, nthreads)
                except meta.OutOfBounds as e:
                    # Attach context for error reporting; no meaningful
                    # distance is computed for static targets here.
                    e.context = OutOfBoundsContext(
                        source=sources[0],
                        target=targets[0],
                        distance=float('nan'),
                        components=components)
                    raise
                result = engine._post_process_statics(
                    base_statics, source, target)
                tcounters.append(xtime())

                yield (isource, itarget, result), tcounters
5217class LocalEngine(Engine):
5218 '''
5219 Offline synthetic seismogram calculator.
5221 :param use_env: if ``True``, fill :py:attr:`store_superdirs` and
5222 :py:attr:`store_dirs` with paths set in environment variables
5223 GF_STORE_SUPERDIRS and GF_STORE_DIRS.
5224 :param use_config: if ``True``, fill :py:attr:`store_superdirs` and
5225 :py:attr:`store_dirs` with paths set in the user's config file.
5227 The config file can be found at :file:`~/.pyrocko/config.pf`
5229 .. code-block :: python
5231 gf_store_dirs: ['/home/pyrocko/gf_stores/ak135/']
5232 gf_store_superdirs: ['/home/pyrocko/gf_stores/']
5233 '''
5235 store_superdirs = List.T(
5236 String.T(),
5237 help="directories which are searched for Green's function stores")
5239 store_dirs = List.T(
5240 String.T(),
5241 help="additional individual Green's function store directories")
5243 default_store_id = String.T(
5244 optional=True,
5245 help='default store ID to be used when a request does not provide '
5246 'one')
    def __init__(self, **kwargs):
        '''
        :param use_env: if ``True``, extend :py:attr:`store_superdirs`
            and :py:attr:`store_dirs` with colon-separated paths from
            the environment variables ``GF_STORE_SUPERDIRS`` and
            ``GF_STORE_DIRS``
        :param use_config: if ``True``, extend them with paths from the
            user's config
        '''
        use_env = kwargs.pop('use_env', False)
        use_config = kwargs.pop('use_config', False)
        Engine.__init__(self, **kwargs)
        if use_env:
            env_store_superdirs = os.environ.get('GF_STORE_SUPERDIRS', '')
            env_store_dirs = os.environ.get('GF_STORE_DIRS', '')

            if env_store_superdirs:
                self.store_superdirs.extend(env_store_superdirs.split(':'))

            if env_store_dirs:
                self.store_dirs.extend(env_store_dirs.split(':'))

        if use_config:
            c = config.config()
            self.store_superdirs.extend(c.gf_store_superdirs)
            self.store_dirs.extend(c.gf_store_dirs)

        self._check_store_dirs_type()
        # Lazily populated caches: store ID -> directory, open stores,
        # and the resolved default store ID.
        self._id_to_store_dir = {}
        self._open_stores = {}
        self._effective_default_store_id = None
5271 def _check_store_dirs_type(self):
5272 for sdir in ['store_dirs', 'store_superdirs']:
5273 if not isinstance(self.__getattribute__(sdir), list):
5274 raise TypeError('{} of {} is not of type list'.format(
5275 sdir, self.__class__.__name__))
5277 def _get_store_id(self, store_dir):
5278 store_ = store.Store(store_dir)
5279 store_id = store_.config.id
5280 store_.close()
5281 return store_id
5283 def _looks_like_store_dir(self, store_dir):
5284 return os.path.isdir(store_dir) and \
5285 all(os.path.isfile(pjoin(store_dir, x)) for x in
5286 ('index', 'traces', 'config'))
5288 def iter_store_dirs(self):
5289 store_dirs = set()
5290 for d in self.store_superdirs:
5291 if not os.path.exists(d):
5292 logger.warning('store_superdir not available: %s' % d)
5293 continue
5295 for entry in os.listdir(d):
5296 store_dir = os.path.realpath(pjoin(d, entry))
5297 if self._looks_like_store_dir(store_dir):
5298 store_dirs.add(store_dir)
5300 for store_dir in self.store_dirs:
5301 store_dirs.add(os.path.realpath(store_dir))
5303 return store_dirs
    def _scan_stores(self):
        '''
        Populate the store-ID -> directory mapping from all known store
        directories.

        :raises DuplicateStoreId: if the same store ID is found in two
            different directories
        '''
        for store_dir in self.iter_store_dirs():
            store_id = self._get_store_id(store_dir)
            if store_id not in self._id_to_store_dir:
                self._id_to_store_dir[store_id] = store_dir
            else:
                # Seeing the same realpath twice is harmless; two
                # distinct directories with one ID is an error.
                if store_dir != self._id_to_store_dir[store_id]:
                    raise DuplicateStoreId(
                        'GF store ID %s is used in (at least) two '
                        'different stores. Locations are: %s and %s' %
                        (store_id, self._id_to_store_dir[store_id],
                         store_dir))
    def get_store_dir(self, store_id):
        '''
        Lookup directory given a GF store ID.

        Triggers a (re)scan of the configured directories when the ID is
        not yet known.

        :raises NoSuchStore: if no store with that ID can be found
        '''

        if store_id not in self._id_to_store_dir:
            self._scan_stores()

        if store_id not in self._id_to_store_dir:
            raise NoSuchStore(store_id, self.iter_store_dirs())

        return self._id_to_store_dir[store_id]
5330 def get_store_ids(self):
5331 '''
5332 Get list of available store IDs.
5333 '''
5335 self._scan_stores()
5336 return sorted(self._id_to_store_dir.keys())
5338 def effective_default_store_id(self):
5339 if self._effective_default_store_id is None:
5340 if self.default_store_id is None:
5341 store_ids = self.get_store_ids()
5342 if len(store_ids) == 1:
5343 self._effective_default_store_id = self.get_store_ids()[0]
5344 else:
5345 raise NoDefaultStoreSet()
5346 else:
5347 self._effective_default_store_id = self.default_store_id
5349 return self._effective_default_store_id
    def get_store(self, store_id=None):
        '''
        Get a store from the engine.

        :param store_id: identifier of the store (optional)
        :returns: :py:class:`~pyrocko.gf.store.Store` object

        If no ``store_id`` is provided the store
        associated with the :py:attr:`default_store_id` is returned.
        Raises :py:exc:`NoDefaultStoreSet` if :py:attr:`default_store_id`
        is undefined.
        '''

        if store_id is None:
            store_id = self.effective_default_store_id()

        # Opened stores are cached and reused.
        if store_id not in self._open_stores:
            store_dir = self.get_store_dir(store_id)
            self._open_stores[store_id] = store.Store(store_dir)

        return self._open_stores[store_id]
5373 def get_store_config(self, store_id):
5374 store = self.get_store(store_id)
5375 return store.config
5377 def get_store_extra(self, store_id, key):
5378 store = self.get_store(store_id)
5379 return store.get_extra(key)
5381 def close_cashed_stores(self):
5382 '''
5383 Close and remove ids from cashed stores.
5384 '''
5385 store_ids = []
5386 for store_id, store_ in self._open_stores.items():
5387 store_.close()
5388 store_ids.append(store_id)
5390 for store_id in store_ids:
5391 self._open_stores.pop(store_id)
    def get_rule(self, source, target):
        '''
        Select the first applicable rule to compute the target's
        quantity from the components provided by its GF store.

        :raises BadRequest: if no rule matches
        '''
        cprovided = self.get_store(target.store_id).get_provided_components()

        if isinstance(target, StaticTarget):
            quantity = target.quantity
            available_rules = static_rules
        elif isinstance(target, Target):
            quantity = target.effective_quantity()
            available_rules = channel_rules

        # NOTE(review): if ``target`` is neither a StaticTarget nor a
        # Target, ``quantity`` is unbound and the code below raises
        # NameError instead of BadRequest -- confirm this cannot happen.
        try:
            for rule in available_rules[quantity]:
                cneeded = rule.required_components(target)
                if all(c in cprovided for c in cneeded):
                    return rule

        except KeyError:
            # Unknown quantity: fall through to BadRequest below.
            pass

        raise BadRequest(
            'No rule to calculate "%s" with GFs from store "%s" '
            'for source model "%s".' % (
                target.effective_quantity(),
                target.store_id,
                source.__class__.__name__))
5419 def _cached_discretize_basesource(self, source, store, cache, target):
5420 if (source, store) not in cache:
5421 cache[source, store] = source.discretize_basesource(store, target)
5423 return cache[source, store]
    def base_seismograms(self, source, targets, components, dsource_cache,
                         nthreads=0):
        '''
        Compute raw component seismograms for one source and a batch of
        targets sharing store, interpolation scheme and sample rate.

        :raises BadRequest: if the targets disagree on interpolation
            scheme or sample rate
        '''
        target = targets[0]

        interp = set([t.interpolation for t in targets])
        if len(interp) > 1:
            raise BadRequest('Targets have different interpolation schemes.')

        rates = set([t.sample_rate for t in targets])
        if len(rates) > 1:
            raise BadRequest('Targets have different sample rates.')

        store_ = self.get_store(target.store_id)
        receivers = [t.receiver(store_) for t in targets]

        # An explicit target sample rate wins over the store's native
        # rate.
        if target.sample_rate is not None:
            deltat = 1. / target.sample_rate
            rate = target.sample_rate
        else:
            deltat = None
            rate = store_.config.sample_rate

        tmin = num.fromiter(
            (t.tmin for t in targets), dtype=float, count=len(targets))
        tmax = num.fromiter(
            (t.tmax for t in targets), dtype=float, count=len(targets))

        # Targets with non-finite (undefined) time windows end up with
        # nsamples == -1 below; their itmin stays 0.
        mask = num.logical_and(num.isfinite(tmin), num.isfinite(tmax))

        itmin = num.zeros_like(tmin, dtype=num.int64)
        itmax = num.zeros_like(tmin, dtype=num.int64)
        nsamples = num.full_like(tmin, -1, dtype=num.int64)

        itmin[mask] = num.floor(tmin[mask] * rate).astype(num.int64)
        itmax[mask] = num.ceil(tmax[mask] * rate).astype(num.int64)
        # NOTE(review): this rebinds ``nsamples``, making the
        # full_like initialization above a dead store.
        nsamples = itmax - itmin + 1
        nsamples[num.logical_not(mask)] = -1

        base_source = self._cached_discretize_basesource(
            source, store_, dsource_cache, target)

        base_seismograms = store_.calc_seismograms(
            base_source, receivers, components,
            deltat=deltat,
            itmin=itmin, nsamples=nsamples,
            interpolation=target.interpolation,
            optimization=target.optimization,
            nthreads=nthreads)

        # Align all component traces of each seismogram to a common
        # span.
        for i, base_seismogram in enumerate(base_seismograms):
            base_seismograms[i] = store.make_same_span(base_seismogram)

        return base_seismograms
5480 def base_seismogram(self, source, target, components, dsource_cache,
5481 nthreads):
5483 tcounters = [xtime()]
5485 store_ = self.get_store(target.store_id)
5486 receiver = target.receiver(store_)
5488 if target.tmin and target.tmax is not None:
5489 rate = store_.config.sample_rate
5490 itmin = int(num.floor(target.tmin * rate))
5491 itmax = int(num.ceil(target.tmax * rate))
5492 nsamples = itmax - itmin + 1
5493 else:
5494 itmin = None
5495 nsamples = None
5497 tcounters.append(xtime())
5498 base_source = self._cached_discretize_basesource(
5499 source, store_, dsource_cache, target)
5501 tcounters.append(xtime())
5503 if target.sample_rate is not None:
5504 deltat = 1. / target.sample_rate
5505 else:
5506 deltat = None
5508 base_seismogram = store_.seismogram(
5509 base_source, receiver, components,
5510 deltat=deltat,
5511 itmin=itmin, nsamples=nsamples,
5512 interpolation=target.interpolation,
5513 optimization=target.optimization,
5514 nthreads=nthreads)
5516 tcounters.append(xtime())
5518 base_seismogram = store.make_same_span(base_seismogram)
5520 tcounters.append(xtime())
5522 return base_seismogram, tcounters
    def base_statics(self, source, target, components, nthreads):
        '''
        Compute raw static displacements for a single source-target
        pair.

        :returns: ``(base_statics, tcounters)`` where ``tcounters``
            holds timestamps taken between the processing stages
        '''
        tcounters = [xtime()]
        store_ = self.get_store(target.store_id)

        # Convert the requested snapshot time into a sample index, if
        # given.
        if target.tsnapshot is not None:
            rate = store_.config.sample_rate
            itsnapshot = int(num.floor(target.tsnapshot * rate))
        else:
            itsnapshot = None
        tcounters.append(xtime())

        base_source = source.discretize_basesource(store_, target=target)

        tcounters.append(xtime())

        base_statics = store_.statics(
            base_source,
            target,
            itsnapshot,
            components,
            target.interpolation,
            nthreads)

        tcounters.append(xtime())

        return base_statics, tcounters
    def _post_process_dynamic(self, base_seismogram, source, target):
        '''
        Turn raw component seismograms into the final trace for the
        target: apply the projection rule, source/target scale factors
        and the post-STF convolution.
        '''
        # All components share deltat/itmin after make_same_span; any
        # one of them is representative.
        base_any = next(iter(base_seismogram.values()))
        deltat = base_any.deltat
        itmin = base_any.itmin

        rule = self.get_rule(source, target)
        data = rule.apply_(target, base_seismogram)

        factor = source.get_factor() * target.get_factor()
        if factor != 1.0:
            data = data * factor

        stf = source.effective_stf_post()

        times, amplitudes = stf.discretize_t(
            deltat, 0.0)

        # repeat end point to prevent boundary effects
        padded_data = num.empty(data.size + amplitudes.size, dtype=float)
        padded_data[:data.size] = data
        padded_data[data.size:] = data[-1]
        data = num.convolve(amplitudes, padded_data)

        # Shift the trace start by the STF's leading time offset.
        tmin = itmin * deltat + times[0]

        tr = meta.SeismosizerTrace(
            codes=target.codes,
            data=data[:-amplitudes.size],  # drop the padding again
            deltat=deltat,
            tmin=tmin)

        return target.post_process(self, source, tr)
    def _post_process_statics(self, base_statics, source, starget):
        # Scale the raw static displacements by the source's amplitude
        # factor and hand the result to the target's post-processing.
        rule = self.get_rule(source, starget)
        data = rule.apply_(starget, base_statics)

        factor = source.get_factor()
        if factor != 1.0:
            # In-place scaling: ``v *= factor`` mutates the arrays held in
            # ``data``.  NOTE(review): presumably these are views into
            # ``base_statics`` (which is what gets passed on below) --
            # confirm ``rule.apply_`` does not copy, otherwise the scaling
            # would be lost.
            for v in data.values():
                v *= factor

        return starget.post_process(self, source, base_statics)
    def process(self, *args, **kwargs):
        '''
        Process a request.

        ::

            process(**kwargs)
            process(request, **kwargs)
            process(sources, targets, **kwargs)

        The request can be given as a :py:class:`Request` object, or such an
        object is created using ``Request(**kwargs)`` for convenience.

        :returns: :py:class:`Response` object
        '''

        if len(args) not in (0, 1, 2):
            raise BadRequest('Invalid arguments.')

        if len(args) == 1:
            kwargs['request'] = args[0]

        elif len(args) == 2:
            kwargs.update(Request.args2kwargs(args))

        request = kwargs.pop('request', None)
        status_callback = kwargs.pop('status_callback', None)
        calc_timeseries = kwargs.pop('calc_timeseries', True)

        # ``nprocs`` is a legacy alias for ``nthreads``; it wins when given.
        nprocs = kwargs.pop('nprocs', None)
        nthreads = kwargs.pop('nthreads', 1)
        if nprocs is not None:
            nthreads = nprocs

        if request is None:
            request = Request(**kwargs)

        # Snapshot resource usage (self + children) for the CPU/IO stats
        # reported below; ``resource`` is None on platforms without it.
        if resource:
            rs0 = resource.getrusage(resource.RUSAGE_SELF)
            rc0 = resource.getrusage(resource.RUSAGE_CHILDREN)

        tt0 = xtime()

        # make sure stores are open before fork()
        store_ids = set(target.store_id for target in request.targets)
        for store_id in store_ids:
            self.get_store(store_id)

        # Map source/target objects back to their positions in the request
        # so results can be placed into ``results_list`` by index.
        source_index = dict((x, i) for (i, x) in
                            enumerate(request.sources))
        target_index = dict((x, i) for (i, x) in
                            enumerate(request.targets))

        m = request.subrequest_map()

        skeys = sorted(m.keys(), key=cmp_to_key(cmp_none_aware))
        results_list = []

        # results_list[isource][itarget] will receive the result objects.
        for i in range(len(request.sources)):
            results_list.append([None] * len(request.targets))

        tcounters_dyn_list = []
        tcounters_static_list = []
        nsub = len(skeys)
        isub = 0

        # Processing dynamic targets through
        # parimap(process_subrequest_dynamic)

        if calc_timeseries:
            _process_dynamic = process_dynamic_timeseries
        else:
            _process_dynamic = process_dynamic

        if request.has_dynamic:
            # One work item per subrequest: (index, total, source indices,
            # indices of the non-static targets of that subrequest).
            work_dynamic = [
                (i, nsub,
                 [source_index[source] for source in m[k][0]],
                 [target_index[target] for target in m[k][1]
                  if not isinstance(target, StaticTarget)])
                for (i, k) in enumerate(skeys)]

            for ii_results, tcounters_dyn in _process_dynamic(
                    work_dynamic, request.sources, request.targets, self,
                    nthreads):

                # Keep per-stage timing deltas for the statistics below.
                tcounters_dyn_list.append(num.diff(tcounters_dyn))
                isource, itarget, result = ii_results
                results_list[isource][itarget] = result

                if status_callback:
                    status_callback(isub, nsub)

                isub += 1

        # Processing static targets through process_static
        if request.has_statics:
            work_static = [
                (i, nsub,
                 [source_index[source] for source in m[k][0]],
                 [target_index[target] for target in m[k][1]
                  if isinstance(target, StaticTarget)])
                for (i, k) in enumerate(skeys)]

            for ii_results, tcounters_static in process_static(
                    work_static, request.sources, request.targets, self,
                    nthreads=nthreads):

                tcounters_static_list.append(num.diff(tcounters_static))
                isource, itarget, result = ii_results
                results_list[isource][itarget] = result

                if status_callback:
                    status_callback(isub, nsub)

                isub += 1

        if status_callback:
            status_callback(nsub, nsub)

        tt1 = time.time()
        if resource:
            rs1 = resource.getrusage(resource.RUSAGE_SELF)
            rc1 = resource.getrusage(resource.RUSAGE_CHILDREN)

        s = ProcessingStats()

        if request.has_dynamic:
            # Aggregate per-stage timings over all subrequests and convert
            # them to percentages of the total dynamic processing time.
            tcumu_dyn = num.sum(num.vstack(tcounters_dyn_list), axis=0)
            t_dyn = float(num.sum(tcumu_dyn))
            perc_dyn = map(float, tcumu_dyn / t_dyn * 100.)
            (s.t_perc_get_store_and_receiver,
             s.t_perc_discretize_source,
             s.t_perc_make_base_seismogram,
             s.t_perc_make_same_span,
             s.t_perc_post_process) = perc_dyn
        else:
            t_dyn = 0.

        if request.has_statics:
            # Same aggregation for the static processing stages.
            tcumu_static = num.sum(num.vstack(tcounters_static_list), axis=0)
            t_static = num.sum(tcumu_static)
            perc_static = map(float, tcumu_static / t_static * 100.)
            (s.t_perc_static_get_store,
             s.t_perc_static_discretize_basesource,
             s.t_perc_static_sum_statics,
             s.t_perc_static_post_process) = perc_static

        s.t_wallclock = tt1 - tt0
        if resource:
            s.t_cpu = (
                (rs1.ru_utime + rs1.ru_stime + rc1.ru_utime + rc1.ru_stime) -
                (rs0.ru_utime + rs0.ru_stime + rc0.ru_utime + rc0.ru_stime))
            s.n_read_blocks = (
                (rs1.ru_inblock + rc1.ru_inblock) -
                (rs0.ru_inblock + rc0.ru_inblock))

        # Results shared between subrequests are weighted by the number of
        # sharers so that each stacked record is counted once overall.
        n_records_stacked = 0.
        for results in results_list:
            for result in results:
                if not isinstance(result, meta.Result):
                    continue
                shr = float(result.n_shared_stacking)
                n_records_stacked += result.n_records_stacked / shr
                s.t_perc_optimize += result.t_optimize / shr
                s.t_perc_stack += result.t_stack / shr
        s.n_records_stacked = int(n_records_stacked)
        if t_dyn != 0.:
            # NOTE(review): ``x /= t_dyn * 100`` divides by ``t_dyn * 100``;
            # if a percentage was intended this should probably read
            # ``x = x / t_dyn * 100.`` -- confirm against ProcessingStats.
            s.t_perc_optimize /= t_dyn * 100
            s.t_perc_stack /= t_dyn * 100

        return Response(
            request=request,
            results_list=results_list,
            stats=s)
class RemoteEngine(Engine):
    '''
    Client for remote synthetic seismogram calculator.

    Forwards processing requests to a seismosizer web service instead of
    computing them locally.
    '''

    site = String.T(default=ws.g_default_site, optional=True)
    url = String.T(default=ws.g_url, optional=True)

    def process(self, request=None, status_callback=None, **kwargs):
        # Build a request from keyword arguments when none was given
        # explicitly.  ``status_callback`` is accepted for interface
        # compatibility but progress is not reported for remote requests.
        req = Request(**kwargs) if request is None else request
        return ws.seismosizer(url=self.url, site=self.site, request=req)
g_engine = None


def get_engine(store_superdirs=None):
    '''
    Get the global :py:class:`LocalEngine` instance, creating it on first use.

    :param store_superdirs: optional iterable of additional store
        super-directories to make available on the engine; each entry is
        appended only if not already present.  Defaults to no extra
        directories.
    :returns: the shared :py:class:`LocalEngine` singleton
    '''
    global g_engine
    if g_engine is None:
        g_engine = LocalEngine(use_env=True, use_config=True)

    # The previous default was a mutable ``[]`` default argument; a ``None``
    # sentinel avoids the shared-mutable-default pitfall while remaining
    # backward-compatible for all callers.
    for d in store_superdirs or []:
        if d not in g_engine.store_superdirs:
            g_engine.store_superdirs.append(d)

    return g_engine
class SourceGroup(Object):
    '''
    Abstract base class for collections of sources.

    Attribute access on the group gathers the corresponding attribute from
    every contained source into a float array; iteration and length are
    left to subclasses.
    '''

    def __getattr__(self, k):
        values = (getattr(source, k) for source in self)
        return num.fromiter(values, dtype=float)

    def __iter__(self):
        raise NotImplementedError(
            'This method should be implemented in subclass.')

    def __len__(self):
        raise NotImplementedError(
            'This method should be implemented in subclass.')
class SourceList(SourceGroup):
    '''
    A plain, ordered collection of sources.
    '''

    sources = List.T(Source.T())

    def append(self, s):
        # Add a single source to the end of the collection.
        self.sources.append(s)

    def __iter__(self):
        yield from self.sources

    def __len__(self):
        return len(self.sources)
class SourceGrid(SourceGroup):
    '''
    Regular grid of sources, derived from a base source by varying a set of
    its parameters over given ranges.
    '''

    base = Source.T()
    variables = Dict.T(String.T(), Range.T())
    order = List.T(String.T())

    def __len__(self):
        # Grid size is the product of the lengths of all coordinate axes.
        counts = [len(list(values))
                  for (_, values) in self.make_coords(self.base)]
        return math.prod(counts)

    def __iter__(self):
        coords = self.make_coords(self.base)
        for combination in permudef(coords):
            source = self.base.clone(**dict(combination))
            source.regularize()
            yield source

    def ordered_params(self):
        # Yield the varied parameter names, honouring the explicit ordering
        # first, then the base source's own attribute order.
        remaining = list(self.variables.keys())
        for name in self.order + list(self.base.keys()):
            if name in remaining:
                yield name
                remaining.remove(name)

        if remaining:
            raise Exception('Invalid parameter "%s" for source type "%s".' %
                            (remaining[0], self.base.__class__.__name__))

    def make_coords(self, base):
        # One (parameter, values) pair per varied parameter, anchored at
        # the base source's current value.
        return [(param, self.variables[param].make(base=base[param]))
                for param in self.ordered_params()]
# All concrete source model classes exported by this module; also used to
# assemble ``__all__`` below.
source_classes = [
    Source,
    SourceWithMagnitude,
    SourceWithDerivedMagnitude,
    ExplosionSource,
    RectangularExplosionSource,
    DCSource,
    CLVDSource,
    VLVDSource,
    MTSource,
    RectangularSource,
    PseudoDynamicRupture,
    DoubleDCSource,
    RingfaultSource,
    CombiSource,
    SFSource,
    PorePressurePointSource,
    PorePressureLineSource,
]
# All source-time-function classes exported by this module; also used to
# assemble ``__all__`` below.
stf_classes = [
    STF,
    BoxcarSTF,
    TriangularSTF,
    HalfSinusoidSTF,
    ResonatorSTF,
    TremorSTF,
]
# Public API of this module: explicitly named helpers plus the names of all
# source and STF classes collected above.
__all__ = '''
SeismosizerError
BadRequest
NoSuchStore
DerivedMagnitudeError
STFMode
'''.split() + [S.__name__ for S in source_classes + stf_classes] + '''
Request
ProcessingStats
Response
Engine
LocalEngine
RemoteEngine
source_classes
get_engine
Range
SourceGroup
SourceList
SourceGrid
map_anchor
'''.split()