1# http://pyrocko.org - GPLv3
2#
3# The Pyrocko Developers, 21st Century
4# ---|P------/S----------~Lg----------
6'''
10.. _coordinate-system-names:
12Coordinate systems
13..................
15Coordinate system names commonly used in source models.
17================= ============================================
18Name Description
19================= ============================================
20``'xyz'`` northing, easting, depth in [m]
21``'xy'`` northing, easting in [m]
22``'latlon'`` latitude, longitude in [deg]
23``'lonlat'`` longitude, latitude in [deg]
24``'latlondepth'`` latitude, longitude in [deg], depth in [m]
25================= ============================================
26'''
28from collections import defaultdict
29from functools import cmp_to_key
30import time
31import math
32import os
33import re
34import logging
35try:
36 import resource
37except ImportError:
38 resource = None
39from hashlib import sha1
41import numpy as num
42from scipy.interpolate import RegularGridInterpolator
44from pyrocko.guts import (Object, Float, String, StringChoice, List,
45 Timestamp, Int, SObject, ArgumentError, Dict,
46 ValidationError, Bool)
47from pyrocko.guts_array import Array
49from pyrocko import moment_tensor as pmt
50from pyrocko import trace, util, config, model, eikonal_ext
51from pyrocko.orthodrome import ne_to_latlon
52from pyrocko.model import Location
53from pyrocko.modelling import OkadaSource, make_okada_coefficient_matrix, \
54 okada_ext, invert_fault_dislocations_bem
56from . import meta, store, ws
57from .tractions import TractionField, DirectedTractions
58from .targets import Target, StaticTarget, SatelliteTarget
# Shorthand for building filesystem paths.
pjoin = os.path.join

# Namespace prefix for guts-serialized objects of this module.
guts_prefix = 'pf'

# Degree/radian conversion factors and kilometer shorthand.
d2r = math.pi / 180.
r2d = 180. / math.pi
km = 1e3

logger = logging.getLogger('pyrocko.gf.seismosizer')
def cmp_none_aware(a, b):
    '''
    Three-way comparison treating ``None`` as smaller than anything else.

    Tuples are compared element-wise (recursively); the first unequal
    pair decides. Returns -1, 0 or 1, suitable for use with
    :py:func:`functools.cmp_to_key`.
    '''

    if isinstance(a, tuple) and isinstance(b, tuple):
        for ea, eb in zip(a, b):
            res = cmp_none_aware(ea, eb)
            if res != 0:
                return res

        return 0

    if a is None and b is None:
        return 0

    if a is None:
        return -1

    if b is None:
        return 1

    return bool(a > b) - bool(a < b)
def xtime():
    '''
    Get the current system time in seconds since the epoch.

    Thin indirection over :py:func:`time.time`, kept as a hook.
    '''
    return time.time()
class SeismosizerError(Exception):
    '''
    Base class for errors raised by the seismosizer subsystem.
    '''
    pass
class BadRequest(SeismosizerError):
    '''
    Raised when an invalid request is made to the seismosizer.
    '''
    pass
class DuplicateStoreId(Exception):
    '''
    Raised when two GF stores with the same ID are encountered.
    '''
    pass
class NoDefaultStoreSet(Exception):
    '''
    Raised when a default GF store is required but none is configured.
    '''
    pass
class ConversionError(Exception):
    '''
    Raised when conversion between source representations fails.
    '''
    pass
class NoSuchStore(BadRequest):
    '''
    Raised when a GF store with the requested ID cannot be located.
    '''

    def __init__(self, store_id=None, dirs=None):
        BadRequest.__init__(self)
        self.store_id = store_id
        self.dirs = dirs

    def __str__(self):
        if self.store_id is None:
            message = 'GF store not found.'
        else:
            message = 'no GF store with id "%s" found.' % self.store_id

        if self.dirs is not None:
            message += ' Searched folders:\n %s' % '\n '.join(
                sorted(self.dirs))

        return message
def ufloat(s):
    '''
    Convert a string to a float, supporting metric unit suffixes.

    Recognized suffixes: ``'k'`` (1e3) and ``'M'`` (1e6), e.g. ``'10k'``
    maps to ``10000.0``.

    :param s: string to convert, e.g. ``'5'``, ``'2.5k'``, ``'1M'``.
    :returns: converted value as ``float``.
    :raises ValueError: if the string is a bare unit suffix or otherwise
        not a valid number.
    '''
    units = {
        'k': 1e3,
        'M': 1e6,
    }

    factor = 1.0
    if s and s[-1] in units:
        factor = units[s[-1]]
        num_part = s[:-1]
        if not num_part:
            # Bug fix: report the original input, not the stripped
            # remainder (which is empty here and made the message
            # useless).
            raise ValueError("unit without a number: '%s'" % s)
        s = num_part

    return float(s) * factor
def ufloat_or_none(s):
    '''
    Like :py:func:`ufloat`, but map empty/falsy input to ``None``.
    '''
    return ufloat(s) if s else None
def int_or_none(s):
    '''
    Convert to ``int``, mapping empty/falsy input to ``None``.
    '''
    return int(s) if s else None
def nonzero(x, eps=1e-15):
    '''
    Check whether ``x`` differs from zero by more than ``eps``.
    '''
    return eps < abs(x)
def permudef(ln, j=0):
    '''
    Generate all combinations over a list of ``(name, values)`` pairs.

    ``ln`` is modified in place: for each yielded combination, entry
    ``j`` holds ``(name, value)`` with one picked value; after
    exhaustion each entry is restored to its original ``(name, values)``
    pair. The same list object is yielded every time, so callers must
    copy it (e.g. ``dict(ln)``) to keep a combination.
    '''

    if j >= len(ln):
        yield ln
        return

    name, values = ln[j]
    for value in values:
        ln[j] = name, value
        yield from permudef(ln, j + 1)

    ln[j] = name, values
def arr(x):
    '''
    Coerce ``x`` to a NumPy array with at least one dimension.
    '''
    a = num.asarray(x)
    return num.atleast_1d(a)
def discretize_rect_source(deltas, deltat, time, north, east, depth,
                           strike, dip, length, width,
                           anchor, velocity=None, stf=None,
                           nucleation_x=None, nucleation_y=None,
                           decimation_factor=1, pointsonly=False,
                           plane_coords=False,
                           aggressive_oversampling=False):
    '''
    Discretize a rectangular source plane into sub-source points.

    The plane is subdivided into ``nl x nw`` points spaced finely enough
    for the GF store sampling (``deltas``, ``deltat * velocity``); when a
    rupture ``velocity`` is given, rupture onset times relative to the
    nucleation point are assigned per point and the given ``stf`` is
    discretized per sub-source.

    :returns: ``(points, dl, dw, nl, nw)`` when ``plane_coords`` or
        ``pointsonly`` is set, else
        ``(points2, times2, amplitudes2, dl, dw, nl, nw)``.

    NOTE(review): ``times = dist / velocity`` below executes before the
    ``plane_coords``/``pointsonly`` early returns, so calling those
    modes with ``velocity=None`` would raise — presumably callers of
    these modes always pass a velocity; verify.
    '''

    if stf is None:
        stf = STF()

    if not velocity and not pointsonly:
        raise AttributeError('velocity is required in time mode')

    # Finest spacing required by the GF store (spatial and, in time
    # mode, temporal sampling converted to distance via the velocity).
    mindeltagf = float(num.min(deltas))
    if velocity:
        mindeltagf = min(mindeltagf, deltat * velocity)

    ln = length
    wd = width

    if aggressive_oversampling:
        # Twice the point density along both directions.
        nl = int((2. / decimation_factor) * num.ceil(ln / mindeltagf)) + 1
        nw = int((2. / decimation_factor) * num.ceil(wd / mindeltagf)) + 1
    else:
        nl = int((1. / decimation_factor) * num.ceil(ln / mindeltagf)) + 1
        nw = int((1. / decimation_factor) * num.ceil(wd / mindeltagf)) + 1

    n = int(nl * nw)

    # Sub-source cell dimensions.
    dl = ln / nl
    dw = wd / nw

    # Cell-center coordinates in the fault plane (length, width).
    xl = num.linspace(-0.5 * (ln - dl), 0.5 * (ln - dl), nl)
    xw = num.linspace(-0.5 * (wd - dw), 0.5 * (wd - dw), nw)

    points = num.zeros((n, 3))
    points[:, 0] = num.tile(xl, nw)
    points[:, 1] = num.repeat(xw, nl)

    # Distance of each point from the nucleation position (assumed in
    # the same plane coordinates as ``points`` — TODO confirm scaling
    # against callers).
    if nucleation_x is not None:
        dist_x = num.abs(nucleation_x - points[:, 0])
    else:
        dist_x = num.zeros(n)

    if nucleation_y is not None:
        dist_y = num.abs(nucleation_y - points[:, 1])
    else:
        dist_y = num.zeros(n)

    dist = num.sqrt(dist_x**2 + dist_y**2)
    times = dist / velocity

    anch_x, anch_y = map_anchor[anchor]

    # Shift the plane so the requested anchor coincides with the origin.
    points[:, 0] -= anch_x * 0.5 * length
    points[:, 1] -= anch_y * 0.5 * width

    if plane_coords:
        return points, dl, dw, nl, nw

    # Rotate the plane by dip and strike, then translate to the source
    # position in (north, east, depth) coordinates.
    rotmat = pmt.euler_to_matrix(dip * d2r, strike * d2r, 0.0)
    points = num.dot(rotmat.T, points.T).T

    points[:, 0] += north
    points[:, 1] += east
    points[:, 2] += depth

    if pointsonly:
        return points, dl, dw, nl, nw

    # Convolve each sub-source onset with the discretized STF: every
    # point is repeated ``nt`` times with the STF's time offsets and
    # amplitude weights.
    xtau, amplitudes = stf.discretize_t(deltat, time)
    nt = xtau.size

    points2 = num.repeat(points, nt, axis=0)
    times2 = (times[:, num.newaxis] + xtau[num.newaxis, :]).ravel()
    amplitudes2 = num.tile(amplitudes, n)

    return points2, times2, amplitudes2, dl, dw, nl, nw
def check_rect_source_discretisation(points2, nl, nw, store):
    '''
    Check if the discretisation of a rectangular source is fine enough.

    Compares the spacing between neighbouring sub-sources (assuming a
    non-rotated fault plane) against half the minimum S-wavelength
    resolvable with the GF store's sampling rate.

    :returns: ``True`` if the discretisation is acceptable (or the
        source has so few sub-sources that only a warning is emitted),
        ``False`` if the sub-sources are spaced too coarsely.
    '''
    N_CRITICAL = 8

    grid = points2.T.reshape((3, nl, nw))
    if grid.size <= N_CRITICAL:
        logger.warning('RectangularSource is defined by only %d sub-sources!'
                       % grid.size)
        return True

    # Spacing between the first two rows of neighbouring sub-sources.
    spacing = num.sqrt(
        (grid[0, 0, :] - grid[0, 1, :])**2 +
        (grid[1, 0, :] - grid[1, 1, :])**2 +
        (grid[2, 0, :] - grid[2, 1, :])**2)

    depths = grid[2, 0, :]
    vs_profile = store.config.get_vs(
        lat=0., lon=0.,
        points=num.repeat(depths[:, num.newaxis], 3, axis=1),
        interpolation='multilinear')

    # Shortest resolvable wavelength: vs / f_nyquist = vs * 2 * deltat.
    min_wavelength = vs_profile * (store.config.deltat * 2)
    if not num.all(min_wavelength > spacing / 2):
        return False

    return True
def outline_rect_source(strike, dip, length, width, anchor):
    '''
    Get the outline of a rectangular source as five corner points.

    The four corners (first repeated to close the polygon) are given in
    plane coordinates, shifted by the ``anchor`` position and rotated
    according to ``strike`` and ``dip``.
    '''
    half_l = 0.5 * length
    half_w = 0.5 * width

    corners = num.array(
        [[-half_l, -half_w, 0.],
         [half_l, -half_w, 0.],
         [half_l, half_w, 0.],
         [-half_l, half_w, 0.],
         [-half_l, -half_w, 0.]])

    anch_x, anch_y = map_anchor[anchor]
    corners[:, 0] -= anch_x * half_l
    corners[:, 1] -= anch_y * half_w

    rotmat = pmt.euler_to_matrix(dip * d2r, strike * d2r, 0.0)

    return num.dot(rotmat.T, corners.T).T
def from_plane_coords(
        strike, dip, length, width, depth, x_plane_coords, y_plane_coords,
        lat=0., lon=0.,
        north_shift=0, east_shift=0,
        anchor='top', cs='xy'):
    '''
    Convert normalized fault-plane coordinates to absolute coordinates.

    ``x_plane_coords``/``y_plane_coords`` (scalar or list) scale the
    corner template of the plane; the result is rotated by strike/dip,
    shifted by ``north_shift``/``east_shift``/``depth`` and expressed in
    the coordinate system selected by ``cs`` (``'xy'``, ``'latlon'`` or
    ``'lonlat'``).

    NOTE(review): the returned values are picked from single fixed rows
    of the rotated corner array (``[1:2, 1]`` and ``[2:3, 0]``) —
    presumably a deliberate corner selection, but verify against
    callers.
    '''

    ln = length
    wd = width
    x_abs = []
    y_abs = []
    # Normalize scalar input to one-element lists.
    if not isinstance(x_plane_coords, list):
        x_plane_coords = [x_plane_coords]
        y_plane_coords = [y_plane_coords]

    for x_plane, y_plane in zip(x_plane_coords, y_plane_coords):
        # Corner template of the plane, scaled by the normalized
        # plane coordinates.
        points = num.array(
            [[-0.5 * ln * x_plane, -0.5 * wd * y_plane, 0.],
             [0.5 * ln * x_plane, -0.5 * wd * y_plane, 0.],
             [0.5 * ln * x_plane, 0.5 * wd * y_plane, 0.],
             [-0.5 * ln * x_plane, 0.5 * wd * y_plane, 0.],
             [-0.5 * ln * x_plane, -0.5 * wd * y_plane, 0.]])

        # Shift so the requested anchor coincides with the origin.
        anch_x, anch_y = map_anchor[anchor]
        points[:, 0] -= anch_x * 0.5 * length
        points[:, 1] -= anch_y * 0.5 * width

        rotmat = pmt.euler_to_matrix(dip * d2r, strike * d2r, 0.0)

        points = num.dot(rotmat.T, points.T).T
        points[:, 0] += north_shift
        points[:, 1] += east_shift
        points[:, 2] += depth
        if cs in ('latlon', 'lonlat'):
            latlon = ne_to_latlon(lat, lon,
                                  points[:, 0], points[:, 1])
            latlon = num.array(latlon).T
            x_abs.append(latlon[1:2, 1])
            y_abs.append(latlon[2:3, 0])
        if cs == 'xy':
            x_abs.append(points[1:2, 1])
            y_abs.append(points[2:3, 0])

    # For 'lonlat' the axis order of the return value is swapped.
    if cs == 'lonlat':
        return y_abs, x_abs
    else:
        return x_abs, y_abs
def points_on_rect_source(
        strike, dip, length, width, anchor,
        discretized_basesource=None, points_x=None, points_y=None):
    '''
    Get positions of points located on a rectangular source plane.

    Point positions are defined either by a discretized base source
    (one point per patch corner) or by normalized plane coordinates
    ``points_x``/``points_y``. The points are shifted by the ``anchor``
    position and rotated according to ``strike`` and ``dip``.

    NOTE(review): if neither ``discretized_basesource`` nor non-empty
    ``points_x``/``points_y`` are given, ``points`` is never assigned
    (``UnboundLocalError``), and ``points_x=None`` without a base
    source fails on ``.shape`` — presumably callers always supply one
    of the two; verify.
    '''

    ln = length
    wd = width

    # Accept scalars or lists for the normalized point coordinates.
    if isinstance(points_x, list) or isinstance(points_x, float):
        points_x = num.array([points_x])
    if isinstance(points_y, list) or isinstance(points_y, float):
        points_y = num.array([points_y])

    if discretized_basesource:
        ds = discretized_basesource

        # One point per patch corner.
        nl_patches = ds.nl + 1
        nw_patches = ds.nw + 1

        npoints = nl_patches * nw_patches
        points = num.zeros((npoints, 3))
        ln_patches = num.array([il for il in range(nl_patches)])
        wd_patches = num.array([iw for iw in range(nw_patches)])

        # Normalize corner indices to [-1, 1] along length and width.
        points_ln =\
            2 * ((ln_patches - num.min(ln_patches)) / num.ptp(ln_patches)) - 1
        points_wd =\
            2 * ((wd_patches - num.min(wd_patches)) / num.ptp(wd_patches)) - 1

        for il in range(nl_patches):
            for iw in range(nw_patches):
                points[il * nw_patches + iw, :] = num.array([
                    points_ln[il] * ln * 0.5,
                    points_wd[iw] * wd * 0.5, 0.0])

    elif points_x.shape[0] > 0 and points_y.shape[0] > 0:
        points = num.zeros(shape=((len(points_x), 3)))
        for i, (x, y) in enumerate(zip(points_x, points_y)):
            points[i, :] = num.array(
                [x * 0.5 * ln, y * 0.5 * wd, 0.0])

    anch_x, anch_y = map_anchor[anchor]

    # Shift so the requested anchor coincides with the origin.
    points[:, 0] -= anch_x * 0.5 * ln
    points[:, 1] -= anch_y * 0.5 * wd

    rotmat = pmt.euler_to_matrix(dip * d2r, strike * d2r, 0.0)

    return num.dot(rotmat.T, points.T).T
class InvalidGridDef(Exception):
    '''
    Raised when a grid definition string cannot be parsed.
    '''
    pass
class Range(SObject):
    '''
    Convenient range specification.

    Equivalent ways to specify the range [ 0., 1000., ... 10000. ]::

      Range('0 .. 10k : 1k')
      Range(start=0., stop=10e3, step=1e3)
      Range(0, 10e3, 1e3)
      Range('0 .. 10k @ 11')
      Range(start=0., stop=10*km, n=11)

      Range(0, 10e3, n=11)
      Range(values=[x*1e3 for x in range(11)])

    Depending on the use context, it can be possible to omit any part of the
    specification. E.g. in the context of extracting a subset of an already
    existing range, the existing range's specification values would be filled
    in where missing.

    The values are distributed with equal spacing, unless the ``spacing``
    argument is modified. The values can be created offset or relative to an
    external base value with the ``relative`` argument if the use context
    supports this.

    The range specification can be expressed with a short string
    representation::

        'start .. stop @ num | spacing, relative'
        'start .. stop : step | spacing, relative'

    most parts of the expression can be omitted if not needed. Whitespace is
    allowed for readability but can also be omitted.
    '''

    start = Float.T(optional=True)
    stop = Float.T(optional=True)
    step = Float.T(optional=True)
    n = Int.T(optional=True)
    values = Array.T(optional=True, dtype=float, shape=(None,))

    spacing = StringChoice.T(
        choices=['lin', 'log', 'symlog'],
        default='lin',
        optional=True)

    relative = StringChoice.T(
        choices=['', 'add', 'mult'],
        default='',
        optional=True)

    # Shorthand syntax: '[start .. stop][@n | :step][| stuff]'.
    pattern = re.compile(r'^((?P<start>.*)\.\.(?P<stop>[^@|:]*))?'
                         r'(@(?P<n>[^|]+)|:(?P<step>[^|]+))?'
                         r'(\|(?P<stuff>.+))?$')

    def __init__(self, *args, **kwargs):
        # Accepts a single shorthand string, (start, stop[, step])
        # positionals, or pure keyword arguments.
        d = {}
        if len(args) == 1:
            d = self.parse(args[0])
        elif len(args) in (2, 3):
            d['start'], d['stop'] = [float(x) for x in args[:2]]
            if len(args) == 3:
                d['step'] = float(args[2])

        for k, v in kwargs.items():
            if k in d:
                raise ArgumentError('%s specified more than once' % k)

            d[k] = v

        SObject.__init__(self, **d)

    def __str__(self):
        def sfloat(x):
            if x is not None:
                return '%g' % x
            else:
                return ''

        # NOTE(review): ``if self.values:`` raises for multi-element
        # NumPy arrays (ambiguous truth value) — presumably ``values``
        # holds at most one element or is None whenever __str__ is
        # used; verify.
        if self.values:
            return ','.join('%g' % x for x in self.values)

        if self.start is None and self.stop is None:
            s0 = ''
        else:
            s0 = '%s .. %s' % (sfloat(self.start), sfloat(self.stop))

        s1 = ''
        if self.step is not None:
            s1 = [' : %g', ':%g'][s0 == ''] % self.step
        elif self.n is not None:
            s1 = [' @ %i', '@%i'][s0 == ''] % self.n

        if self.spacing == 'lin' and self.relative == '':
            s2 = ''
        else:
            x = []
            if self.spacing != 'lin':
                x.append(self.spacing)

            if self.relative != '':
                x.append(self.relative)

            s2 = ' | %s' % ','.join(x)

        return s0 + s1 + s2

    @classmethod
    def parse(cls, s):
        '''
        Parse a shorthand range string into a dict of range parameters.

        :raises InvalidGridDef: if the string is neither in range syntax
            nor a comma-separated list of values.
        '''
        s = re.sub(r'\s+', '', s)
        m = cls.pattern.match(s)
        if not m:
            # Not in range syntax; try a plain comma-separated value
            # list.
            try:
                vals = [ufloat(x) for x in s.split(',')]
            except Exception:
                raise InvalidGridDef(
                    '"%s" is not a valid range specification' % s)

            return dict(values=num.array(vals, dtype=float))

        d = m.groupdict()
        try:
            start = ufloat_or_none(d['start'])
            stop = ufloat_or_none(d['stop'])
            step = ufloat_or_none(d['step'])
            n = int_or_none(d['n'])
        except Exception:
            raise InvalidGridDef(
                '"%s" is not a valid range specification' % s)

        spacing = 'lin'
        relative = ''

        if d['stuff'] is not None:
            t = d['stuff'].split(',')
            for x in t:
                if x in cls.spacing.choices:
                    spacing = x
                elif x and x in cls.relative.choices:
                    relative = x
                else:
                    raise InvalidGridDef(
                        '"%s" is not a valid range specification' % s)

        return dict(start=start, stop=stop, step=step, n=n, spacing=spacing,
                    relative=relative)

    def make(self, mi=None, ma=None, inc=None, base=None, eps=1e-5):
        '''
        Create the list of values described by this range.

        Missing specification parts are filled in from the context
        arguments ``mi``/``ma``/``inc``; ``base`` is required for
        relative ranges.
        '''
        if self.values:
            return self.values

        start = self.start
        stop = self.stop
        step = self.step
        n = self.n

        # A negative step means counting downward (from ma to mi).
        swap = step is not None and step < 0.
        if start is None:
            start = [mi, ma][swap]
        if stop is None:
            stop = [ma, mi][swap]
        if step is None and inc is not None:
            step = [inc, -inc][ma < mi]

        if start is None or stop is None:
            raise InvalidGridDef(
                'Cannot use range specification "%s" without start '
                'and stop in this context' % self)

        if step is None and n is None:
            step = stop - start

        if n is None:
            if (step < 0) != (stop - start < 0):
                raise InvalidGridDef(
                    'Range specification "%s" has inconsistent ordering '
                    '(step < 0 => stop > start)' % self)

            n = int(round((stop - start) / step)) + 1
            stop2 = start + (n - 1) * step
            if abs(stop - stop2) > eps:
                # Step does not divide the interval evenly; shorten the
                # range to the last value not exceeding stop.
                n = int(math.floor((stop - start) / step)) + 1
                stop = start + (n - 1) * step
            else:
                stop = stop2

        if start == stop:
            n = 1

        if self.spacing == 'lin':
            vals = num.linspace(start, stop, n)

        elif self.spacing in ('log', 'symlog'):
            if start > 0. and stop > 0.:
                vals = num.exp(num.linspace(num.log(start),
                                            num.log(stop), n))
            elif start < 0. and stop < 0.:
                vals = -num.exp(num.linspace(num.log(-start),
                                             num.log(-stop), n))
            else:
                raise InvalidGridDef(
                    'Log ranges should not include or cross zero '
                    '(in range specification "%s").' % self)

            if self.spacing == 'symlog':
                # Mirror the log-spaced values around zero.
                nvals = - vals
                vals = num.concatenate((nvals[::-1], vals))

        if self.relative in ('add', 'mult') and base is None:
            raise InvalidGridDef(
                'Cannot use relative range specification in this context.')

        vals = self.make_relative(base, vals)

        return list(map(float, vals))

    def make_relative(self, base, vals):
        # In-place offset or scaling by the external base value; no-op
        # when the range is not relative.
        if self.relative == 'add':
            vals += base

        if self.relative == 'mult':
            vals *= base

        return vals
class GridDefElement(Object):
    '''
    Single parameter/range pair of a grid definition.

    Can be created from a shorthand string ``'param = range-spec'``.
    '''

    param = meta.StringID.T()
    rs = Range.T()

    def __init__(self, shorthand=None, **kwargs):
        if shorthand is not None:
            parts = shorthand.split('=')
            if len(parts) != 2:
                raise InvalidGridDef(
                    'Invalid grid specification element: %s' % shorthand)

            kwargs['param'] = parts[0].strip()
            kwargs['rs'] = Range(parts[1].strip())

        Object.__init__(self, **kwargs)

    def shorthand(self):
        return self.param + ' = ' + str(self.rs)
class GridDef(Object):
    '''
    Grid definition: a collection of parameter/range elements.

    Can be created from a shorthand string with elements separated by
    semicolons and/or newlines, e.g.
    ``'depth = 0 .. 10k : 1k; strike = 0 .. 90 @ 10'``.
    '''

    elements = List.T(GridDefElement.T())

    def __init__(self, shorthand=None, **kwargs):
        if shorthand is not None:
            t = shorthand.splitlines()
            tt = []
            for x in t:
                x = x.strip()
                if x:
                    tt.extend(x.split(';'))

            elements = []
            for se in tt:
                # Bug fix: each spec string describes a single element,
                # so build GridDefElement objects. The previous code
                # constructed nested GridDef instances, which recursed
                # endlessly and violated the declared element type.
                elements.append(GridDefElement(se))

            kwargs['elements'] = elements

        Object.__init__(self, **kwargs)

    def shorthand(self):
        return '; '.join(str(x) for x in self.elements)
class Cloneable(object):
    '''
    Mix-in adding mapping-style access and copying to parameterized
    objects.

    Keys are the object's declared property names (``self.T.propnames``).
    '''

    def __iter__(self):
        return iter(self.T.propnames)

    def __getitem__(self, name):
        if name in self.keys():
            return getattr(self, name)

        raise KeyError(name)

    def __setitem__(self, name, value):
        if name in self.keys():
            return setattr(self, name, value)

        raise KeyError(name)

    def clone(self, **kwargs):
        '''
        Make a copy of the object.

        A new object of the same class is created and initialized with
        the parameters of this object. Any ``kwargs`` override the
        copied initialization parameters. Nested ``Cloneable`` values
        are cloned recursively.
        '''

        params = dict(self)
        for name, value in list(params.items()):
            if isinstance(value, Cloneable):
                params[name] = value.clone()

        params.update(kwargs)
        return self.__class__(**params)

    @classmethod
    def keys(cls):
        '''
        Get list of the source model's parameter names.
        '''
        return cls.T.propnames
class STF(Object, Cloneable):
    '''
    Base class for source time functions.

    The base implementation represents a unit pulse released at the
    reference time. Subclasses with a finite duration must define a
    ``duration`` attribute and override the discretization.
    '''

    def __init__(self, effective_duration=None, **kwargs):
        # ``effective_duration`` is an alternative way of giving
        # ``duration``; subclasses supply the conversion factor.
        if effective_duration is not None:
            kwargs['duration'] = (
                effective_duration / self.factor_duration_to_effective())

        Object.__init__(self, **kwargs)

    @classmethod
    def factor_duration_to_effective(cls):
        return 1.0

    def centroid_time(self, tref):
        return tref

    @property
    def effective_duration(self):
        return self.duration * self.factor_duration_to_effective()

    def discretize_t(self, deltat, tref):
        '''
        Discretize the pulse onto the sampling raster.

        The impulse at ``tref`` is split between the two neighbouring
        sample times with linear interpolation weights.
        '''
        t_floor = math.floor(tref / deltat) * deltat
        t_ceil = math.ceil(tref / deltat) * deltat

        if t_floor == t_ceil:
            return num.array([t_floor], dtype=float), num.ones(1)

        times = num.array([t_floor, t_ceil], dtype=float)
        weights = num.array(
            [t_ceil - tref, tref - t_floor], dtype=float) / deltat
        return times, weights

    def base_key(self):
        return (type(self).__name__,)
# Shared unit-pulse STF instance, used when a source has no STF configured.
g_unit_pulse = STF()
def sshift(times, amplitudes, tshift, deltat):
    '''
    Apply a sub-sample time shift to a sampled signal.

    When ``tshift`` falls between sample times, the amplitudes are
    linearly redistributed onto a raster one sample longer.

    NOTE: a ``tshift`` that is an exact multiple of ``deltat`` returns
    the inputs unchanged — presumably shifts here are always
    sub-sample; verify at call sites.

    :param times: sample times, equally spaced by ``deltat``.
    :param amplitudes: sample values, same length as ``times``.
    :returns: tuple ``(times, amplitudes)`` of the shifted signal.
    '''
    t_lo = math.floor(tshift / deltat) * deltat
    t_hi = math.ceil(tshift / deltat) * deltat

    if t_lo == t_hi:
        return times, amplitudes

    # Linear interpolation weights of the two bracketing rasters.
    w_lo = (t_hi - tshift) / deltat
    w_hi = (tshift - t_lo) / deltat

    shifted = num.zeros(amplitudes.size + 1, dtype=float)
    shifted[:-1] += w_lo * amplitudes
    shifted[1:] += w_hi * amplitudes

    times_out = (
        num.arange(times.size + 1, dtype=float) * deltat + times[0] + t_lo)

    return times_out, shifted
class BoxcarSTF(STF):

    '''
    Boxcar type source time function.

    .. figure :: /static/stf-BoxcarSTF.svg
        :width: 40%
        :align: center
        :alt: boxcar source time function
    '''

    duration = Float.T(
        default=0.0,
        help='duration of the boxcar')

    anchor = Float.T(
        default=0.0,
        help='anchor point with respect to source.time: ('
             '-1.0: left -> source duration [0, T] ~ hypocenter time, '
             ' 0.0: center -> source duration [-T/2, T/2] ~ centroid time, '
             '+1.0: right -> source duration [-T, 0] ~ rupture end time)')

    @classmethod
    def factor_duration_to_effective(cls):
        return 1.0

    def centroid_time(self, tref):
        # Centroid sits mid-boxcar; a non-zero anchor shifts it off tref.
        return tref - 0.5 * self.duration * self.anchor

    def discretize_t(self, deltat, tref):
        '''
        Discretize the boxcar onto the sampling raster.

        Amplitudes are exact cell integrals of the boxcar, normalized
        to unit sum; a final sub-sample shift restores the exact
        centroid time.
        '''
        tmin_stf = tref - self.duration * (self.anchor + 1.) * 0.5
        tmax_stf = tref + self.duration * (1. - self.anchor) * 0.5
        tmin = round(tmin_stf / deltat) * deltat
        tmax = round(tmax_stf / deltat) * deltat
        nt = int(round((tmax - tmin) / deltat)) + 1
        times = num.linspace(tmin, tmax, nt)
        amplitudes = num.ones_like(times)
        if times.size > 1:
            t_edges = num.linspace(
                tmin - 0.5 * deltat, tmax + 0.5 * deltat, nt + 1)
            # Piecewise-linear description of the boxcar (zero-width
            # ramps at both ends).
            t = tmin_stf + self.duration * num.array(
                [0.0, 0.0, 1.0, 1.0], dtype=float)
            f = num.array([0., 1., 1., 0.], dtype=float)
            amplitudes = util.plf_integrate_piecewise(t_edges, t, f)
            amplitudes /= num.sum(amplitudes)

        # Residual centroid error introduced by snapping to the raster.
        tshift = (num.sum(amplitudes * times) - self.centroid_time(tref))

        return sshift(times, amplitudes, -tshift, deltat)

    def base_key(self):
        return (type(self).__name__, self.duration, self.anchor)
class TriangularSTF(STF):

    '''
    Triangular type source time function.

    .. figure :: /static/stf-TriangularSTF.svg
        :width: 40%
        :align: center
        :alt: triangular source time function
    '''

    duration = Float.T(
        default=0.0,
        help='baseline of the triangle')

    peak_ratio = Float.T(
        default=0.5,
        help='fraction of time compared to duration, '
             'when the maximum amplitude is reached')

    anchor = Float.T(
        default=0.0,
        help='anchor point with respect to source.time: ('
             '-1.0: left -> source duration [0, T] ~ hypocenter time, '
             ' 0.0: center -> source duration [-T/2, T/2] ~ centroid time, '
             '+1.0: right -> source duration [-T, 0] ~ rupture end time)')

    @classmethod
    def factor_duration_to_effective(cls, peak_ratio=None):
        # Conversion factor between baseline duration and effective
        # duration, depending on the triangle's asymmetry.
        if peak_ratio is None:
            peak_ratio = cls.peak_ratio.default()

        return math.sqrt((peak_ratio**2 - peak_ratio + 1.0) * 2.0 / 3.0)

    def __init__(self, effective_duration=None, **kwargs):
        if effective_duration is not None:
            kwargs['duration'] = effective_duration / \
                self.factor_duration_to_effective(
                    kwargs.get('peak_ratio', None))

        STF.__init__(self, **kwargs)

    @property
    def centroid_ratio(self):
        # Position of the triangle's centroid as a fraction of duration.
        ra = self.peak_ratio
        rb = 1.0 - ra
        return self.peak_ratio + (rb**2 / 3. - ra**2 / 3.) / (ra + rb)

    def centroid_time(self, tref):
        ca = self.centroid_ratio
        cb = 1.0 - ca
        if self.anchor <= 0.:
            return tref - ca * self.duration * self.anchor
        else:
            return tref - cb * self.duration * self.anchor

    @property
    def effective_duration(self):
        return self.duration * self.factor_duration_to_effective(
            self.peak_ratio)

    def tminmax_stf(self, tref):
        # Support interval of the triangle for the given reference time
        # and anchor setting.
        ca = self.centroid_ratio
        cb = 1.0 - ca
        if self.anchor <= 0.:
            tmin_stf = tref - ca * self.duration * (self.anchor + 1.)
            tmax_stf = tmin_stf + self.duration
        else:
            tmax_stf = tref + cb * self.duration * (1. - self.anchor)
            tmin_stf = tmax_stf - self.duration

        return tmin_stf, tmax_stf

    def discretize_t(self, deltat, tref):
        '''
        Discretize the triangle onto the sampling raster.

        Amplitudes are exact cell integrals of the triangle, normalized
        to unit sum.
        '''
        tmin_stf, tmax_stf = self.tminmax_stf(tref)

        tmin = round(tmin_stf / deltat) * deltat
        tmax = round(tmax_stf / deltat) * deltat
        nt = int(round((tmax - tmin) / deltat)) + 1
        if nt > 1:
            t_edges = num.linspace(
                tmin - 0.5 * deltat, tmax + 0.5 * deltat, nt + 1)
            # Piecewise-linear description of the triangle.
            t = tmin_stf + self.duration * num.array(
                [0.0, self.peak_ratio, 1.0], dtype=float)
            f = num.array([0., 1., 0.], dtype=float)
            amplitudes = util.plf_integrate_piecewise(t_edges, t, f)
            amplitudes /= num.sum(amplitudes)
        else:
            amplitudes = num.ones(1)

        times = num.linspace(tmin, tmax, nt)
        return times, amplitudes

    def base_key(self):
        return (
            type(self).__name__, self.duration, self.peak_ratio, self.anchor)
class HalfSinusoidSTF(STF):

    '''
    Half sinusoid type source time function.

    .. figure :: /static/stf-HalfSinusoidSTF.svg
        :width: 40%
        :align: center
        :alt: half-sinusouid source time function
    '''

    duration = Float.T(
        default=0.0,
        help='duration of the half-sinusoid (baseline)')

    anchor = Float.T(
        default=0.0,
        help='anchor point with respect to source.time: ('
             '-1.0: left -> source duration [0, T] ~ hypocenter time, '
             ' 0.0: center -> source duration [-T/2, T/2] ~ centroid time, '
             '+1.0: right -> source duration [-T, 0] ~ rupture end time)')

    exponent = Int.T(
        default=1,
        help='set to 2 to use square of the half-period sinusoidal function.')

    def __init__(self, effective_duration=None, **kwargs):
        # ``effective_duration`` is an alternative way of giving
        # ``duration``; the conversion depends on the chosen exponent.
        if effective_duration is not None:
            kwargs['duration'] = effective_duration / \
                self.factor_duration_to_effective(
                    kwargs.get('exponent', 1))

        STF.__init__(self, **kwargs)

    @classmethod
    def factor_duration_to_effective(cls, exponent):
        # Ratio of effective duration to baseline duration for each
        # supported exponent.
        if exponent == 1:
            return math.sqrt(3.0 * math.pi**2 - 24.0) / math.pi
        elif exponent == 2:
            return math.sqrt(math.pi**2 - 6) / math.pi
        else:
            raise ValueError('Exponent for HalfSinusoidSTF must be 1 or 2.')

    @property
    def effective_duration(self):
        return self.duration * self.factor_duration_to_effective(self.exponent)

    def centroid_time(self, tref):
        return tref - 0.5 * self.duration * self.anchor

    def discretize_t(self, deltat, tref):
        '''
        Discretize the STF onto the sampling raster.

        Amplitudes are exact integrals of the (possibly squared)
        half-sinusoid over each sample cell, normalized to unit sum.
        '''
        tmin_stf = tref - self.duration * (self.anchor + 1.) * 0.5
        tmax_stf = tref + self.duration * (1. - self.anchor) * 0.5
        tmin = round(tmin_stf / deltat) * deltat
        tmax = round(tmax_stf / deltat) * deltat
        nt = int(round((tmax - tmin) / deltat)) + 1
        if nt > 1:
            # Cell edges, clipped to the STF's support.
            t_edges = num.maximum(tmin_stf, num.minimum(tmax_stf, num.linspace(
                tmin - 0.5 * deltat, tmax + 0.5 * deltat, nt + 1)))

            # Antiderivative of the STF, evaluated at the cell edges.
            if self.exponent == 1:
                fint = -num.cos(
                    (t_edges - tmin_stf) * (math.pi / self.duration))

            elif self.exponent == 2:
                fint = (t_edges - tmin_stf) / self.duration \
                    - 1.0 / (2.0 * math.pi) * num.sin(
                        (t_edges - tmin_stf) * (2.0 * math.pi / self.duration))
            else:
                raise ValueError(
                    'Exponent for HalfSinusoidSTF must be 1 or 2.')

            amplitudes = fint[1:] - fint[:-1]
            amplitudes /= num.sum(amplitudes)
        else:
            amplitudes = num.ones(1)

        times = num.linspace(tmin, tmax, nt)
        return times, amplitudes

    def base_key(self):
        # Bug fix: include ``exponent`` in the key. The discretization
        # depends on it, so STFs differing only in exponent must not
        # share discretization / GF stacking results.
        return (type(self).__name__, self.duration, self.anchor,
                self.exponent)
class SmoothRampSTF(STF):
    '''
    Smooth-ramp type source time function for near-field displacement.
    Based on moment function of double-couple point source proposed by Bruestle
    and Mueller (PEPI, 1983).

    .. [1] W. Bruestle, G. Mueller (1983), Moment and duration of shallow
        earthquakes from Love-wave modelling for regional distances, PEPI 32,
        312-324.

    .. figure :: /static/stf-SmoothRampSTF.svg
        :width: 40%
        :alt: smooth ramp source time function
    '''
    duration = Float.T(
        default=0.0,
        help='duration of the ramp (baseline)')

    rise_ratio = Float.T(
        default=0.5,
        help='fraction of time compared to duration, '
             'when the maximum amplitude is reached')

    anchor = Float.T(
        default=0.0,
        help='anchor point with respect to source.time: ('
             '-1.0: left -> source duration ``[0, T]`` ~ hypocenter time, '
             '0.0: center -> source duration ``[-T/2, T/2]`` ~ centroid time, '
             '+1.0: right -> source duration ``[-T, 0]`` ~ rupture end time)')

    def discretize_t(self, deltat, tref):
        '''
        Discretize the smooth ramp onto the sampling raster.
        '''
        tmin_stf = tref - self.duration * (self.anchor + 1.) * 0.5
        tmax_stf = tref + self.duration * (1. - self.anchor) * 0.5
        tmin = round(tmin_stf / deltat) * deltat
        tmax = round(tmax_stf / deltat) * deltat
        # Duration snapped to the sampling raster.
        D = round((tmax - tmin) / deltat) * deltat
        nt = int(round(D / deltat)) + 1
        times = num.linspace(tmin, tmax, nt)
        if nt > 1:
            rise_time = self.rise_ratio * self.duration
            amplitudes = num.ones_like(times)
            tp = tmin + rise_time
            # Cosine-based smooth ramp on [tmin, tp] (after Bruestle &
            # Mueller, 1983); constant afterwards.
            ii = num.where(times <= tp)
            t_inc = times[ii]
            a = num.cos(num.pi * (t_inc - tmin_stf) / rise_time)
            b = num.cos(3 * num.pi * (t_inc - tmin_stf) / rise_time) - 1.0
            amplitudes[ii] = (9. / 16.) * (1 - a + (1. / 9.) * b)

            amplitudes /= num.sum(amplitudes)
        else:
            amplitudes = num.ones(1)

        return times, amplitudes

    def base_key(self):
        return (type(self).__name__,
                self.duration, self.rise_ratio, self.anchor)
class ResonatorSTF(STF):
    '''
    Simple resonator like source time function.

    .. math ::

        f(t) = 0 for t < 0
        f(t) = e^{-t/tau} * sin(2 * pi * f * t)


    .. figure :: /static/stf-SmoothRampSTF.svg
        :width: 40%
        :alt: smooth ramp source time function
    '''

    duration = Float.T(
        default=0.0,
        help='decay time')

    frequency = Float.T(
        default=1.0,
        help='resonance frequency')

    def discretize_t(self, deltat, tref):
        # Sample the damped sinusoid over three decay times starting at
        # the onset tref, on the store's sampling raster.
        tmin_stf = tref
        tmax_stf = tref + self.duration * 3
        tmin = math.floor(tmin_stf / deltat) * deltat
        tmax = math.ceil(tmax_stf / deltat) * deltat
        times = util.arange2(tmin, tmax, deltat)
        t_rel = times - tref
        decay = num.exp(-t_rel / self.duration)
        oscillation = num.sin(2.0 * num.pi * self.frequency * t_rel)

        return times, decay * oscillation

    def base_key(self):
        return (type(self).__name__, self.duration, self.frequency)
class STFMode(StringChoice):
    '''
    Mode of application of the source time function: ``'pre'`` applies
    it during source discretization (before GF stacking), ``'post'``
    applies it to the stacked synthetics.
    '''
    choices = ['pre', 'post']
1134class Source(Location, Cloneable):
1135 '''
1136 Base class for all source models.
1137 '''
    # Free-form label for the source; not used in computations.
    name = String.T(optional=True, default='')

    time = Timestamp.T(
        default=Timestamp.D('1970-01-01 00:00:00'),
        help='source origin time.')

    stf = STF.T(
        optional=True,
        help='source time function.')

    # See STFMode: 'pre' applies the STF during discretization, 'post'
    # applies it to the stacked synthetics.
    stf_mode = STFMode.T(
        default='post',
        help='whether to apply source time function in pre or '
             'post-processing.')
    def __init__(self, **kwargs):
        # Delegate to Location; all source parameters arrive as kwargs.
        Location.__init__(self, **kwargs)
    def update(self, **kwargs):
        '''
        Change some of the source models parameters.

        Example::

          >>> from pyrocko import gf
          >>> s = gf.DCSource()
          >>> s.update(strike=66., dip=33.)
          >>> print(s)
          --- !pf.DCSource
          depth: 0.0
          time: 1970-01-01 00:00:00
          magnitude: 6.0
          strike: 66.0
          dip: 33.0
          rake: 0.0

        '''

        # __setitem__ (from Cloneable) rejects unknown parameter names.
        for (k, v) in kwargs.items():
            self[k] = v
    def grid(self, **variables):
        '''
        Create grid of source model variations.

        :param variables: :py:class:`Range` objects keyed by parameter
            name.
        :returns: :py:class:`SourceGrid` instance.

        Example::

          >>> from pyrocko import gf
          >>> base = DCSource()
          >>> R = gf.Range
          >>> for s in base.grid(depth=R('0 .. 10k : 1k')):
          ...     print(s.depth)

        '''
        return SourceGrid(base=self, variables=variables)
    def base_key(self):
        '''
        Get key to decide about source discretization / GF stack sharing.

        When two source models differ only in amplitude and origin time, the
        discretization and the GF stacking can be done only once for a unit
        amplitude and a zero origin time and the amplitude and origin times of
        the seismograms can be applied during post-processing of the synthetic
        seismogram.

        For any derived parameterized source model, this method is called to
        decide if discretization and stacking of the source should be shared.
        When two source models return an equal vector of values discretization
        is shared.
        '''
        # Position, origin time, concrete type, and the pre-stacking STF
        # together determine whether stacking may be shared.
        return (self.depth, self.lat, self.north_shift,
                self.lon, self.east_shift, self.time, type(self).__name__) + \
            self.effective_stf_pre().base_key()
    def get_factor(self):
        '''
        Get the scaling factor to be applied during post-processing.

        Discretization of the base seismogram is usually done for a unit
        amplitude, because a common factor can be efficiently multiplied to
        final seismograms. This eliminates the need to repeat the stacking
        when creating seismograms for a series of source models only
        differing in amplitude.

        This method should return the scaling factor to apply in the
        post-processing (often this is simply the scalar moment of the
        source).
        '''

        return 1.0
1231 def effective_stf_pre(self):
1232 '''
1233 Return the STF applied before stacking of the Green's functions.
1235 This STF is used during discretization of the parameterized source
1236 models, i.e. to produce a temporal distribution of point sources.
1238 Handling of the STF before stacking of the GFs is less efficient but
1239 allows to use different source time functions for different parts of
1240 the source.
1241 '''
1243 if self.stf is not None and self.stf_mode == 'pre':
1244 return self.stf
1245 else:
1246 return g_unit_pulse
1248 def effective_stf_post(self):
1249 '''
1250 Return the STF applied after stacking of the Green's fuctions.
1252 This STF is used in the post-processing of the synthetic seismograms.
1254 Handling of the STF after stacking of the GFs is usually more efficient
1255 but is only possible when a common STF is used for all subsources.
1256 '''
1258 if self.stf is not None and self.stf_mode == 'post':
1259 return self.stf
1260 else:
1261 return g_unit_pulse
1263 def _dparams_base(self):
1264 return dict(times=arr(self.time),
1265 lat=self.lat, lon=self.lon,
1266 north_shifts=arr(self.north_shift),
1267 east_shifts=arr(self.east_shift),
1268 depths=arr(self.depth))
1270 def _hash(self):
1271 sha = sha1()
1272 for k in self.base_key():
1273 sha.update(str(k).encode())
1274 return sha.hexdigest()
1276 def _dparams_base_repeated(self, times):
1277 if times is None:
1278 return self._dparams_base()
1280 nt = times.size
1281 north_shifts = num.repeat(self.north_shift, nt)
1282 east_shifts = num.repeat(self.east_shift, nt)
1283 depths = num.repeat(self.depth, nt)
1284 return dict(times=times,
1285 lat=self.lat, lon=self.lon,
1286 north_shifts=north_shifts,
1287 east_shifts=east_shifts,
1288 depths=depths)
1290 def pyrocko_event(self, store=None, target=None, **kwargs):
1291 duration = None
1292 if self.stf:
1293 duration = self.stf.effective_duration
1295 return model.Event(
1296 lat=self.lat,
1297 lon=self.lon,
1298 north_shift=self.north_shift,
1299 east_shift=self.east_shift,
1300 time=self.time,
1301 name=self.name,
1302 depth=self.depth,
1303 duration=duration,
1304 **kwargs)
    def geometry(self, **kwargs):
        '''
        Get the geometry of this source; to be implemented by subclasses.

        :raises NotImplementedError: always, on the base class.
        '''
        raise NotImplementedError
1309 def outline(self, cs='xyz'):
1310 points = num.atleast_2d(num.zeros([1, 3]))
1312 points[:, 0] += self.north_shift
1313 points[:, 1] += self.east_shift
1314 points[:, 2] += self.depth
1315 if cs == 'xyz':
1316 return points
1317 elif cs == 'xy':
1318 return points[:, :2]
1319 elif cs in ('latlon', 'lonlat'):
1320 latlon = ne_to_latlon(
1321 self.lat, self.lon, points[:, 0], points[:, 1])
1323 latlon = num.array(latlon).T
1324 if cs == 'latlon':
1325 return latlon
1326 else:
1327 return latlon[:, ::-1]
1329 @classmethod
1330 def from_pyrocko_event(cls, ev, **kwargs):
1331 if ev.depth is None:
1332 raise ConversionError(
1333 'Cannot convert event object to source object: '
1334 'no depth information available')
1336 stf = None
1337 if ev.duration is not None:
1338 stf = HalfSinusoidSTF(effective_duration=ev.duration)
1340 d = dict(
1341 name=ev.name,
1342 time=ev.time,
1343 lat=ev.lat,
1344 lon=ev.lon,
1345 north_shift=ev.north_shift,
1346 east_shift=ev.east_shift,
1347 depth=ev.depth,
1348 stf=stf)
1349 d.update(kwargs)
1350 return cls(**d)
    def get_magnitude(self):
        '''
        Get the moment magnitude of this source.

        :raises NotImplementedError: if the concrete source type does not
            provide a magnitude.
        '''
        raise NotImplementedError(
            '%s does not implement get_magnitude()'
            % self.__class__.__name__)
class SourceWithMagnitude(Source):
    '''
    Base class for sources containing a moment magnitude.
    '''

    magnitude = Float.T(
        default=6.0,
        help='Moment magnitude Mw as in [Hanks and Kanamori, 1979]')

    def __init__(self, **kwargs):
        # Accept 'moment' as an alternative to 'magnitude'; an explicitly
        # given magnitude takes precedence.
        if 'moment' in kwargs:
            mom = kwargs.pop('moment')
            if 'magnitude' not in kwargs:
                kwargs['magnitude'] = float(pmt.moment_to_magnitude(mom))

        Source.__init__(self, **kwargs)

    @property
    def moment(self):
        '''
        Scalar moment [Nm], derived from :py:attr:`magnitude`.
        '''
        return float(pmt.magnitude_to_moment(self.magnitude))

    @moment.setter
    def moment(self, value):
        self.magnitude = float(pmt.moment_to_magnitude(value))

    def pyrocko_event(self, store=None, target=None, **kwargs):
        return Source.pyrocko_event(
            self, store, target,
            magnitude=self.magnitude,
            **kwargs)

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        d = {}
        # Use an explicit None check so that a valid magnitude of 0.0 is
        # not silently dropped (0.0 is falsy).
        if ev.magnitude is not None:
            d.update(magnitude=ev.magnitude)

        d.update(kwargs)
        return super(SourceWithMagnitude, cls).from_pyrocko_event(ev, **d)

    def get_magnitude(self):
        return self.magnitude
class DerivedMagnitudeError(ValidationError):
    '''
    Raised when the magnitude of a source cannot be derived, or when
    conflicting size parameters are given.
    '''
    pass
class SourceWithDerivedMagnitude(Source):
    '''
    Base class for sources whose magnitude is derived from other parameters.
    '''

    class __T(Source.T):
        # Extend guts validation so that parameter conflicts are detected
        # when the object is validated.

        def validate_extra(self, val):
            Source.T.validate_extra(self, val)
            val.check_conflicts()

    def check_conflicts(self):
        '''
        Check for parameter conflicts.

        To be overloaded in subclasses. Raises :py:exc:`DerivedMagnitudeError`
        on conflicts.
        '''
        pass

    def get_magnitude(self, store=None, target=None):
        # Subclasses are expected to override this; the base class cannot
        # derive a magnitude.
        raise DerivedMagnitudeError('No magnitude set.')

    def get_moment(self, store=None, target=None):
        '''
        Get the scalar moment [Nm], derived from the magnitude.
        '''
        return float(pmt.magnitude_to_moment(
            self.get_magnitude(store, target)))

    def pyrocko_moment_tensor(self, store=None, target=None):
        raise NotImplementedError(
            '%s does not implement pyrocko_moment_tensor()'
            % self.__class__.__name__)

    def pyrocko_event(self, store=None, target=None, **kwargs):
        # Moment tensor and magnitude are optional on the event: if they
        # cannot be derived, the event is created without them.
        try:
            mt = self.pyrocko_moment_tensor(store, target)
            magnitude = self.get_magnitude()
        except (DerivedMagnitudeError, NotImplementedError):
            mt = None
            magnitude = None

        return Source.pyrocko_event(
            self, store, target,
            moment_tensor=mt,
            magnitude=magnitude,
            **kwargs)
class ExplosionSource(SourceWithDerivedMagnitude):
    '''
    An isotropic explosion point source.

    The size can be given either as a moment magnitude or as a volume
    change; defining both is a conflict (see :py:meth:`check_conflicts`).
    '''

    magnitude = Float.T(
        optional=True,
        help='moment magnitude Mw as in [Hanks and Kanamori, 1979]')

    volume_change = Float.T(
        optional=True,
        help='volume change of the explosion/implosion or '
             'the contracting/extending magmatic source. [m^3]')

    discretized_source_class = meta.DiscretizedExplosionSource

    def __init__(self, **kwargs):
        # Accept 'moment' as an alternative to 'magnitude'; an explicitly
        # given magnitude takes precedence.
        if 'moment' in kwargs:
            mom = kwargs.pop('moment')
            if 'magnitude' not in kwargs:
                kwargs['magnitude'] = float(pmt.moment_to_magnitude(mom))

        SourceWithDerivedMagnitude.__init__(self, **kwargs)

    def base_key(self):
        return SourceWithDerivedMagnitude.base_key(self) + \
            (self.volume_change,)

    def check_conflicts(self):
        if self.magnitude is not None and self.volume_change is not None:
            raise DerivedMagnitudeError(
                'Magnitude and volume_change are both defined.')

    def get_magnitude(self, store=None, target=None):
        '''
        Get the magnitude, derived from the volume change if necessary.
        '''
        self.check_conflicts()

        if self.magnitude is not None:
            return self.magnitude

        elif self.volume_change is not None:
            moment = self.volume_change * \
                self.get_moment_to_volume_change_ratio(store, target)

            return float(pmt.moment_to_magnitude(abs(moment)))
        else:
            # Default to a unit moment source.
            return float(pmt.moment_to_magnitude(1.0))

    def get_volume_change(self, store=None, target=None):
        '''
        Get the volume change, derived from the magnitude if necessary.
        '''
        self.check_conflicts()

        if self.volume_change is not None:
            return self.volume_change

        elif self.magnitude is not None:
            moment = float(pmt.magnitude_to_moment(self.magnitude))
            return moment / self.get_moment_to_volume_change_ratio(
                store, target)

        else:
            # Bug fix: pass the target along so its interpolation setting
            # is honoured also here (previously the target was dropped and
            # 'multilinear' was always used).
            return 1.0 / self.get_moment_to_volume_change_ratio(
                store, target)

    def get_moment_to_volume_change_ratio(self, store, target=None):
        '''
        Get the conversion factor between scalar moment and volume change.

        Uses elastic moduli from the earth model of the given GF ``store``
        at the source position.

        :raises DerivedMagnitudeError: if no store is given or the source
            position is outside the model's bounds.
        '''
        if store is None:
            raise DerivedMagnitudeError(
                'Need earth model to convert between volume change and '
                'magnitude.')

        points = num.array(
            [[self.north_shift, self.east_shift, self.depth]], dtype=float)

        interpolation = target.interpolation if target else 'multilinear'
        try:
            shear_moduli = store.config.get_shear_moduli(
                self.lat, self.lon,
                points=points,
                interpolation=interpolation)[0]

            bulk_moduli = store.config.get_bulk_moduli(
                self.lat, self.lon,
                points=points,
                interpolation=interpolation)[0]
        except meta.OutOfBounds:
            raise DerivedMagnitudeError(
                'Could not get shear modulus at source position.')

        return float(2. * shear_moduli + bulk_moduli)

    def get_factor(self):
        # Scaling is applied during discretization already.
        return 1.0

    def discretize_basesource(self, store, target=None):
        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)

        amplitudes *= self.get_moment(store, target) * math.sqrt(2. / 3.)

        # A negative volume change (implosion) flips the sign of the
        # isotropic amplitudes.
        if self.volume_change is not None:
            if self.volume_change < 0.:
                amplitudes *= -1

        return meta.DiscretizedExplosionSource(
            m0s=amplitudes,
            **self._dparams_base_repeated(times))

    def pyrocko_moment_tensor(self, store=None, target=None):
        a = self.get_moment(store, target) * math.sqrt(2. / 3.)
        return pmt.MomentTensor(m=pmt.symmat6(a, a, a, 0., 0., 0.))
class RectangularExplosionSource(ExplosionSource):
    '''
    Rectangular or line explosion source.
    '''

    discretized_source_class = meta.DiscretizedExplosionSource

    strike = Float.T(
        default=0.0,
        help='strike direction in [deg], measured clockwise from north')

    dip = Float.T(
        default=90.0,
        help='dip angle in [deg], measured downward from horizontal')

    length = Float.T(
        default=0.,
        help='length of rectangular source area [m]')

    width = Float.T(
        default=0.,
        help='width of rectangular source area [m]')

    anchor = StringChoice.T(
        choices=['top', 'top_left', 'top_right', 'center', 'bottom',
                 'bottom_left', 'bottom_right'],
        default='center',
        optional=True,
        help='Anchor point for positioning the plane, can be: ``top, '
             'top_left, top_right, center, bottom, bottom_left, '
             'bottom_right``.')

    nucleation_x = Float.T(
        optional=True,
        help='horizontal position of rupture nucleation in normalized fault '
             'plane coordinates (-1 = left edge, +1 = right edge)')

    nucleation_y = Float.T(
        optional=True,
        help='down-dip position of rupture nucleation in normalized fault '
             'plane coordinates (-1 = upper edge, +1 = lower edge)')

    velocity = Float.T(
        default=3500.,
        help='speed of explosion front [m/s]')

    aggressive_oversampling = Bool.T(
        default=False,
        help='Aggressive oversampling for basesource discretization. '
             "When using 'multilinear' interpolation oversampling has"
             ' practically no effect.')

    def base_key(self):
        return Source.base_key(self) + (self.strike, self.dip, self.length,
                                        self.width, self.nucleation_x,
                                        self.nucleation_y, self.velocity,
                                        self.anchor)

    def discretize_basesource(self, store, target=None):
        '''
        Discretize the plane into a cloud of sub-sources.
        '''
        # Map normalized nucleation coordinates [-1, 1] to offsets [m]
        # from the plane's center.
        if self.nucleation_x is not None:
            nucx = self.nucleation_x * 0.5 * self.length
        else:
            nucx = None

        if self.nucleation_y is not None:
            nucy = self.nucleation_y * 0.5 * self.width
        else:
            nucy = None

        stf = self.effective_stf_pre()

        points, times, amplitudes, dl, dw, nl, nw = discretize_rect_source(
            store.config.deltas, store.config.deltat,
            self.time, self.north_shift, self.east_shift, self.depth,
            self.strike, self.dip, self.length, self.width, self.anchor,
            self.velocity, stf=stf, nucleation_x=nucx, nucleation_y=nucy)

        # Normalize, then scale with the total moment.
        amplitudes /= num.sum(amplitudes)
        amplitudes *= self.get_moment(store, target)

        return meta.DiscretizedExplosionSource(
            lat=self.lat,
            lon=self.lon,
            times=times,
            north_shifts=points[:, 0],
            east_shifts=points[:, 1],
            depths=points[:, 2],
            m0s=amplitudes)

    def outline(self, cs='xyz'):
        '''
        Get the outline of the rectangular source in the requested
        coordinate system (see :ref:`coordinate-system-names`).
        '''
        points = outline_rect_source(self.strike, self.dip, self.length,
                                     self.width, self.anchor)

        points[:, 0] += self.north_shift
        points[:, 1] += self.east_shift
        points[:, 2] += self.depth
        if cs == 'xyz':
            return points
        elif cs == 'xy':
            return points[:, :2]
        elif cs in ('latlon', 'lonlat'):
            latlon = ne_to_latlon(
                self.lat, self.lon, points[:, 0], points[:, 1])

            latlon = num.array(latlon).T
            if cs == 'latlon':
                return latlon
            else:
                return latlon[:, ::-1]

    def get_nucleation_abs_coord(self, cs='xy'):
        '''
        Get absolute coordinates of the nucleation point.

        Returns ``(None, None)`` when the nucleation point is undefined.
        '''
        # Robustness fix: also bail out when only nucleation_y is missing,
        # instead of passing None into from_plane_coords.
        if self.nucleation_x is None or self.nucleation_y is None:
            return None, None

        coords = from_plane_coords(self.strike, self.dip, self.length,
                                   self.width, self.depth, self.nucleation_x,
                                   self.nucleation_y, lat=self.lat,
                                   lon=self.lon, north_shift=self.north_shift,
                                   east_shift=self.east_shift, cs=cs)
        return coords
class DCSource(SourceWithMagnitude):
    '''
    A double-couple point source.
    '''

    strike = Float.T(
        default=0.0,
        help='strike direction in [deg], measured clockwise from north')

    dip = Float.T(
        default=90.0,
        help='dip angle in [deg], measured downward from horizontal')

    rake = Float.T(
        default=0.0,
        help='rake angle in [deg], '
             'measured counter-clockwise from right-horizontal '
             'in on-plane view')

    discretized_source_class = meta.DiscretizedMTSource

    def base_key(self):
        # The orientation angles determine the GF stack; the magnitude is
        # applied as a factor in post-processing (see get_factor).
        return Source.base_key(self) + (self.strike, self.dip, self.rake)

    def get_factor(self):
        # Post-processing scaling: the scalar moment.
        return float(pmt.magnitude_to_moment(self.magnitude))

    def discretize_basesource(self, store, target=None):
        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)

        m6 = pmt.MomentTensor(
            strike=self.strike, dip=self.dip, rake=self.rake).m6()

        return meta.DiscretizedMTSource(
            m6s=m6[num.newaxis, :] * amplitudes[:, num.newaxis],
            **self._dparams_base_repeated(times))

    def pyrocko_moment_tensor(self, store=None, target=None):
        return pmt.MomentTensor(
            strike=self.strike,
            dip=self.dip,
            rake=self.rake,
            scalar_moment=self.moment)

    def pyrocko_event(self, store=None, target=None, **kwargs):
        return SourceWithMagnitude.pyrocko_event(
            self, store, target,
            moment_tensor=self.pyrocko_moment_tensor(store, target),
            **kwargs)

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        d = {}
        mt = ev.moment_tensor
        if mt:
            (strike, dip, rake), _ = mt.both_strike_dip_rake()
            d = dict(
                strike=float(strike),
                dip=float(dip),
                rake=float(rake),
                magnitude=float(mt.moment_magnitude()))

        d.update(kwargs)
        return super(DCSource, cls).from_pyrocko_event(ev, **d)
class CLVDSource(SourceWithMagnitude):
    '''
    A pure CLVD point source.
    '''

    discretized_source_class = meta.DiscretizedMTSource

    azimuth = Float.T(
        default=0.0,
        help='azimuth direction of largest dipole, clockwise from north [deg]')

    dip = Float.T(
        default=90.,
        help='dip direction of largest dipole, downward from horizontal [deg]')

    def base_key(self):
        return Source.base_key(self) + (self.azimuth, self.dip)

    def get_factor(self):
        # Post-processing scaling: the scalar moment.
        return float(pmt.magnitude_to_moment(self.magnitude))

    @property
    def m6(self):
        '''
        Moment tensor (6 components) of the oriented CLVD.
        '''
        # CLVD eigenvalues (-a/2, -a/2, a), rotated into the orientation
        # given by azimuth and dip.
        a = math.sqrt(4. / 3.) * self.get_factor()
        m = pmt.symmat6(-0.5 * a, -0.5 * a, a, 0., 0., 0.)
        rotmat1 = pmt.euler_to_matrix(
            d2r * (self.dip - 90.),
            d2r * (self.azimuth - 90.),
            0.)
        m = num.dot(rotmat1.T, num.dot(m, rotmat1))
        return pmt.to6(m)

    @property
    def m6_astuple(self):
        return tuple(self.m6.tolist())

    def discretize_basesource(self, store, target=None):
        # Divide by the factor here because it is re-applied during
        # post-processing (see get_factor).
        factor = self.get_factor()
        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)
        return meta.DiscretizedMTSource(
            m6s=self.m6[num.newaxis, :] * amplitudes[:, num.newaxis] / factor,
            **self._dparams_base_repeated(times))

    def pyrocko_moment_tensor(self, store=None, target=None):
        return pmt.MomentTensor(m=pmt.symmat6(*self.m6_astuple))

    def pyrocko_event(self, store=None, target=None, **kwargs):
        # Reuse the already computed moment tensor instead of deriving it
        # a second time.
        mt = self.pyrocko_moment_tensor(store, target)
        return Source.pyrocko_event(
            self, store, target,
            moment_tensor=mt,
            magnitude=float(mt.moment_magnitude()),
            **kwargs)
class VLVDSource(SourceWithDerivedMagnitude):
    '''
    Volumetric linear vector dipole source.

    This source is a parameterization for a restricted moment tensor point
    source, useful to represent dyke or sill like inflation or deflation
    sources. The restriction is such that the moment tensor is rotationally
    symmetric. It can be represented by a superposition of a linear vector
    dipole (here we use a CLVD for convenience) and an isotropic component.
    The restricted moment tensor has 4 degrees of freedom: 2 independent
    eigenvalues and 2 rotation angles orienting the symmetry axis.

    In this parameterization, the isotropic component is controlled by
    ``volume_change``. To define the moment tensor, it must be converted to
    the scalar moment of the MT's isotropic component. For the conversion,
    the shear modulus at the source's position must be known. This value is
    extracted from the earth model defined in the GF store in use.

    The CLVD part is controlled by its scalar moment :math:`M_0`:
    ``clvd_moment``. The sign of ``clvd_moment`` is used to switch between a
    positive or negative CLVD (the sign of the largest eigenvalue).
    '''

    discretized_source_class = meta.DiscretizedMTSource

    azimuth = Float.T(
        default=0.0,
        help='azimuth direction of symmetry axis, clockwise from north [deg].')

    dip = Float.T(
        default=90.,
        help='dip direction of symmetry axis, downward from horizontal [deg].')

    volume_change = Float.T(
        default=0.,
        help='volume change of the inflation/deflation [m^3].')

    clvd_moment = Float.T(
        default=0.,
        help='scalar moment :math:`M_0` of the CLVD component [Nm]. The sign '
             'controls the sign of the CLVD (the sign of its largest '
             'eigenvalue).')

    def get_moment_to_volume_change_ratio(self, store, target):
        '''
        Get the conversion factor between scalar moment and volume change,
        from the elastic moduli at the source position.

        :raises DerivedMagnitudeError: if store or target is missing, or
            the source position is outside the model's bounds.
        '''
        if store is None or target is None:
            raise DerivedMagnitudeError(
                'Need earth model to convert between volume change and '
                'magnitude.')

        points = num.array(
            [[self.north_shift, self.east_shift, self.depth]], dtype=float)

        try:
            shear_moduli = store.config.get_shear_moduli(
                self.lat, self.lon,
                points=points,
                interpolation=target.interpolation)[0]

            bulk_moduli = store.config.get_bulk_moduli(
                self.lat, self.lon,
                points=points,
                interpolation=target.interpolation)[0]
        except meta.OutOfBounds:
            raise DerivedMagnitudeError(
                'Could not get shear modulus at source position.')

        return float(2. * shear_moduli + bulk_moduli)

    def base_key(self):
        return Source.base_key(self) + \
            (self.azimuth, self.dip, self.volume_change, self.clvd_moment)

    def get_magnitude(self, store=None, target=None):
        mt = self.pyrocko_moment_tensor(store, target)
        return float(pmt.moment_to_magnitude(mt.moment))

    def get_m6(self, store, target):
        '''
        Get the 6-component moment tensor: oriented CLVD plus isotropic
        part derived from the volume change.
        '''
        a = math.sqrt(4. / 3.) * self.clvd_moment
        m_clvd = pmt.symmat6(-0.5 * a, -0.5 * a, a, 0., 0., 0.)

        rotmat1 = pmt.euler_to_matrix(
            d2r * (self.dip - 90.),
            d2r * (self.azimuth - 90.),
            0.)
        m_clvd = num.dot(rotmat1.T, num.dot(m_clvd, rotmat1))

        m_iso = self.volume_change * \
            self.get_moment_to_volume_change_ratio(store, target)

        m_iso = pmt.symmat6(m_iso, m_iso, m_iso, 0.,
                            0., 0.,) * math.sqrt(2. / 3)

        m = pmt.to6(m_clvd) + pmt.to6(m_iso)
        return m

    def get_moment(self, store=None, target=None):
        return float(pmt.magnitude_to_moment(
            self.get_magnitude(store, target)))

    def get_m6_astuple(self, store, target):
        m6 = self.get_m6(store, target)
        return tuple(m6.tolist())

    def discretize_basesource(self, store, target=None):
        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)

        m6 = self.get_m6(store, target)

        # Outer product: one scaled copy of the 6-component tensor per STF
        # time sample. The previous in-place multiplication
        # (m6 *= amplitudes / factor) only worked when the STF discretized
        # to a single sample; this form is correct for any number of
        # samples and matches DCSource/MTSource.
        m6s = m6[num.newaxis, :] * amplitudes[:, num.newaxis] / \
            self.get_factor()

        return meta.DiscretizedMTSource(
            m6s=m6s,
            **self._dparams_base_repeated(times))

    def pyrocko_moment_tensor(self, store=None, target=None):
        m6_astuple = self.get_m6_astuple(store, target)
        return pmt.MomentTensor(m=pmt.symmat6(*m6_astuple))
class MTSource(Source):
    '''
    A moment tensor point source.
    '''

    discretized_source_class = meta.DiscretizedMTSource

    mnn = Float.T(
        default=1.,
        help='north-north component of moment tensor in [Nm]')

    mee = Float.T(
        default=1.,
        help='east-east component of moment tensor in [Nm]')

    mdd = Float.T(
        default=1.,
        help='down-down component of moment tensor in [Nm]')

    mne = Float.T(
        default=0.,
        help='north-east component of moment tensor in [Nm]')

    mnd = Float.T(
        default=0.,
        help='north-down component of moment tensor in [Nm]')

    med = Float.T(
        default=0.,
        help='east-down component of moment tensor in [Nm]')

    def __init__(self, **kwargs):
        # Accept 'm6' as a shortcut setting all six components at once.
        if 'm6' in kwargs:
            for (k, v) in zip('mnn mee mdd mne mnd med'.split(),
                              kwargs.pop('m6')):
                kwargs[k] = float(v)

        Source.__init__(self, **kwargs)

    @property
    def m6(self):
        '''
        Moment tensor as 6-component array.
        '''
        return num.array(self.m6_astuple)

    @property
    def m6_astuple(self):
        return (self.mnn, self.mee, self.mdd, self.mne, self.mnd, self.med)

    @m6.setter
    def m6(self, value):
        self.mnn, self.mee, self.mdd, self.mne, self.mnd, self.med = value

    def base_key(self):
        return Source.base_key(self) + self.m6_astuple

    def discretize_basesource(self, store, target=None):
        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)
        return meta.DiscretizedMTSource(
            m6s=self.m6[num.newaxis, :] * amplitudes[:, num.newaxis],
            **self._dparams_base_repeated(times))

    def get_magnitude(self, store=None, target=None):
        # Scalar moment from the full symmetric tensor; the off-diagonal
        # components are counted twice in the sum of squares.
        m6 = self.m6
        return pmt.moment_to_magnitude(
            math.sqrt(num.sum(m6[0:3]**2) + 2.0 * num.sum(m6[3:6]**2)) /
            math.sqrt(2.))

    def pyrocko_moment_tensor(self, store=None, target=None):
        return pmt.MomentTensor(m=pmt.symmat6(*self.m6_astuple))

    def pyrocko_event(self, store=None, target=None, **kwargs):
        # Reuse the already computed moment tensor instead of deriving it
        # a second time.
        mt = self.pyrocko_moment_tensor(store, target)
        return Source.pyrocko_event(
            self, store, target,
            moment_tensor=mt,
            magnitude=float(mt.moment_magnitude()),
            **kwargs)

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        d = {}
        mt = ev.moment_tensor
        if mt:
            d.update(m6=tuple(map(float, mt.m6())))
        else:
            # Without a moment tensor, fall back to an isotropic source
            # scaled to the event's magnitude, if available.
            if ev.magnitude is not None:
                mom = pmt.magnitude_to_moment(ev.magnitude)
                v = math.sqrt(2. / 3.) * mom
                d.update(m6=(v, v, v, 0., 0., 0.))

        d.update(kwargs)
        return super(MTSource, cls).from_pyrocko_event(ev, **d)
# Mapping of anchor name to normalized (x, y) position on the fault plane:
# x runs along strike (-1 = left edge, +1 = right edge), y runs down dip
# (-1 = upper edge, +1 = lower edge).
map_anchor = {
    'center': (0.0, 0.0),
    'center_left': (-1.0, 0.0),
    'center_right': (1.0, 0.0),
    'top': (0.0, -1.0),
    'top_left': (-1.0, -1.0),
    'top_right': (1.0, -1.0),
    'bottom': (0.0, 1.0),
    'bottom_left': (-1.0, 1.0),
    'bottom_right': (1.0, 1.0)}
class RectangularSource(SourceWithDerivedMagnitude):
    '''
    Classical Haskell source model modified for bilateral rupture.
    '''

    discretized_source_class = meta.DiscretizedMTSource

    magnitude = Float.T(
        optional=True,
        help='moment magnitude Mw as in [Hanks and Kanamori, 1979]')

    strike = Float.T(
        default=0.0,
        help='strike direction in [deg], measured clockwise from north')

    dip = Float.T(
        default=90.0,
        help='dip angle in [deg], measured downward from horizontal')

    rake = Float.T(
        default=0.0,
        help='rake angle in [deg], '
             'measured counter-clockwise from right-horizontal '
             'in on-plane view')

    length = Float.T(
        default=0.,
        help='length of rectangular source area [m]')

    width = Float.T(
        default=0.,
        help='width of rectangular source area [m]')

    anchor = StringChoice.T(
        choices=['top', 'top_left', 'top_right', 'center', 'bottom',
                 'bottom_left', 'bottom_right'],
        default='center',
        optional=True,
        help='Anchor point for positioning the plane, can be: ``top, '
             'top_left, top_right, center, bottom, bottom_left, '
             'bottom_right``.')

    nucleation_x = Float.T(
        optional=True,
        help='horizontal position of rupture nucleation in normalized fault '
             'plane coordinates (``-1.`` = left edge, ``+1.`` = right edge)')

    nucleation_y = Float.T(
        optional=True,
        help='down-dip position of rupture nucleation in normalized fault '
             'plane coordinates (``-1.`` = upper edge, ``+1.`` = lower edge)')

    velocity = Float.T(
        default=3500.,
        help='speed of rupture front [m/s]')

    slip = Float.T(
        optional=True,
        help='Slip on the rectangular source area [m]')

    opening_fraction = Float.T(
        default=0.,
        help='Determines fraction of slip related to opening. '
             '(``-1``: pure tensile closing, '
             '``0``: pure shear, '
             '``1``: pure tensile opening)')

    decimation_factor = Int.T(
        optional=True,
        default=1,
        help='Sub-source decimation factor, a larger decimation will'
             ' make the result inaccurate but shorten the necessary'
             ' computation time (use for testing purposes only).')

    aggressive_oversampling = Bool.T(
        default=False,
        help='Aggressive oversampling for basesource discretization. '
             "When using 'multilinear' interpolation oversampling has"
             ' practically no effect.')
    def __init__(self, **kwargs):
        # Accept 'moment' as an alternative to 'magnitude'; an explicitly
        # given magnitude takes precedence.
        if 'moment' in kwargs:
            mom = kwargs.pop('moment')
            if 'magnitude' not in kwargs:
                kwargs['magnitude'] = float(pmt.moment_to_magnitude(mom))

        SourceWithDerivedMagnitude.__init__(self, **kwargs)
2118 def base_key(self):
2119 return SourceWithDerivedMagnitude.base_key(self) + (
2120 self.magnitude,
2121 self.slip,
2122 self.strike,
2123 self.dip,
2124 self.rake,
2125 self.length,
2126 self.width,
2127 self.nucleation_x,
2128 self.nucleation_y,
2129 self.velocity,
2130 self.decimation_factor,
2131 self.anchor)
2133 def check_conflicts(self):
2134 if self.magnitude is not None and self.slip is not None:
2135 raise DerivedMagnitudeError(
2136 'Magnitude and slip are both defined.')
    def get_magnitude(self, store=None, target=None):
        '''
        Get the moment magnitude, derived from slip if no magnitude is set.

        :raises DerivedMagnitudeError: if slip is given but GF store and
            target are not available to extract elastic moduli.
        '''
        self.check_conflicts()
        if self.magnitude is not None:
            return self.magnitude

        elif self.slip is not None:
            if None in (store, target):
                raise DerivedMagnitudeError(
                    'Magnitude for a rectangular source with slip defined '
                    'can only be derived when earth model and target '
                    'interpolation method are available.')

            amplitudes = self._discretize(store, target)[2]
            if amplitudes.ndim == 2:
                # CLVD component has no net moment, leave out
                return float(pmt.moment_to_magnitude(
                    num.sum(num.abs(amplitudes[0:2, :]).sum())))
            else:
                return float(pmt.moment_to_magnitude(num.sum(amplitudes)))

        else:
            # Fall back to a unit moment source.
            return float(pmt.moment_to_magnitude(1.0))
    def get_factor(self):
        # Amplitudes are scaled during discretization (see _discretize), so
        # the post-processing factor is unity.
        return 1.0
2164 def get_slip_tensile(self):
2165 return self.slip * self.opening_fraction
2167 def get_slip_shear(self):
2168 return self.slip - abs(self.get_slip_tensile)
    def _discretize(self, store, target):
        '''
        Discretize the plane into sub-source positions, times and
        amplitudes.

        Returns ``(points, times, amplitudes, dl, dw, nl, nw)``. When slip
        is given and a tensile component is present, ``amplitudes`` is 2D
        with one row per moment-tensor component (see
        ``discretize_basesource``); otherwise it is 1D.
        '''
        # Map normalized nucleation coordinates [-1, 1] to offsets [m]
        # from the plane's center.
        if self.nucleation_x is not None:
            nucx = self.nucleation_x * 0.5 * self.length
        else:
            nucx = None

        if self.nucleation_y is not None:
            nucy = self.nucleation_y * 0.5 * self.width
        else:
            nucy = None

        stf = self.effective_stf_pre()

        points, times, amplitudes, dl, dw, nl, nw = discretize_rect_source(
            store.config.deltas, store.config.deltat,
            self.time, self.north_shift, self.east_shift, self.depth,
            self.strike, self.dip, self.length, self.width, self.anchor,
            self.velocity, stf=stf, nucleation_x=nucx, nucleation_y=nucy,
            decimation_factor=self.decimation_factor,
            aggressive_oversampling=self.aggressive_oversampling)

        if self.slip is not None:
            # Slip given: convert to moment via elastic moduli from the
            # store's earth model.
            if target is not None:
                interpolation = target.interpolation
            else:
                interpolation = 'nearest_neighbor'
                logger.warning(
                    'no target information available, will use '
                    '"nearest_neighbor" interpolation when extracting shear '
                    'modulus from earth model')

            shear_moduli = store.config.get_shear_moduli(
                self.lat, self.lon,
                points=points,
                interpolation=interpolation)

            tensile_slip = self.get_slip_tensile()
            shear_slip = self.slip - abs(tensile_slip)

            amplitudes_total = [shear_moduli * shear_slip]
            if tensile_slip != 0:
                bulk_moduli = store.config.get_bulk_moduli(
                    self.lat, self.lon,
                    points=points,
                    interpolation=interpolation)

                tensile_iso = bulk_moduli * tensile_slip
                tensile_clvd = (2. / 3.) * shear_moduli * tensile_slip

                amplitudes_total.extend([tensile_iso, tensile_clvd])

            # Scale with the STF amplitudes and the sub-source area.
            amplitudes_total = num.vstack(amplitudes_total).squeeze() * \
                amplitudes * dl * dw

        else:
            # normalization to retain total moment
            amplitudes_norm = amplitudes / num.sum(amplitudes)
            moment = self.get_moment(store, target)

            amplitudes_total = [
                amplitudes_norm * moment * (1 - abs(self.opening_fraction))]
            if self.opening_fraction != 0.:
                amplitudes_total.append(
                    amplitudes_norm * self.opening_fraction * moment)

            amplitudes_total = num.vstack(amplitudes_total).squeeze()

        return points, times, num.atleast_1d(amplitudes_total), dl, dw, nl, nw
2239 def discretize_basesource(self, store, target=None):
2241 points, times, amplitudes, dl, dw, nl, nw = self._discretize(
2242 store, target)
2244 mot = pmt.MomentTensor(
2245 strike=self.strike, dip=self.dip, rake=self.rake)
2246 m6s = num.repeat(mot.m6()[num.newaxis, :], times.size, axis=0)
2248 if amplitudes.ndim == 1:
2249 m6s[:, :] *= amplitudes[:, num.newaxis]
2250 elif amplitudes.ndim == 2:
2251 # shear MT components
2252 rotmat1 = pmt.euler_to_matrix(
2253 d2r * self.dip, d2r * self.strike, d2r * -self.rake)
2254 m6s[:, :] *= amplitudes[0, :][:, num.newaxis]
2256 if amplitudes.shape[0] == 2:
2257 # tensile MT components - moment/magnitude input
2258 tensile = pmt.symmat6(1., 1., 3., 0., 0., 0.)
2259 rot_tensile = pmt.to6(
2260 num.dot(rotmat1.T, num.dot(tensile, rotmat1)))
2262 m6s_tensile = rot_tensile[
2263 num.newaxis, :] * amplitudes[1, :][:, num.newaxis]
2264 m6s += m6s_tensile
2266 elif amplitudes.shape[0] == 3:
2267 # tensile MT components - slip input
2268 iso = pmt.symmat6(1., 1., 1., 0., 0., 0.)
2269 clvd = pmt.symmat6(-1., -1., 2., 0., 0., 0.)
2271 rot_iso = pmt.to6(
2272 num.dot(rotmat1.T, num.dot(iso, rotmat1)))
2273 rot_clvd = pmt.to6(
2274 num.dot(rotmat1.T, num.dot(clvd, rotmat1)))
2276 m6s_iso = rot_iso[
2277 num.newaxis, :] * amplitudes[1, :][:, num.newaxis]
2278 m6s_clvd = rot_clvd[
2279 num.newaxis, :] * amplitudes[2, :][:, num.newaxis]
2280 m6s += m6s_iso + m6s_clvd
2281 else:
2282 raise ValueError('Unknwown amplitudes shape!')
2283 else:
2284 raise ValueError(
2285 'Unexpected dimension of {}'.format(amplitudes.ndim))
2287 ds = meta.DiscretizedMTSource(
2288 lat=self.lat,
2289 lon=self.lon,
2290 times=times,
2291 north_shifts=points[:, 0],
2292 east_shifts=points[:, 1],
2293 depths=points[:, 2],
2294 m6s=m6s,
2295 dl=dl,
2296 dw=dw,
2297 nl=nl,
2298 nw=nw)
2300 return ds
    def xy_to_coord(self, x, y, cs='xyz'):
        '''
        Convert normalized fault plane coordinates to real coordinates.

        :param x: along-strike position(s), normalized to ``[-1., 1.]``
        :param y: down-dip position(s), normalized to ``[-1., 1.]``
        :param cs: coordinate system of the result: ``'xyz'``, ``'xy'``,
            ``'latlon'``, ``'lonlat'`` or ``'latlondepth'``
        '''
        ln, wd = self.length, self.width
        strike, dip = self.strike, self.dip

        def array_check(variable):
            # Accept scalars and sequences alike.
            if not isinstance(variable, num.ndarray):
                return num.array(variable)
            else:
                return variable

        x, y = array_check(x), array_check(y)

        if x.shape[0] != y.shape[0]:
            raise ValueError('Shapes of x and y mismatch')

        # Scale the normalized coordinates to the plane's dimensions.
        x, y = x * 0.5 * ln, y * 0.5 * wd

        points = num.hstack((
            x.reshape(-1, 1), y.reshape(-1, 1), num.zeros((x.shape[0], 1))))

        # Shift by the anchor offset.
        anch_x, anch_y = map_anchor[self.anchor]
        points[:, 0] -= anch_x * 0.5 * ln
        points[:, 1] -= anch_y * 0.5 * wd

        # Rotate the in-plane points into strike/dip orientation, then
        # translate to the source position.
        rotmat = num.asarray(
            pmt.euler_to_matrix(dip * d2r, strike * d2r, 0.0))

        points_rot = num.dot(rotmat.T, points.T).T

        points_rot[:, 0] += self.north_shift
        points_rot[:, 1] += self.east_shift
        points_rot[:, 2] += self.depth

        if cs == 'xyz':
            return points_rot
        elif cs == 'xy':
            return points_rot[:, :2]
        elif cs in ('latlon', 'lonlat', 'latlondepth'):
            latlon = ne_to_latlon(
                self.lat, self.lon, points_rot[:, 0], points_rot[:, 1])
            latlon = num.array(latlon).T
            if cs == 'latlon':
                return latlon
            elif cs == 'lonlat':
                return latlon[:, ::-1]
            else:
                return num.concatenate(
                    (latlon, points_rot[:, 2].reshape((len(points_rot), 1))),
                    axis=1)
2352 def outline(self, cs='xyz'):
2353 x = num.array([-1., 1., 1., -1., -1.])
2354 y = num.array([-1., -1., 1., 1., -1.])
2356 return self.xy_to_coord(x, y, cs=cs)
2358 def points_on_source(self, cs='xyz', **kwargs):
2360 points = points_on_rect_source(
2361 self.strike, self.dip, self.length, self.width,
2362 self.anchor, **kwargs)
2364 points[:, 0] += self.north_shift
2365 points[:, 1] += self.east_shift
2366 points[:, 2] += self.depth
2367 if cs == 'xyz':
2368 return points
2369 elif cs == 'xy':
2370 return points[:, :2]
2371 elif cs in ('latlon', 'lonlat', 'latlondepth'):
2372 latlon = ne_to_latlon(
2373 self.lat, self.lon, points[:, 0], points[:, 1])
2375 latlon = num.array(latlon).T
2376 if cs == 'latlon':
2377 return latlon
2378 elif cs == 'lonlat':
2379 return latlon[:, ::-1]
2380 else:
2381 return num.concatenate(
2382 (latlon, points[:, 2].reshape((len(points), 1))),
2383 axis=1)
2385 def geometry(self, *args, **kwargs):
2386 from pyrocko.model import Geometry
2388 ds = self.discretize_basesource(*args, **kwargs)
2389 nx, ny = ds.nl, ds.nw
2391 def patch_outlines_xy(nx, ny):
2392 points = num.zeros((nx * ny, 2))
2393 points[:, 0] = num.tile(num.linspace(-1., 1., nx), ny)
2394 points[:, 1] = num.repeat(num.linspace(-1., 1., ny), nx)
2396 return points
2398 points_ds = patch_outlines_xy(nx + 1, ny + 1)
2399 npoints = (nx + 1) * (ny + 1)
2401 vertices = num.hstack((
2402 num.ones((npoints, 1)) * self.lat,
2403 num.ones((npoints, 1)) * self.lon,
2404 self.xy_to_coord(points_ds[:, 0], points_ds[:, 1], cs='xyz')))
2406 faces = num.array([[
2407 iy * (nx + 1) + ix,
2408 iy * (nx + 1) + ix + 1,
2409 (iy + 1) * (nx + 1) + ix + 1,
2410 (iy + 1) * (nx + 1) + ix,
2411 iy * (nx + 1) + ix]
2412 for iy in range(ny) for ix in range(nx)])
2414 xyz = self.outline('xyz')
2415 latlon = num.ones((5, 2)) * num.array([self.lat, self.lon])
2416 patchverts = num.hstack((latlon, xyz))
2417 face_outlines = patchverts[:-1, :] # last vertex double
2419 geom = Geometry()
2420 geom.setup(vertices, faces)
2421 geom.set_outlines([face_outlines])
2423 if self.stf:
2424 geom.times = num.unique(ds.times)
2426 if self.nucleation_x is not None and self.nucleation_y is not None:
2427 geom.add_property('t_arrival', ds.times)
2429 geom.add_property(
2430 'moment', ds.moments().reshape(ds.nl*ds.nw, -1))
2432 geom.add_property(
2433 'slip', num.ones_like(ds.times) * self.slip)
2435 return geom
2437 def get_nucleation_abs_coord(self, cs='xy'):
2439 if self.nucleation_x is None:
2440 return None, None
2442 coords = from_plane_coords(self.strike, self.dip, self.length,
2443 self.width, self.depth, self.nucleation_x,
2444 self.nucleation_y, lat=self.lat,
2445 lon=self.lon, north_shift=self.north_shift,
2446 east_shift=self.east_shift, cs=cs)
2447 return coords
2449 def pyrocko_moment_tensor(self, store=None, target=None):
2450 return pmt.MomentTensor(
2451 strike=self.strike,
2452 dip=self.dip,
2453 rake=self.rake,
2454 scalar_moment=self.get_moment(store, target))
2456 def pyrocko_event(self, store=None, target=None, **kwargs):
2457 return SourceWithDerivedMagnitude.pyrocko_event(
2458 self, store, target,
2459 **kwargs)
2461 @classmethod
2462 def from_pyrocko_event(cls, ev, **kwargs):
2463 d = {}
2464 mt = ev.moment_tensor
2465 if mt:
2466 (strike, dip, rake), _ = mt.both_strike_dip_rake()
2467 d.update(
2468 strike=float(strike),
2469 dip=float(dip),
2470 rake=float(rake),
2471 magnitude=float(mt.moment_magnitude()))
2473 d.update(kwargs)
2474 return super(RectangularSource, cls).from_pyrocko_event(ev, **d)
class PseudoDynamicRupture(SourceWithDerivedMagnitude):
    '''
    Combined Eikonal and Okada quasi-dynamic rupture model.

    Details are described in :doc:`/topics/pseudo-dynamic-rupture`.
    Note: attribute `stf` is not used so far, but kept for future applications.
    '''

    discretized_source_class = meta.DiscretizedMTSource

    # --- fault plane geometry ---

    strike = Float.T(
        default=0.0,
        help='Strike direction in [deg], measured clockwise from north.')

    dip = Float.T(
        default=0.0,
        help='Dip angle in [deg], measured downward from horizontal.')

    length = Float.T(
        default=10. * km,
        help='Length of rectangular source area in [m].')

    width = Float.T(
        default=5. * km,
        help='Width of rectangular source area in [m].')

    # NOTE: help text is kept in sync with ``choices`` — ``center_left`` and
    # ``center_right`` are not supported.
    anchor = StringChoice.T(
        choices=['top', 'top_left', 'top_right', 'center', 'bottom',
                 'bottom_left', 'bottom_right'],
        default='center',
        optional=True,
        help='Anchor point for positioning the plane, can be: ``top, center, '
             'bottom, top_left, top_right, bottom_left, '
             'bottom_right``.')

    # --- rupture nucleation (guts property pattern: trailing "__" holds the
    # serialized value, exposed through the matching property below) ---

    nucleation_x__ = Array.T(
        default=num.array([0.]),
        dtype=num.float64,
        serialize_as='list',
        help='Horizontal position of rupture nucleation in normalized fault '
             'plane coordinates (``-1.`` = left edge, ``+1.`` = right edge).')

    nucleation_y__ = Array.T(
        default=num.array([0.]),
        dtype=num.float64,
        serialize_as='list',
        help='Down-dip position of rupture nucleation in normalized fault '
             'plane coordinates (``-1.`` = upper edge, ``+1.`` = lower edge).')

    nucleation_time__ = Array.T(
        optional=True,
        help='Time in [s] after origin, when nucleation points defined by '
             '``nucleation_x`` and ``nucleation_y`` rupture.',
        dtype=num.float64,
        serialize_as='list')

    gamma = Float.T(
        default=0.8,
        help='Scaling factor between rupture velocity and S-wave velocity: '
             r':math:`v_r = \gamma * v_s`.')

    # --- patch discretization ---

    nx = Int.T(
        default=2,
        help='Number of discrete source patches in x direction (along '
             'strike).')

    ny = Int.T(
        default=2,
        help='Number of discrete source patches in y direction (down dip).')

    slip = Float.T(
        optional=True,
        help='Maximum slip of the rectangular source [m]. '
             'Setting the slip the tractions/stress field '
             'will be normalized to accommodate the desired maximum slip.')

    rake = Float.T(
        optional=True,
        help='Rake angle in [deg], '
             'measured counter-clockwise from right-horizontal '
             'in on-plane view. Rake is translated into homogeneous tractions '
             'in strike and up-dip direction. ``rake`` is mutually exclusive '
             'with tractions parameter.')

    patches = List.T(
        OkadaSource.T(),
        optional=True,
        help='List of all boundary elements/sub faults/fault patches.')

    patch_mask__ = Array.T(
        dtype=bool,
        serialize_as='list',
        shape=(None,),
        optional=True,
        help='Mask for all boundary elements/sub faults/fault patches. True '
             'leaves the patch in the calculation, False excludes the patch.')

    tractions = TractionField.T(
        optional=True,
        help='Traction field the rupture plane is exposed to. See the '
             ':py:mod:`pyrocko.gf.tractions` module for more details. '
             'If ``tractions=None`` and ``rake`` is given'
             ' :py:class:`~pyrocko.gf.tractions.DirectedTractions` will'
             ' be used.')

    coef_mat = Array.T(
        optional=True,
        help='Coefficient matrix linking traction and dislocation field.',
        dtype=num.float64,
        shape=(None, None))

    # --- numerical tuning ---

    eikonal_decimation = Int.T(
        optional=True,
        default=1,
        help='Sub-source eikonal factor, a smaller eikonal factor will'
             ' increase the accuracy of rupture front calculation but'
             ' increases also the computation time.')

    decimation_factor = Int.T(
        optional=True,
        default=1,
        help='Sub-source decimation factor, a larger decimation will'
             ' make the result inaccurate but shorten the necessary'
             ' computation time (use for testing purposes only).')

    nthreads = Int.T(
        optional=True,
        default=1,
        help='Number of threads for Okada forward modelling, '
             'matrix inversion and calculation of point subsources. '
             'Note: for small/medium matrices 1 thread is most efficient.')

    pure_shear = Bool.T(
        optional=True,
        default=False,
        help='Calculate only shear tractions and omit tensile tractions.')

    smooth_rupture = Bool.T(
        default=True,
        help='Smooth the tractions by weighting partially ruptured'
             ' fault patches.')

    aggressive_oversampling = Bool.T(
        default=False,
        help='Aggressive oversampling for basesource discretization. '
             "When using 'multilinear' interpolation oversampling has"
             ' practically no effect.')
    def __init__(self, **kwargs):
        '''
        Set up the source and check for conflicting attribute settings.

        :raises AttributeError:
            If both ``tractions`` and ``rake`` are given (see
            :py:meth:`check_conflicts`).
        '''
        SourceWithDerivedMagnitude.__init__(self, **kwargs)
        # Cache for rupture time/velocity interpolators, keyed by
        # interpolation method (filled by get_vr_time_interpolators).
        self._interpolators = {}
        self.check_conflicts()
2630 @property
2631 def nucleation_x(self):
2632 return self.nucleation_x__
2634 @nucleation_x.setter
2635 def nucleation_x(self, nucleation_x):
2636 if isinstance(nucleation_x, list):
2637 nucleation_x = num.array(nucleation_x)
2639 elif not isinstance(
2640 nucleation_x, num.ndarray) and nucleation_x is not None:
2642 nucleation_x = num.array([nucleation_x])
2643 self.nucleation_x__ = nucleation_x
2645 @property
2646 def nucleation_y(self):
2647 return self.nucleation_y__
2649 @nucleation_y.setter
2650 def nucleation_y(self, nucleation_y):
2651 if isinstance(nucleation_y, list):
2652 nucleation_y = num.array(nucleation_y)
2654 elif not isinstance(nucleation_y, num.ndarray) \
2655 and nucleation_y is not None:
2656 nucleation_y = num.array([nucleation_y])
2658 self.nucleation_y__ = nucleation_y
2660 @property
2661 def nucleation(self):
2662 nucl_x, nucl_y = self.nucleation_x, self.nucleation_y
2664 if (nucl_x is None) or (nucl_y is None):
2665 return None
2667 assert nucl_x.shape[0] == nucl_y.shape[0]
2669 return num.concatenate(
2670 (nucl_x[:, num.newaxis], nucl_y[:, num.newaxis]), axis=1)
2672 @nucleation.setter
2673 def nucleation(self, nucleation):
2674 if isinstance(nucleation, list):
2675 nucleation = num.array(nucleation)
2677 assert nucleation.shape[1] == 2
2679 self.nucleation_x = nucleation[:, 0]
2680 self.nucleation_y = nucleation[:, 1]
2682 @property
2683 def nucleation_time(self):
2684 return self.nucleation_time__
2686 @nucleation_time.setter
2687 def nucleation_time(self, nucleation_time):
2688 if not isinstance(nucleation_time, num.ndarray) \
2689 and nucleation_time is not None:
2690 nucleation_time = num.array([nucleation_time])
2692 self.nucleation_time__ = nucleation_time
2694 @property
2695 def patch_mask(self):
2696 if (self.patch_mask__ is not None and
2697 self.patch_mask__.shape == (self.nx * self.ny,)):
2699 return self.patch_mask__
2700 else:
2701 return num.ones(self.nx * self.ny, dtype=bool)
2703 @patch_mask.setter
2704 def patch_mask(self, patch_mask):
2705 if isinstance(patch_mask, list):
2706 patch_mask = num.array(patch_mask)
2708 self.patch_mask__ = patch_mask
2710 def get_tractions(self):
2711 '''
2712 Get source traction vectors.
2714 If :py:attr:`rake` is given, unit length directed traction vectors
2715 (:py:class:`~pyrocko.gf.tractions.DirectedTractions`) are returned,
2716 else the given :py:attr:`tractions` are used.
2718 :returns:
2719 Traction vectors per patch.
2720 :rtype:
2721 :py:class:`~numpy.ndarray`: ``(n_patches, 3)``.
2722 '''
2724 if self.rake is not None:
2725 if num.isnan(self.rake):
2726 raise ValueError('Rake must be a real number, not NaN.')
2728 logger.warning(
2729 'Tractions are derived based on the given source rake.')
2730 tractions = DirectedTractions(rake=self.rake)
2731 else:
2732 tractions = self.tractions
2733 return tractions.get_tractions(self.nx, self.ny, self.patches)
2735 def base_key(self):
2736 return SourceWithDerivedMagnitude.base_key(self) + (
2737 self.slip,
2738 self.strike,
2739 self.dip,
2740 self.rake,
2741 self.length,
2742 self.width,
2743 float(self.nucleation_x.mean()),
2744 float(self.nucleation_y.mean()),
2745 self.decimation_factor,
2746 self.anchor,
2747 self.pure_shear,
2748 self.gamma,
2749 tuple(self.patch_mask))
2751 def check_conflicts(self):
2752 if self.tractions and self.rake:
2753 raise AttributeError(
2754 'Tractions and rake are mutually exclusive.')
2755 if self.tractions is None and self.rake is None:
2756 self.rake = 0.
2758 def get_magnitude(self, store=None, target=None):
2759 self.check_conflicts()
2760 if self.slip is not None or self.tractions is not None:
2761 if store is None:
2762 raise DerivedMagnitudeError(
2763 'Magnitude for a rectangular source with slip or '
2764 'tractions defined can only be derived when earth model '
2765 'is set.')
2767 moment_rate, calc_times = self.discretize_basesource(
2768 store, target=target).get_moment_rate(store.config.deltat)
2770 deltat = num.concatenate((
2771 (num.diff(calc_times)[0],),
2772 num.diff(calc_times)))
2774 return float(pmt.moment_to_magnitude(
2775 num.sum(moment_rate * deltat)))
2777 else:
2778 return float(pmt.moment_to_magnitude(1.0))
2780 def get_factor(self):
2781 return 1.0
2783 def outline(self, cs='xyz'):
2784 '''
2785 Get source outline corner coordinates.
2787 :param cs:
2788 :ref:`Output coordinate system <coordinate-system-names>`.
2789 :type cs:
2790 optional, str
2792 :returns:
2793 Corner points in desired coordinate system.
2794 :rtype:
2795 :py:class:`~numpy.ndarray`: ``(5, [2, 3])``.
2796 '''
2797 points = outline_rect_source(self.strike, self.dip, self.length,
2798 self.width, self.anchor)
2800 points[:, 0] += self.north_shift
2801 points[:, 1] += self.east_shift
2802 points[:, 2] += self.depth
2803 if cs == 'xyz':
2804 return points
2805 elif cs == 'xy':
2806 return points[:, :2]
2807 elif cs in ('latlon', 'lonlat', 'latlondepth'):
2808 latlon = ne_to_latlon(
2809 self.lat, self.lon, points[:, 0], points[:, 1])
2811 latlon = num.array(latlon).T
2812 if cs == 'latlon':
2813 return latlon
2814 elif cs == 'lonlat':
2815 return latlon[:, ::-1]
2816 else:
2817 return num.concatenate(
2818 (latlon, points[:, 2].reshape((len(points), 1))),
2819 axis=1)
2821 def points_on_source(self, cs='xyz', **kwargs):
2822 '''
2823 Convert relative plane coordinates to geographical coordinates.
2825 Given x and y coordinates (relative source coordinates between -1.
2826 and 1.) are converted to desired geographical coordinates. Coordinates
2827 need to be given as :py:class:`~numpy.ndarray` arguments ``points_x``
2828 and ``points_y``.
2830 :param cs:
2831 :ref:`Output coordinate system <coordinate-system-names>`.
2832 :type cs:
2833 optional, str
2835 :returns:
2836 Point coordinates in desired coordinate system.
2837 :rtype:
2838 :py:class:`~numpy.ndarray`: ``(n_points, [2, 3])``.
2839 '''
2840 points = points_on_rect_source(
2841 self.strike, self.dip, self.length, self.width,
2842 self.anchor, **kwargs)
2844 points[:, 0] += self.north_shift
2845 points[:, 1] += self.east_shift
2846 points[:, 2] += self.depth
2847 if cs == 'xyz':
2848 return points
2849 elif cs == 'xy':
2850 return points[:, :2]
2851 elif cs in ('latlon', 'lonlat', 'latlondepth'):
2852 latlon = ne_to_latlon(
2853 self.lat, self.lon, points[:, 0], points[:, 1])
2855 latlon = num.array(latlon).T
2856 if cs == 'latlon':
2857 return latlon
2858 elif cs == 'lonlat':
2859 return latlon[:, ::-1]
2860 else:
2861 return num.concatenate(
2862 (latlon, points[:, 2].reshape((len(points), 1))),
2863 axis=1)
2865 def pyrocko_moment_tensor(self, store=None, target=None):
2866 if store is not None:
2867 if not self.patches:
2868 self.discretize_patches(store)
2870 data = self.get_slip()
2871 else:
2872 data = self.get_tractions()
2874 weights = num.linalg.norm(data, axis=1)
2875 weights /= weights.sum()
2877 rakes = num.arctan2(data[:, 1], data[:, 0]) * r2d
2878 rake = num.average(rakes, weights=weights)
2880 return pmt.MomentTensor(
2881 strike=self.strike,
2882 dip=self.dip,
2883 rake=rake,
2884 scalar_moment=self.get_moment(store, target))
2886 def pyrocko_event(self, store=None, target=None, **kwargs):
2887 return SourceWithDerivedMagnitude.pyrocko_event(
2888 self, store, target,
2889 **kwargs)
2891 @classmethod
2892 def from_pyrocko_event(cls, ev, **kwargs):
2893 d = {}
2894 mt = ev.moment_tensor
2895 if mt:
2896 (strike, dip, rake), _ = mt.both_strike_dip_rake()
2897 d.update(
2898 strike=float(strike),
2899 dip=float(dip),
2900 rake=float(rake))
2902 d.update(kwargs)
2903 return super(PseudoDynamicRupture, cls).from_pyrocko_event(ev, **d)
    def _discretize_points(self, store, *args, **kwargs):
        '''
        Discretize source plane with equal vertical and horizontal spacing.

        Additional ``*args`` and ``**kwargs`` are passed to
        :py:meth:`points_on_source`.

        :param store:
            Green's function database (needs to cover whole region of the
            source).
        :type store:
            :py:class:`~pyrocko.gf.store.Store`

        :returns:
            Number of points in strike and dip direction, distance
            between adjacent points, coordinates (latlondepth) and coordinates
            (xy on fault) for discrete points.
        :rtype:
            (int, int, float, :py:class:`~numpy.ndarray`,
            :py:class:`~numpy.ndarray`).
        '''
        anch_x, anch_y = map_anchor[self.anchor]

        # Sample a down-dip profile (roughly one point per km of width)
        # to probe the S-wave velocity along the plane.
        npoints = int(self.width // km) + 1
        points = num.zeros((npoints, 3))
        points[:, 1] = num.linspace(-1., 1., npoints)
        points[:, 1] = (points[:, 1] - anch_y) * self.width/2

        # Rotate the profile into NED and shift to source depth.
        rotmat = pmt.euler_to_matrix(self.dip*d2r, self.strike*d2r, 0.0)
        points = num.dot(rotmat.T, points.T).T
        points[:, 2] += self.depth

        # Minimum rupture velocity bounds the finest spacing the eikonal
        # solver needs; the 0.5 km/s floor guards against very small vs.
        vs_min = store.config.get_vs(
            self.lat, self.lon, points,
            interpolation='nearest_neighbor')
        vr_min = max(vs_min.min(), .5*km) * self.gamma

        oversampling = 10.
        delta_l = self.length / (self.nx * oversampling)
        delta_w = self.width / (self.ny * oversampling)

        # Spacing limited by GF time sampling, patch oversampling and the
        # store's spatial deltas, coarsened by eikonal_decimation.
        delta = self.eikonal_decimation * num.min([
            store.config.deltat * vr_min / oversampling,
            delta_l, delta_w] + [
            deltas for deltas in store.config.deltas])

        # Snap delta so the fault width is an integer number of intervals.
        delta = delta_w / num.ceil(delta_w / delta)

        nx = int(num.ceil(self.length / delta)) + 1
        ny = int(num.ceil(self.width / delta)) + 1

        # The grid slightly overshoots the fault length; lim_x is the
        # normalized overhang split over both ends along strike.
        rem_l = (nx-1)*delta - self.length
        lim_x = rem_l / self.length

        points_xy = num.zeros((nx * ny, 2))
        points_xy[:, 0] = num.repeat(
            num.linspace(-1.-lim_x, 1.+lim_x, nx), ny)
        points_xy[:, 1] = num.tile(
            num.linspace(-1., 1., ny), nx)

        points = self.points_on_source(
            points_x=points_xy[:, 0],
            points_y=points_xy[:, 1],
            **kwargs)

        return nx, ny, delta, points, points_xy
2972 def _discretize_rupture_v(self, store, interpolation='nearest_neighbor',
2973 points=None):
2974 '''
2975 Get rupture velocity for discrete points on source plane.
2977 :param store:
2978 Green's function database (needs to cover the whole region of the
2979 source)
2980 :type store:
2981 optional, :py:class:`~pyrocko.gf.store.Store`
2983 :param interpolation:
2984 Interpolation method to use (choose between ``'nearest_neighbor'``
2985 and ``'multilinear'``).
2986 :type interpolation:
2987 optional, str
2989 :param points:
2990 Coordinates on fault (-1.:1.) of discrete points.
2991 :type points:
2992 optional, :py:class:`~numpy.ndarray`: ``(n_points, 2)``
2994 :returns:
2995 Rupture velocity assumed as :math:`v_s * \\gamma` for discrete
2996 points.
2997 :rtype:
2998 :py:class:`~numpy.ndarray`: ``(n_points, )``.
2999 '''
3001 if points is None:
3002 _, _, _, points, _ = self._discretize_points(store, cs='xyz')
3004 return store.config.get_vs(
3005 self.lat, self.lon,
3006 points=points,
3007 interpolation=interpolation) * self.gamma
3009 def discretize_time(
3010 self, store, interpolation='nearest_neighbor',
3011 vr=None, times=None, *args, **kwargs):
3012 '''
3013 Get rupture start time for discrete points on source plane.
3015 :param store:
3016 Green's function database (needs to cover whole region of the
3017 source)
3018 :type store:
3019 :py:class:`~pyrocko.gf.store.Store`
3021 :param interpolation:
3022 Interpolation method to use (choose between ``'nearest_neighbor'``
3023 and ``'multilinear'``).
3024 :type interpolation:
3025 optional, str
3027 :param vr:
3028 Array, containing rupture user defined rupture velocity values.
3029 :type vr:
3030 optional, :py:class:`~numpy.ndarray`
3032 :param times:
3033 Array, containing zeros, where rupture is starting, real positive
3034 numbers at later secondary nucleation points and -1, where time
3035 will be calculated. If not given, rupture starts at nucleation_x,
3036 nucleation_y. Times are given for discrete points with equal
3037 horizontal and vertical spacing.
3038 :type times:
3039 optional, :py:class:`~numpy.ndarray`
3041 :returns:
3042 Coordinates (latlondepth), coordinates (xy), rupture velocity,
3043 rupture propagation time of discrete points.
3044 :rtype:
3045 :py:class:`~numpy.ndarray`: ``(n_points, 3)``,
3046 :py:class:`~numpy.ndarray`: ``(n_points, 2)``,
3047 :py:class:`~numpy.ndarray`: ``(n_points_dip, n_points_strike)``,
3048 :py:class:`~numpy.ndarray`: ``(n_points_dip, n_points_strike)``.
3049 '''
3050 nx, ny, delta, points, points_xy = self._discretize_points(
3051 store, cs='xyz')
3053 if vr is None or vr.shape != tuple((nx, ny)):
3054 if vr:
3055 logger.warning(
3056 'Given rupture velocities are not in right shape: '
3057 '(%i, %i), but needed is (%i, %i).', *vr.shape + (nx, ny))
3058 vr = self._discretize_rupture_v(store, interpolation, points)\
3059 .reshape(nx, ny)
3061 if vr.shape != tuple((nx, ny)):
3062 logger.warning(
3063 'Given rupture velocities are not in right shape. Therefore'
3064 ' standard rupture velocity array is used.')
3066 def initialize_times():
3067 nucl_x, nucl_y = self.nucleation_x, self.nucleation_y
3069 if nucl_x.shape != nucl_y.shape:
3070 raise ValueError(
3071 'Nucleation coordinates have different shape.')
3073 dist_points = num.array([
3074 num.linalg.norm(points_xy - num.array([x, y]).ravel(), axis=1)
3075 for x, y in zip(nucl_x, nucl_y)])
3076 nucl_indices = num.argmin(dist_points, axis=1)
3078 if self.nucleation_time is None:
3079 nucl_times = num.zeros_like(nucl_indices)
3080 else:
3081 if self.nucleation_time.shape == nucl_x.shape:
3082 nucl_times = self.nucleation_time
3083 else:
3084 raise ValueError(
3085 'Nucleation coordinates and times have different '
3086 'shapes')
3088 t = num.full(nx * ny, -1.)
3089 t[nucl_indices] = nucl_times
3090 return t.reshape(nx, ny)
3092 if times is None:
3093 times = initialize_times()
3094 elif times.shape != tuple((nx, ny)):
3095 times = initialize_times()
3096 logger.warning(
3097 'Given times are not in right shape. Therefore standard time'
3098 ' array is used.')
3100 eikonal_ext.eikonal_solver_fmm_cartesian(
3101 speeds=vr, times=times, delta=delta)
3103 return points, points_xy, vr, times
3105 def get_vr_time_interpolators(
3106 self, store, interpolation='nearest_neighbor', force=False,
3107 **kwargs):
3108 '''
3109 Get interpolators for rupture velocity and rupture time.
3111 Additional ``**kwargs`` are passed to :py:meth:`discretize_time`.
3113 :param store:
3114 Green's function database (needs to cover whole region of the
3115 source).
3116 :type store:
3117 :py:class:`~pyrocko.gf.store.Store`
3119 :param interpolation:
3120 Interpolation method to use (choose between ``'nearest_neighbor'``
3121 and ``'multilinear'``).
3122 :type interpolation:
3123 optional, str
3125 :param force:
3126 Force recalculation of the interpolators (e.g. after change of
3127 nucleation point locations/times). Default is ``False``.
3128 :type force:
3129 optional, bool
3130 '''
3131 interp_map = {'multilinear': 'linear', 'nearest_neighbor': 'nearest'}
3132 if interpolation not in interp_map:
3133 raise TypeError(
3134 'Interpolation method %s not available' % interpolation)
3136 if not self._interpolators.get(interpolation, False) or force:
3137 _, points_xy, vr, times = self.discretize_time(
3138 store, **kwargs)
3140 if self.length <= 0.:
3141 raise ValueError(
3142 'length must be larger then 0. not %g' % self.length)
3144 if self.width <= 0.:
3145 raise ValueError(
3146 'width must be larger then 0. not %g' % self.width)
3148 nx, ny = times.shape
3149 anch_x, anch_y = map_anchor[self.anchor]
3151 points_xy[:, 0] = (points_xy[:, 0] - anch_x) * self.length / 2.
3152 points_xy[:, 1] = (points_xy[:, 1] - anch_y) * self.width / 2.
3154 ascont = num.ascontiguousarray
3156 self._interpolators[interpolation] = (
3157 nx, ny, times, vr,
3158 RegularGridInterpolator(
3159 (ascont(points_xy[::ny, 0]), ascont(points_xy[:ny, 1])),
3160 times,
3161 method=interp_map[interpolation]),
3162 RegularGridInterpolator(
3163 (ascont(points_xy[::ny, 0]), ascont(points_xy[:ny, 1])),
3164 vr,
3165 method=interp_map[interpolation]))
3167 return self._interpolators[interpolation]
    def discretize_patches(
            self, store, interpolation='nearest_neighbor', force=False,
            grid_shape=(),
            **kwargs):
        '''
        Get rupture start time and OkadaSource elements for points on rupture.

        All source elements and their corresponding center points are
        calculated and stored in the :py:attr:`patches` attribute.

        Additional ``**kwargs`` are passed to :py:meth:`discretize_time`.

        :param store:
            Green's function database (needs to cover whole region of the
            source).
        :type store:
            :py:class:`~pyrocko.gf.store.Store`

        :param interpolation:
            Interpolation method to use (choose between ``'nearest_neighbor'``
            and ``'multilinear'``).
        :type interpolation:
            optional, str

        :param force:
            Force recalculation of the vr and time interpolators ( e.g. after
            change of nucleation point locations/times). Default is ``False``.
        :type force:
            optional, bool

        :param grid_shape:
            Desired sub fault patch grid size (nlength, nwidth). Either factor
            or grid_shape should be set.
        :type grid_shape:
            optional, tuple of int
        '''
        nx, ny, times, vr, time_interpolator, vr_interpolator = \
            self.get_vr_time_interpolators(
                store,
                interpolation=interpolation, force=force, **kwargs)
        anch_x, anch_y = map_anchor[self.anchor]

        # Half extents of the fault expressed relative to the anchor
        # (Okada convention: al1/al2 along strike, aw1/aw2 down dip).
        al = self.length / 2.
        aw = self.width / 2.
        al1 = -(al + anch_x * al)
        al2 = al - anch_x * al
        aw1 = -aw + anch_y * aw
        aw2 = aw + anch_y * aw
        # NOTE(review): exact float equality — presumably safe for the
        # anchor values in map_anchor, but worth confirming.
        assert num.abs([al1, al2]).sum() == self.length
        assert num.abs([aw1, aw2]).sum() == self.width

        def get_lame(*a, **kw):
            # Derive shear modulus and Poisson ratio from the earth model
            # (lamb here is the Lame parameter lambda).
            shear_mod = store.config.get_shear_moduli(*a, **kw)
            lamb = store.config.get_vp(*a, **kw)**2 \
                * store.config.get_rho(*a, **kw) - 2. * shear_mod
            return shear_mod, lamb / (2. * (lamb + shear_mod))

        # Elastic parameters at the source reference point, used for the
        # parent Okada source.
        shear_mod, poisson = get_lame(
            self.lat, self.lon,
            num.array([[self.north_shift, self.east_shift, self.depth]]),
            interpolation=interpolation)

        okada_src = OkadaSource(
            lat=self.lat, lon=self.lon,
            strike=self.strike, dip=self.dip,
            north_shift=self.north_shift, east_shift=self.east_shift,
            depth=self.depth,
            al1=al1, al2=al2, aw1=aw1, aw2=aw2,
            poisson=poisson.mean(),
            shearmod=shear_mod.mean(),
            opening=kwargs.get('opening', 0.))

        # Fall back to grid_shape or the eikonal grid size if the patch
        # grid has not been configured explicitly.
        if not (self.nx and self.ny):
            if grid_shape:
                self.nx, self.ny = grid_shape
            else:
                self.nx = nx
                self.ny = ny

        source_disc, source_points = okada_src.discretize(self.nx, self.ny)

        # NOTE(review): this per-patch recomputation rebinds shear_mod and
        # poisson, but the results are not used below — presumably a
        # leftover; confirm before removing.
        shear_mod, poisson = get_lame(
            self.lat, self.lon,
            num.array([src.source_patch()[:3] for src in source_disc]),
            interpolation=interpolation)

        if (self.nx, self.ny) != (nx, ny):
            # Patch grid differs from eikonal grid: interpolate rupture
            # times and velocities onto the patch centers.
            times_interp = time_interpolator(
                num.ascontiguousarray(source_points[:, :2]))
            vr_interp = vr_interpolator(
                num.ascontiguousarray(source_points[:, :2]))
        else:
            # Grids coincide: reuse the eikonal results directly
            # (transposed to match the patch ordering).
            times_interp = times.T.ravel()
            vr_interp = vr.T.ravel()

        for isrc, src in enumerate(source_disc):
            src.vr = vr_interp[isrc]
            src.time = times_interp[isrc] + self.time

        self.patches = source_disc
3270 def discretize_basesource(self, store, target=None):
3271 '''
3272 Prepare source for synthetic waveform calculation.
3274 :param store:
3275 Green's function database (needs to cover whole region of the
3276 source).
3277 :type store:
3278 :py:class:`~pyrocko.gf.store.Store`
3280 :param target:
3281 Target information.
3282 :type target:
3283 optional, :py:class:`~pyrocko.gf.targets.Target`
3285 :returns:
3286 Source discretized by a set of moment tensors and times.
3287 :rtype:
3288 :py:class:`~pyrocko.gf.meta.DiscretizedMTSource`
3289 '''
3290 if not target:
3291 interpolation = 'nearest_neighbor'
3292 else:
3293 interpolation = target.interpolation
3295 if not self.patches:
3296 self.discretize_patches(store, interpolation)
3298 if self.coef_mat is None:
3299 self.calc_coef_mat()
3301 delta_slip, slip_times = self.get_delta_slip(store)
3302 npatches = self.nx * self.ny
3303 ntimes = slip_times.size
3305 anch_x, anch_y = map_anchor[self.anchor]
3307 pln = self.length / self.nx
3308 pwd = self.width / self.ny
3310 patch_coords = num.array([
3311 (p.ix, p.iy)
3312 for p in self.patches]).reshape(self.nx, self.ny, 2)
3314 # boundary condition is zero-slip
3315 # is not valid to avoid unwished interpolation effects
3316 slip_grid = num.zeros((self.nx + 2, self.ny + 2, ntimes, 3))
3317 slip_grid[1:-1, 1:-1, :, :] = \
3318 delta_slip.reshape(self.nx, self.ny, ntimes, 3)
3320 slip_grid[0, 0, :, :] = slip_grid[1, 1, :, :]
3321 slip_grid[0, -1, :, :] = slip_grid[1, -2, :, :]
3322 slip_grid[-1, 0, :, :] = slip_grid[-2, 1, :, :]
3323 slip_grid[-1, -1, :, :] = slip_grid[-2, -2, :, :]
3325 slip_grid[1:-1, 0, :, :] = slip_grid[1:-1, 1, :, :]
3326 slip_grid[1:-1, -1, :, :] = slip_grid[1:-1, -2, :, :]
3327 slip_grid[0, 1:-1, :, :] = slip_grid[1, 1:-1, :, :]
3328 slip_grid[-1, 1:-1, :, :] = slip_grid[-2, 1:-1, :, :]
3330 def make_grid(patch_parameter):
3331 grid = num.zeros((self.nx + 2, self.ny + 2))
3332 grid[1:-1, 1:-1] = patch_parameter.reshape(self.nx, self.ny)
3334 grid[0, 0] = grid[1, 1]
3335 grid[0, -1] = grid[1, -2]
3336 grid[-1, 0] = grid[-2, 1]
3337 grid[-1, -1] = grid[-2, -2]
3339 grid[1:-1, 0] = grid[1:-1, 1]
3340 grid[1:-1, -1] = grid[1:-1, -2]
3341 grid[0, 1:-1] = grid[1, 1:-1]
3342 grid[-1, 1:-1] = grid[-2, 1:-1]
3344 return grid
3346 lamb = self.get_patch_attribute('lamb')
3347 mu = self.get_patch_attribute('shearmod')
3349 lamb_grid = make_grid(lamb)
3350 mu_grid = make_grid(mu)
3352 coords_x = num.zeros(self.nx + 2)
3353 coords_x[1:-1] = patch_coords[:, 0, 0]
3354 coords_x[0] = coords_x[1] - pln / 2
3355 coords_x[-1] = coords_x[-2] + pln / 2
3357 coords_y = num.zeros(self.ny + 2)
3358 coords_y[1:-1] = patch_coords[0, :, 1]
3359 coords_y[0] = coords_y[1] - pwd / 2
3360 coords_y[-1] = coords_y[-2] + pwd / 2
3362 slip_interp = RegularGridInterpolator(
3363 (coords_x, coords_y, slip_times),
3364 slip_grid, method='nearest')
3366 lamb_interp = RegularGridInterpolator(
3367 (coords_x, coords_y),
3368 lamb_grid, method='nearest')
3370 mu_interp = RegularGridInterpolator(
3371 (coords_x, coords_y),
3372 mu_grid, method='nearest')
3374 # discretize basesources
3375 mindeltagf = min(tuple(
3376 (self.length / self.nx, self.width / self.ny) +
3377 tuple(store.config.deltas)))
3379 nl = int((1. / self.decimation_factor) *
3380 num.ceil(pln / mindeltagf)) + 1
3381 nw = int((1. / self.decimation_factor) *
3382 num.ceil(pwd / mindeltagf)) + 1
3383 nsrc_patch = int(nl * nw)
3384 dl = pln / nl
3385 dw = pwd / nw
3387 patch_area = dl * dw
3389 xl = num.linspace(-0.5 * (pln - dl), 0.5 * (pln - dl), nl)
3390 xw = num.linspace(-0.5 * (pwd - dw), 0.5 * (pwd - dw), nw)
3392 base_coords = num.zeros((nsrc_patch, 3))
3393 base_coords[:, 0] = num.tile(xl, nw)
3394 base_coords[:, 1] = num.repeat(xw, nl)
3395 base_coords = num.tile(base_coords, (npatches, 1))
3397 center_coords = num.zeros((npatches, 3))
3398 center_coords[:, 0] = num.repeat(
3399 num.arange(self.nx) * pln + pln / 2, self.ny) - self.length / 2
3400 center_coords[:, 1] = num.tile(
3401 num.arange(self.ny) * pwd + pwd / 2, self.nx) - self.width / 2
3403 base_coords -= center_coords.repeat(nsrc_patch, axis=0)
3404 nbaselocs = base_coords.shape[0]
3406 base_interp = base_coords.repeat(ntimes, axis=0)
3408 base_times = num.tile(slip_times, nbaselocs)
3409 base_interp[:, 0] -= anch_x * self.length / 2
3410 base_interp[:, 1] -= anch_y * self.width / 2
3411 base_interp[:, 2] = base_times
3413 _, _, _, _, time_interpolator, _ = self.get_vr_time_interpolators(
3414 store, interpolation=interpolation)
3416 time_eikonal_max = time_interpolator.values.max()
3418 nbasesrcs = base_interp.shape[0]
3419 delta_slip = slip_interp(base_interp).reshape(nbaselocs, ntimes, 3)
3420 lamb = lamb_interp(base_interp[:, :2]).ravel()
3421 mu = mu_interp(base_interp[:, :2]).ravel()
3423 if False:
3424 try:
3425 import matplotlib.pyplot as plt
3426 coords = base_coords.copy()
3427 norm = num.sum(num.linalg.norm(delta_slip, axis=2), axis=1)
3428 plt.scatter(coords[:, 0], coords[:, 1], c=norm)
3429 plt.show()
3430 except AttributeError:
3431 pass
3433 base_interp[:, 2] = 0.
3434 rotmat = pmt.euler_to_matrix(self.dip * d2r, self.strike * d2r, 0.0)
3435 base_interp = num.dot(rotmat.T, base_interp.T).T
3436 base_interp[:, 0] += self.north_shift
3437 base_interp[:, 1] += self.east_shift
3438 base_interp[:, 2] += self.depth
3440 slip_strike = delta_slip[:, :, 0].ravel()
3441 slip_dip = delta_slip[:, :, 1].ravel()
3442 slip_norm = delta_slip[:, :, 2].ravel()
3444 slip_shear = num.linalg.norm([slip_strike, slip_dip], axis=0)
3445 slip_rake = r2d * num.arctan2(slip_dip, slip_strike)
3447 m6s = okada_ext.patch2m6(
3448 strikes=num.full(nbasesrcs, self.strike, dtype=float),
3449 dips=num.full(nbasesrcs, self.dip, dtype=float),
3450 rakes=slip_rake,
3451 disl_shear=slip_shear,
3452 disl_norm=slip_norm,
3453 lamb=lamb,
3454 mu=mu,
3455 nthreads=self.nthreads)
3457 m6s *= patch_area
3459 dl = -self.patches[0].al1 + self.patches[0].al2
3460 dw = -self.patches[0].aw1 + self.patches[0].aw2
3462 base_times[base_times > time_eikonal_max] = time_eikonal_max
3464 ds = meta.DiscretizedMTSource(
3465 lat=self.lat,
3466 lon=self.lon,
3467 times=base_times + self.time,
3468 north_shifts=base_interp[:, 0],
3469 east_shifts=base_interp[:, 1],
3470 depths=base_interp[:, 2],
3471 m6s=m6s,
3472 dl=dl,
3473 dw=dw,
3474 nl=self.nx,
3475 nw=self.ny)
3477 return ds
3479 def calc_coef_mat(self):
3480 '''
3481 Calculate coefficients connecting tractions and dislocations.
3482 '''
3483 if not self.patches:
3484 raise ValueError(
3485 'Patches are needed. Please calculate them first.')
3487 self.coef_mat = make_okada_coefficient_matrix(
3488 self.patches, nthreads=self.nthreads, pure_shear=self.pure_shear)
3490 def get_patch_attribute(self, attr):
3491 '''
3492 Get patch attributes.
3494 :param attr:
3495 Name of selected attribute (see
3496 :py:class`pyrocko.modelling.okada.OkadaSource`).
3497 :type attr:
3498 str
3500 :returns:
3501 Array with attribute value for each fault patch.
3502 :rtype:
3503 :py:class:`~numpy.ndarray`
3505 '''
3506 if not self.patches:
3507 raise ValueError(
3508 'Patches are needed. Please calculate them first.')
3509 return num.array([getattr(p, attr) for p in self.patches])
3511 def get_slip(
3512 self,
3513 time=None,
3514 scale_slip=True,
3515 interpolation='nearest_neighbor',
3516 **kwargs):
3517 '''
3518 Get slip per subfault patch for given time after rupture start.
3520 :param time:
3521 Time after origin [s], for which slip is computed. If not
3522 given, final static slip is returned.
3523 :type time:
3524 optional, float > 0.
3526 :param scale_slip:
3527 If ``True`` and :py:attr:`slip` given, all slip values are scaled
3528 to fit the given maximum slip.
3529 :type scale_slip:
3530 optional, bool
3532 :param interpolation:
3533 Interpolation method to use (choose between ``'nearest_neighbor'``
3534 and ``'multilinear'``).
3535 :type interpolation:
3536 optional, str
3538 :returns:
3539 Inverted dislocations (:math:`u_{strike}, u_{dip}, u_{tensile}`)
3540 for each source patch.
3541 :rtype:
3542 :py:class:`~numpy.ndarray`: ``(n_sources, 3)``
3543 '''
3545 if self.patches is None:
3546 raise ValueError(
3547 'Please discretize the source first (discretize_patches())')
3548 npatches = len(self.patches)
3549 tractions = self.get_tractions()
3550 time_patch_max = self.get_patch_attribute('time').max() - self.time
3552 time_patch = time
3553 if time is None:
3554 time_patch = time_patch_max
3556 if self.coef_mat is None:
3557 self.calc_coef_mat()
3559 if tractions.shape != (npatches, 3):
3560 raise AttributeError(
3561 'The traction vector is of invalid shape.'
3562 ' Required shape is (npatches, 3)')
3564 patch_mask = num.ones(npatches, dtype=bool)
3565 if self.patch_mask is not None:
3566 patch_mask = self.patch_mask
3568 times = self.get_patch_attribute('time') - self.time
3569 times[~patch_mask] = time_patch + 1. # exlcude unmasked patches
3570 relevant_sources = num.nonzero(times <= time_patch)[0]
3571 disloc_est = num.zeros_like(tractions)
3573 if self.smooth_rupture:
3574 patch_activation = num.zeros(npatches)
3576 nx, ny, times, vr, time_interpolator, vr_interpolator = \
3577 self.get_vr_time_interpolators(
3578 store, interpolation=interpolation)
3580 # Getting the native Eikonal grid, bit hackish
3581 points_x = num.round(time_interpolator.grid[0], decimals=2)
3582 points_y = num.round(time_interpolator.grid[1], decimals=2)
3583 times_eikonal = time_interpolator.values
3585 time_max = time
3586 if time is None:
3587 time_max = times_eikonal.max()
3589 for ip, p in enumerate(self.patches):
3590 ul = num.round((p.ix + p.al1, p.iy + p.aw1), decimals=2)
3591 lr = num.round((p.ix + p.al2, p.iy + p.aw2), decimals=2)
3593 idx_length = num.logical_and(
3594 points_x >= ul[0], points_x <= lr[0])
3595 idx_width = num.logical_and(
3596 points_y >= ul[1], points_y <= lr[1])
3598 times_patch = times_eikonal[num.ix_(idx_length, idx_width)]
3599 if times_patch.size == 0:
3600 raise AttributeError('could not use smooth_rupture')
3602 patch_activation[ip] = \
3603 (times_patch <= time_max).sum() / times_patch.size
3605 if time_patch == 0 and time_patch != time_patch_max:
3606 patch_activation[ip] = 0.
3608 patch_activation[~patch_mask] = 0. # exlcude unmasked patches
3610 relevant_sources = num.nonzero(patch_activation > 0.)[0]
3612 if relevant_sources.size == 0:
3613 return disloc_est
3615 indices_disl = num.repeat(relevant_sources * 3, 3)
3616 indices_disl[1::3] += 1
3617 indices_disl[2::3] += 2
3619 disloc_est[relevant_sources] = invert_fault_dislocations_bem(
3620 stress_field=tractions[relevant_sources, :].ravel(),
3621 coef_mat=self.coef_mat[indices_disl, :][:, indices_disl],
3622 pure_shear=self.pure_shear, nthreads=self.nthreads,
3623 epsilon=None,
3624 **kwargs)
3626 if self.smooth_rupture:
3627 disloc_est *= patch_activation[:, num.newaxis]
3629 if scale_slip and self.slip is not None:
3630 disloc_tmax = num.zeros(npatches)
3632 indices_disl = num.repeat(num.nonzero(patch_mask)[0] * 3, 3)
3633 indices_disl[1::3] += 1
3634 indices_disl[2::3] += 2
3636 disloc_tmax[patch_mask] = num.linalg.norm(
3637 invert_fault_dislocations_bem(
3638 stress_field=tractions[patch_mask, :].ravel(),
3639 coef_mat=self.coef_mat[indices_disl, :][:, indices_disl],
3640 pure_shear=self.pure_shear, nthreads=self.nthreads,
3641 epsilon=None,
3642 **kwargs), axis=1)
3644 disloc_tmax_max = disloc_tmax.max()
3645 if disloc_tmax_max == 0.:
3646 logger.warning(
3647 'slip scaling not performed. Maximum slip is 0.')
3649 disloc_est *= self.slip / disloc_tmax_max
3651 return disloc_est
3653 def get_delta_slip(
3654 self,
3655 store=None,
3656 deltat=None,
3657 delta=True,
3658 interpolation='nearest_neighbor',
3659 **kwargs):
3660 '''
3661 Get slip change snapshots.
3663 The time interval, within which the slip changes are computed is
3664 determined by the sampling rate of the Green's function database or
3665 ``deltat``. Additional ``**kwargs`` are passed to :py:meth:`get_slip`.
3667 :param store:
3668 Green's function database (needs to cover whole region of of the
3669 source). Its sampling interval is used as time increment for slip
3670 difference calculation. Either ``deltat`` or ``store`` should be
3671 given.
3672 :type store:
3673 optional, :py:class:`~pyrocko.gf.store.Store`
3675 :param deltat:
3676 Time interval for slip difference calculation [s]. Either
3677 ``deltat`` or ``store`` should be given.
3678 :type deltat:
3679 optional, float
3681 :param delta:
3682 If ``True``, slip differences between two time steps are given. If
3683 ``False``, cumulative slip for all time steps.
3684 :type delta:
3685 optional, bool
3687 :param interpolation:
3688 Interpolation method to use (choose between ``'nearest_neighbor'``
3689 and ``'multilinear'``).
3690 :type interpolation:
3691 optional, str
3693 :returns:
3694 Displacement changes(:math:`\\Delta u_{strike},
3695 \\Delta u_{dip} , \\Delta u_{tensile}`) for each source patch and
3696 time; corner times, for which delta slip is computed. The order of
3697 displacement changes array is:
3699 .. math::
3701 &[[\\\\
3702 &[\\Delta u_{strike, patch1, t1},
3703 \\Delta u_{dip, patch1, t1},
3704 \\Delta u_{tensile, patch1, t1}],\\\\
3705 &[\\Delta u_{strike, patch1, t2},
3706 \\Delta u_{dip, patch1, t2},
3707 \\Delta u_{tensile, patch1, t2}]\\\\
3708 &], [\\\\
3709 &[\\Delta u_{strike, patch2, t1}, ...],\\\\
3710 &[\\Delta u_{strike, patch2, t2}, ...]]]\\\\
3712 :rtype: :py:class:`~numpy.ndarray`: ``(n_sources, n_times, 3)``,
3713 :py:class:`~numpy.ndarray`: ``(n_times, )``
3714 '''
3715 if store and deltat:
3716 raise AttributeError(
3717 'Argument collision. '
3718 'Please define only the store or the deltat argument.')
3720 if store:
3721 deltat = store.config.deltat
3723 if not deltat:
3724 raise AttributeError('Please give a GF store or set deltat.')
3726 npatches = len(self.patches)
3728 _, _, _, _, time_interpolator, _ = self.get_vr_time_interpolators(
3729 store, interpolation=interpolation)
3730 tmax = time_interpolator.values.max()
3732 calc_times = num.arange(0., tmax + deltat, deltat)
3733 calc_times[calc_times > tmax] = tmax
3735 disloc_est = num.zeros((npatches, calc_times.size, 3))
3737 for itime, t in enumerate(calc_times):
3738 disloc_est[:, itime, :] = self.get_slip(
3739 time=t, scale_slip=False, **kwargs)
3741 if self.slip:
3742 disloc_tmax = num.linalg.norm(
3743 self.get_slip(scale_slip=False, time=tmax),
3744 axis=1)
3746 disloc_tmax_max = disloc_tmax.max()
3747 if disloc_tmax_max == 0.:
3748 logger.warning(
3749 'Slip scaling not performed. Maximum slip is 0.')
3750 else:
3751 disloc_est *= self.slip / disloc_tmax_max
3753 if not delta:
3754 return disloc_est, calc_times
3756 # if we have only one timestep there is no gradient
3757 if calc_times.size > 1:
3758 disloc_init = disloc_est[:, 0, :]
3759 disloc_est = num.diff(disloc_est, axis=1)
3760 disloc_est = num.concatenate((
3761 disloc_init[:, num.newaxis, :], disloc_est), axis=1)
3763 calc_times = calc_times
3765 return disloc_est, calc_times
3767 def get_slip_rate(self, *args, **kwargs):
3768 '''
3769 Get slip rate inverted from patches.
3771 The time interval, within which the slip rates are computed is
3772 determined by the sampling rate of the Green's function database or
3773 ``deltat``. Additional ``*args`` and ``**kwargs`` are passed to
3774 :py:meth:`get_delta_slip`.
3776 :returns:
3777 Slip rates (:math:`\\Delta u_{strike}/\\Delta t`,
3778 :math:`\\Delta u_{dip}/\\Delta t, \\Delta u_{tensile}/\\Delta t`)
3779 for each source patch and time; corner times, for which slip rate
3780 is computed. The order of sliprate array is:
3782 .. math::
3784 &[[\\\\
3785 &[\\Delta u_{strike, patch1, t1}/\\Delta t,
3786 \\Delta u_{dip, patch1, t1}/\\Delta t,
3787 \\Delta u_{tensile, patch1, t1}/\\Delta t],\\\\
3788 &[\\Delta u_{strike, patch1, t2}/\\Delta t,
3789 \\Delta u_{dip, patch1, t2}/\\Delta t,
3790 \\Delta u_{tensile, patch1, t2}/\\Delta t]], [\\\\
3791 &[\\Delta u_{strike, patch2, t1}/\\Delta t, ...],\\\\
3792 &[\\Delta u_{strike, patch2, t2}/\\Delta t, ...]]]\\\\
3794 :rtype: :py:class:`~numpy.ndarray`: ``(n_sources, n_times, 3)``,
3795 :py:class:`~numpy.ndarray`: ``(n_times, )``
3796 '''
3797 ddisloc_est, calc_times = self.get_delta_slip(
3798 *args, delta=True, **kwargs)
3800 dt = num.concatenate(
3801 [(num.diff(calc_times)[0], ), num.diff(calc_times)])
3802 slip_rate = num.linalg.norm(ddisloc_est, axis=2) / dt
3804 return slip_rate, calc_times
3806 def get_moment_rate_patches(self, *args, **kwargs):
3807 '''
3808 Get scalar seismic moment rate for each patch individually.
3810 Additional ``*args`` and ``**kwargs`` are passed to
3811 :py:meth:`get_slip_rate`.
3813 :returns:
3814 Seismic moment rate for each source patch and time; corner times,
3815 for which patch moment rate is computed based on slip rate. The
3816 order of the moment rate array is:
3818 .. math::
3820 &[\\\\
3821 &[(\\Delta M / \\Delta t)_{patch1, t1},
3822 (\\Delta M / \\Delta t)_{patch1, t2}, ...],\\\\
3823 &[(\\Delta M / \\Delta t)_{patch2, t1},
3824 (\\Delta M / \\Delta t)_{patch, t2}, ...],\\\\
3825 &[...]]\\\\
3827 :rtype: :py:class:`~numpy.ndarray`: ``(n_sources, n_times)``,
3828 :py:class:`~numpy.ndarray`: ``(n_times, )``
3829 '''
3830 slip_rate, calc_times = self.get_slip_rate(*args, **kwargs)
3832 shear_mod = self.get_patch_attribute('shearmod')
3833 p_length = self.get_patch_attribute('length')
3834 p_width = self.get_patch_attribute('width')
3836 dA = p_length * p_width
3838 mom_rate = shear_mod[:, num.newaxis] * slip_rate * dA[:, num.newaxis]
3840 return mom_rate, calc_times
3842 def get_moment_rate(self, store, target=None, deltat=None):
3843 '''
3844 Get seismic source moment rate for the total source (STF).
3846 :param store:
3847 Green's function database (needs to cover whole region of of the
3848 source). Its ``deltat`` [s] is used as time increment for slip
3849 difference calculation. Either ``deltat`` or ``store`` should be
3850 given.
3851 :type store:
3852 :py:class:`~pyrocko.gf.store.Store`
3854 :param target:
3855 Target information, needed for interpolation method.
3856 :type target:
3857 optional, :py:class:`~pyrocko.gf.targets.Target`
3859 :param deltat:
3860 Time increment for slip difference calculation [s]. If not given
3861 ``store.deltat`` is used.
3862 :type deltat:
3863 optional, float
3865 :return:
3866 Seismic moment rate [Nm/s] for each time; corner times, for which
3867 moment rate is computed. The order of the moment rate array is:
3869 .. math::
3871 &[\\\\
3872 &(\\Delta M / \\Delta t)_{t1},\\\\
3873 &(\\Delta M / \\Delta t)_{t2},\\\\
3874 &...]\\\\
3876 :rtype:
3877 :py:class:`~numpy.ndarray`: ``(n_times, )``,
3878 :py:class:`~numpy.ndarray`: ``(n_times, )``
3879 '''
3880 if not deltat:
3881 deltat = store.config.deltat
3882 return self.discretize_basesource(
3883 store, target=target).get_moment_rate(deltat)
3885 def get_moment(self, *args, **kwargs):
3886 '''
3887 Get seismic cumulative moment.
3889 Additional ``*args`` and ``**kwargs`` are passed to
3890 :py:meth:`get_magnitude`.
3892 :returns:
3893 Cumulative seismic moment in [Nm].
3894 :rtype:
3895 float
3896 '''
3897 return float(pmt.magnitude_to_moment(self.get_magnitude(
3898 *args, **kwargs)))
3900 def rescale_slip(self, magnitude=None, moment=None, **kwargs):
3901 '''
3902 Rescale source slip based on given target magnitude or seismic moment.
3904 Rescale the maximum source slip to fit the source moment magnitude or
3905 seismic moment to the given target values. Either ``magnitude`` or
3906 ``moment`` need to be given. Additional ``**kwargs`` are passed to
3907 :py:meth:`get_moment`.
3909 :param magnitude:
3910 Target moment magnitude :math:`M_\\mathrm{w}` as in
3911 [Hanks and Kanamori, 1979]
3912 :type magnitude:
3913 optional, float
3915 :param moment:
3916 Target seismic moment :math:`M_0` [Nm].
3917 :type moment:
3918 optional, float
3919 '''
3920 if self.slip is None:
3921 self.slip = 1.
3922 logger.warning('No slip found for rescaling. '
3923 'An initial slip of 1 m is assumed.')
3925 if magnitude is None and moment is None:
3926 raise ValueError(
3927 'Either target magnitude or moment need to be given.')
3929 moment_init = self.get_moment(**kwargs)
3931 if magnitude is not None:
3932 moment = pmt.magnitude_to_moment(magnitude)
3934 self.slip *= moment / moment_init
3936 def get_centroid(self, store, *args, **kwargs):
3937 '''
3938 Centroid of the pseudo dynamic rupture model.
3940 The centroid location and time are derived from the locations and times
3941 of the individual patches weighted with their moment contribution.
3942 Additional ``**kwargs`` are passed to :py:meth:`pyrocko_moment_tensor`.
3944 :param store:
3945 Green's function database (needs to cover whole region of of the
3946 source). Its ``deltat`` [s] is used as time increment for slip
3947 difference calculation. Either ``deltat`` or ``store`` should be
3948 given.
3949 :type store:
3950 :py:class:`~pyrocko.gf.store.Store`
3952 :returns:
3953 The centroid location and associated moment tensor.
3954 :rtype:
3955 :py:class:`pyrocko.model.Event`
3956 '''
3957 _, _, _, _, time, _ = self.get_vr_time_interpolators(store)
3958 t_max = time.values.max()
3960 moment_rate, times = self.get_moment_rate_patches(deltat=t_max)
3962 moment = num.sum(moment_rate * times, axis=1)
3963 weights = moment / moment.sum()
3965 norths = self.get_patch_attribute('north_shift')
3966 easts = self.get_patch_attribute('east_shift')
3967 depths = self.get_patch_attribute('depth')
3969 centroid_n = num.sum(weights * norths)
3970 centroid_e = num.sum(weights * easts)
3971 centroid_d = num.sum(weights * depths)
3973 centroid_lat, centroid_lon = ne_to_latlon(
3974 self.lat, self.lon, centroid_n, centroid_e)
3976 moment_rate_, times = self.get_moment_rate(store)
3977 delta_times = num.concatenate((
3978 [times[1] - times[0]],
3979 num.diff(times)))
3980 moment_src = delta_times * moment_rate
3982 centroid_t = num.sum(
3983 moment_src / num.sum(moment_src) * times) + self.time
3985 mt = self.pyrocko_moment_tensor(store, *args, **kwargs)
3987 return model.Event(
3988 lat=centroid_lat,
3989 lon=centroid_lon,
3990 depth=centroid_d,
3991 time=centroid_t,
3992 moment_tensor=mt,
3993 magnitude=mt.magnitude,
3994 duration=t_max)
class DoubleDCSource(SourceWithMagnitude):
    '''
    Two double-couple point sources separated in space and time.

    The moment share between the two sub-sources is controlled by the
    parameter ``mix``. Depth, east and north shift are given for the
    centroid between the two double-couples. The sub-sources are positioned
    around this centroid according to their moment shares and their delta
    parameters, which are therefore relative to the centroid. Note that the
    depth of the sub-sources can thus be ``depth +/- delta_depth``; for
    shallow earthquakes the depth therefore has to be chosen deep enough to
    avoid sampling above the surface.
    '''

    strike1 = Float.T(
        default=0.0,
        help='strike direction in [deg], measured clockwise from north')

    dip1 = Float.T(
        default=90.0,
        help='dip angle in [deg], measured downward from horizontal')

    azimuth = Float.T(
        default=0.0,
        help='azimuth to second double-couple [deg], '
             'measured at first, clockwise from north')

    rake1 = Float.T(
        default=0.0,
        help='rake angle in [deg], '
             'measured counter-clockwise from right-horizontal '
             'in on-plane view')

    strike2 = Float.T(
        default=0.0,
        help='strike direction in [deg], measured clockwise from north')

    dip2 = Float.T(
        default=90.0,
        help='dip angle in [deg], measured downward from horizontal')

    rake2 = Float.T(
        default=0.0,
        help='rake angle in [deg], '
             'measured counter-clockwise from right-horizontal '
             'in on-plane view')

    delta_time = Float.T(
        default=0.0,
        help='separation of double-couples in time (t2-t1) [s]')

    delta_depth = Float.T(
        default=0.0,
        help='difference in depth (z2-z1) [m]')

    distance = Float.T(
        default=0.0,
        help='distance between the two double-couples [m]')

    mix = Float.T(
        default=0.5,
        help='how to distribute the moment to the two doublecouples '
             'mix=0 -> m1=1 and m2=0; mix=1 -> m1=0, m2=1')

    stf1 = STF.T(
        optional=True,
        help='Source time function of subsource 1 '
             '(if given, overrides STF from attribute :py:gattr:`Source.stf`)')

    stf2 = STF.T(
        optional=True,
        help='Source time function of subsource 2 '
             '(if given, overrides STF from attribute :py:gattr:`Source.stf`)')

    discretized_source_class = meta.DiscretizedMTSource

    def base_key(self):
        # Key includes both sub-source mechanisms, their STFs and the
        # geometric offsets, so GF caching distinguishes all combinations.
        return (
            self.time, self.depth, self.lat, self.north_shift,
            self.lon, self.east_shift, type(self).__name__) + \
            self.effective_stf1_pre().base_key() + \
            self.effective_stf2_pre().base_key() + (
            self.strike1, self.dip1, self.rake1,
            self.strike2, self.dip2, self.rake2,
            self.delta_time, self.delta_depth,
            self.azimuth, self.distance, self.mix)

    def get_factor(self):
        # total moment; split between the sub-sources via `mix`
        return self.moment

    def effective_stf1_pre(self):
        # STF of subsource 1: stf1 overrides stf; fall back to unit pulse
        return self.stf1 or self.stf or g_unit_pulse

    def effective_stf2_pre(self):
        # STF of subsource 2: stf2 overrides stf; fall back to unit pulse
        return self.stf2 or self.stf or g_unit_pulse

    def effective_stf_post(self):
        # STFs are applied pre-discretization; nothing to convolve afterwards
        return g_unit_pulse

    def split(self):
        '''
        Split into the two constituent :py:class:`DCSource` objects.
        '''
        # a1, a2: moment fractions of subsource 1 and 2
        a1 = 1.0 - self.mix
        a2 = self.mix
        # horizontal offset vector between the sub-sources
        delta_north = math.cos(self.azimuth * d2r) * self.distance
        delta_east = math.sin(self.azimuth * d2r) * self.distance

        dc1 = DCSource(
            lat=self.lat,
            lon=self.lon,
            time=self.time - self.delta_time * a2,
            north_shift=self.north_shift - delta_north * a2,
            east_shift=self.east_shift - delta_east * a2,
            depth=self.depth - self.delta_depth * a2,
            moment=self.moment * a1,
            strike=self.strike1,
            dip=self.dip1,
            rake=self.rake1,
            stf=self.stf1 or self.stf)

        dc2 = DCSource(
            lat=self.lat,
            lon=self.lon,
            time=self.time + self.delta_time * a1,
            north_shift=self.north_shift + delta_north * a1,
            east_shift=self.east_shift + delta_east * a1,
            depth=self.depth + self.delta_depth * a1,
            moment=self.moment * a2,
            strike=self.strike2,
            dip=self.dip2,
            rake=self.rake2,
            stf=self.stf2 or self.stf)

        return [dc1, dc2]

    def discretize_basesource(self, store, target=None):
        # a1, a2: moment fractions of subsource 1 and 2
        a1 = 1.0 - self.mix
        a2 = self.mix
        mot1 = pmt.MomentTensor(strike=self.strike1, dip=self.dip1,
                                rake=self.rake1, scalar_moment=a1)
        mot2 = pmt.MomentTensor(strike=self.strike2, dip=self.dip2,
                                rake=self.rake2, scalar_moment=a2)

        # horizontal offset vector between the sub-sources
        delta_north = math.cos(self.azimuth * d2r) * self.distance
        delta_east = math.sin(self.azimuth * d2r) * self.distance

        times1, amplitudes1 = self.effective_stf1_pre().discretize_t(
            store.config.deltat, self.time - self.delta_time * a2)

        times2, amplitudes2 = self.effective_stf2_pre().discretize_t(
            store.config.deltat, self.time + self.delta_time * a1)

        nt1 = times1.size
        nt2 = times2.size

        ds = meta.DiscretizedMTSource(
            lat=self.lat,
            lon=self.lon,
            times=num.concatenate((times1, times2)),
            north_shifts=num.concatenate((
                num.repeat(self.north_shift - delta_north * a2, nt1),
                num.repeat(self.north_shift + delta_north * a1, nt2))),
            east_shifts=num.concatenate((
                num.repeat(self.east_shift - delta_east * a2, nt1),
                num.repeat(self.east_shift + delta_east * a1, nt2))),
            depths=num.concatenate((
                num.repeat(self.depth - self.delta_depth * a2, nt1),
                num.repeat(self.depth + self.delta_depth * a1, nt2))),
            m6s=num.vstack((
                mot1.m6()[num.newaxis, :] * amplitudes1[:, num.newaxis],
                mot2.m6()[num.newaxis, :] * amplitudes2[:, num.newaxis])))

        return ds

    def pyrocko_moment_tensor(self, store=None, target=None):
        # combined tensor is the sum of the two moment-weighted sub-tensors
        a1 = 1.0 - self.mix
        a2 = self.mix
        mot1 = pmt.MomentTensor(strike=self.strike1, dip=self.dip1,
                                rake=self.rake1,
                                scalar_moment=a1 * self.moment)
        mot2 = pmt.MomentTensor(strike=self.strike2, dip=self.dip2,
                                rake=self.rake2,
                                scalar_moment=a2 * self.moment)
        return pmt.MomentTensor(m=mot1.m() + mot2.m())

    def pyrocko_event(self, store=None, target=None, **kwargs):
        return SourceWithMagnitude.pyrocko_event(
            self, store, target,
            moment_tensor=self.pyrocko_moment_tensor(store, target),
            **kwargs)

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        d = {}
        mt = ev.moment_tensor
        if mt:
            # use the same nodal plane for both sub-sources and put all
            # moment into subsource 1 (mix=0.0)
            (strike, dip, rake), _ = mt.both_strike_dip_rake()
            d.update(
                strike1=float(strike),
                dip1=float(dip),
                rake1=float(rake),
                strike2=float(strike),
                dip2=float(dip),
                rake2=float(rake),
                mix=0.0,
                magnitude=float(mt.moment_magnitude()))

        d.update(kwargs)
        source = super(DoubleDCSource, cls).from_pyrocko_event(ev, **d)
        source.stf1 = source.stf
        source.stf2 = HalfSinusoidSTF(effective_duration=0.)
        source.stf = None
        return source
class RingfaultSource(SourceWithMagnitude):
    '''
    A ring fault with vertical doublecouples.

    The fault is discretized into ``npointsources`` vertical double-couple
    point sources placed on a circle of the given ``diameter``, rotated into
    the plane defined by ``strike`` and ``dip``.
    '''

    diameter = Float.T(
        default=1.0,
        help='diameter of the ring in [m]')

    sign = Float.T(
        default=1.0,
        help='inside of the ring moves up (+1) or down (-1)')

    strike = Float.T(
        default=0.0,
        help='strike direction of the ring plane, clockwise from north,'
             ' in [deg]')

    dip = Float.T(
        default=0.0,
        help='dip angle of the ring plane from horizontal in [deg]')

    npointsources = Int.T(
        default=360,
        help='number of point sources to use')

    discretized_source_class = meta.DiscretizedMTSource

    def base_key(self):
        return Source.base_key(self) + (
            self.strike, self.dip, self.diameter, self.npointsources)

    def get_factor(self):
        # total moment with the sign selecting up/down motion of the inside
        return self.sign * self.moment

    def discretize_basesource(self, store=None, target=None):
        # NOTE(review): `store` defaults to None but `store.config.deltat`
        # is accessed below -- a store must always be passed in practice.
        n = self.npointsources
        phi = num.linspace(0, 2.0 * num.pi, n, endpoint=False)

        # points on the ring, initially in the horizontal plane
        points = num.zeros((n, 3))
        points[:, 0] = num.cos(phi) * 0.5 * self.diameter
        points[:, 1] = num.sin(phi) * 0.5 * self.diameter

        # tilt the ring into the plane given by strike and dip
        rotmat = pmt.euler_to_matrix(self.dip * d2r, self.strike * d2r, 0.0)
        points = num.dot(rotmat.T, points.T).T  # !!! ?

        points[:, 0] += self.north_shift
        points[:, 1] += self.east_shift
        points[:, 2] += self.depth

        # unit vertical double-couple; moment shared equally by all points
        m = num.array(pmt.MomentTensor(strike=90., dip=90., rake=-90.,
                                       scalar_moment=1.0 / n).m())

        # per-point rotation matrices about the ring axis (one per angle phi)
        rotmats = num.transpose(
            [[num.cos(phi), num.sin(phi), num.zeros(n)],
             [-num.sin(phi), num.cos(phi), num.zeros(n)],
             [num.zeros(n), num.zeros(n), num.ones(n)]], (2, 0, 1))

        ms = num.zeros((n, 3, 3))
        for i in range(n):
            # rotate mechanism around the ring, then tilt into the ring plane
            mtemp = num.dot(rotmats[i].T, num.dot(m, rotmats[i]))
            ms[i, :, :] = num.dot(rotmat.T, num.dot(mtemp, rotmat))

        # pack tensors into (n, 6) [mnn, mee, mdd, mne, mnd, med] layout
        m6s = num.vstack((ms[:, 0, 0], ms[:, 1, 1], ms[:, 2, 2],
                          ms[:, 0, 1], ms[:, 0, 2], ms[:, 1, 2])).T

        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)

        nt = times.size

        return meta.DiscretizedMTSource(
            times=num.tile(times, n),
            lat=self.lat,
            lon=self.lon,
            north_shifts=num.repeat(points[:, 0], nt),
            east_shifts=num.repeat(points[:, 1], nt),
            depths=num.repeat(points[:, 2], nt),
            m6s=num.repeat(m6s, nt, axis=0) * num.tile(
                amplitudes, n)[:, num.newaxis])
class CombiSource(Source):
    '''
    Composite source model.

    Aggregates several sub-sources into a single source object with a common
    origin (taken from the first sub-source) and averaged time, depth and
    horizontal shifts.
    '''

    discretized_source_class = meta.DiscretizedMTSource

    subsources = List.T(Source.T())

    def __init__(self, subsources=None, **kwargs):
        '''
        :param subsources:
            Sub-sources to be combined; at least one is required.
        :type subsources:
            list of :py:class:`Source`
        '''
        # avoid the mutable default argument ([]) of the original signature
        if subsources is None:
            subsources = []

        if not subsources:
            raise BadRequest(
                'Need at least one sub-source to create a CombiSource object.')

        lats = num.array(
            [subsource.lat for subsource in subsources], dtype=float)
        lons = num.array(
            [subsource.lon for subsource in subsources], dtype=float)

        lat, lon = lats[0], lons[0]

        # Bugfix: the original condition `not num.all(lats == lat) and
        # num.all(lons == lon)` only triggered when latitudes differed while
        # all longitudes agreed (operator precedence). Intended behavior:
        # shift sub-sources whenever they do not all share a common origin.
        if not (num.all(lats == lat) and num.all(lons == lon)):
            subsources = [s.clone() for s in subsources]
            for subsource in subsources[1:]:
                subsource.set_origin(lat, lon)

        # aggregate location/time of the composite source
        depth = float(num.mean([p.depth for p in subsources]))
        time = float(num.mean([p.time for p in subsources]))
        north_shift = float(num.mean([p.north_shift for p in subsources]))
        east_shift = float(num.mean([p.east_shift for p in subsources]))
        kwargs.update(
            time=time,
            lat=float(lat),
            lon=float(lon),
            north_shift=north_shift,
            east_shift=east_shift,
            depth=depth)

        Source.__init__(self, subsources=subsources, **kwargs)

    def get_factor(self):
        # scaling is applied per sub-source in discretize_basesource()
        return 1.0

    def discretize_basesource(self, store, target=None):
        '''
        Discretize all sub-sources and combine them into one.
        '''
        dsources = []
        for sf in self.subsources:
            ds = sf.discretize_basesource(store, target)
            ds.m6s *= sf.get_factor()
            dsources.append(ds)

        return meta.DiscretizedMTSource.combine(dsources)
class SFSource(Source):
    '''
    A single force point source.

    Supported GF schemes: `'elastic5'`.
    '''

    discretized_source_class = meta.DiscretizedSFSource

    fn = Float.T(
        default=0.,
        help='northward component of single force [N]')

    fe = Float.T(
        default=0.,
        help='eastward component of single force [N]')

    fd = Float.T(
        default=0.,
        help='downward component of single force [N]')

    def __init__(self, **kwargs):
        Source.__init__(self, **kwargs)

    def base_key(self):
        # force components enter the key so GF lookups distinguish sources
        return Source.base_key(self) + (self.fn, self.fe, self.fd)

    def get_factor(self):
        # forces are used as-is; no global scaling factor
        return 1.0

    def discretize_basesource(self, store, target=None):
        '''
        Discretize the source into point sub-sources in time.
        '''
        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)

        # scale the constant NED force vector by the STF amplitudes
        force_ned = num.array(
            [[self.fn, self.fe, self.fd]], dtype=float)
        forces = amplitudes[:, num.newaxis] * force_ned

        return meta.DiscretizedSFSource(
            forces=forces,
            **self._dparams_base_repeated(times))

    def pyrocko_event(self, store=None, target=None, **kwargs):
        return Source.pyrocko_event(
            self, store, target,
            **kwargs)

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        d = dict(kwargs)
        return super(SFSource, cls).from_pyrocko_event(ev, **d)
class PorePressurePointSource(Source):
    '''
    Excess pore pressure point source.

    For poro-elastic initial value problem where an excess pore pressure is
    brought into a small source volume.
    '''

    discretized_source_class = meta.DiscretizedPorePressureSource

    pp = Float.T(
        default=1.0,
        help='initial excess pore pressure in [Pa]')

    def base_key(self):
        # no extra parameters beyond the base source location/time
        return Source.base_key(self)

    def get_factor(self):
        # overall scaling is the pore pressure itself
        return self.pp

    def discretize_basesource(self, store, target=None):
        # unit pore pressure; the actual amplitude is applied via
        # get_factor(). NOTE(review): `arr` is presumably a module-level
        # array-conversion helper defined elsewhere in this file -- confirm.
        return meta.DiscretizedPorePressureSource(pp=arr(1.0),
                                                  **self._dparams_base())
class PorePressureLineSource(Source):
    '''
    Excess pore pressure line source.

    The line source is centered at (north_shift, east_shift, depth).
    '''

    discretized_source_class = meta.DiscretizedPorePressureSource

    pp = Float.T(
        default=1.0,
        help='initial excess pore pressure in [Pa]')

    length = Float.T(
        default=0.0,
        help='length of the line source [m]')

    azimuth = Float.T(
        default=0.0,
        help='azimuth direction, clockwise from north [deg]')

    dip = Float.T(
        default=90.,
        help='dip direction, downward from horizontal [deg]')

    def base_key(self):
        # orientation and length distinguish GF lookups
        return Source.base_key(self) + (self.azimuth, self.dip, self.length)

    def get_factor(self):
        # overall scaling is the pore pressure itself
        return self.pp

    def discretize_basesource(self, store, target=None):

        # number of point sources, chosen relative to the smallest GF
        # sampling distance; odd count keeps one point at the line center
        n = 2 * int(math.ceil(self.length / num.min(store.config.deltas))) + 1

        # positions along the line, relative to its center
        a = num.linspace(-0.5 * self.length, 0.5 * self.length, n)

        # direction cosines of the line in NED coordinates
        sa = math.sin(self.azimuth * d2r)
        ca = math.cos(self.azimuth * d2r)
        sd = math.sin(self.dip * d2r)
        cd = math.cos(self.dip * d2r)

        points = num.zeros((n, 3))
        points[:, 0] = self.north_shift + a * ca * cd
        points[:, 1] = self.east_shift + a * sa * cd
        points[:, 2] = self.depth + a * sd

        return meta.DiscretizedPorePressureSource(
            times=util.num_full(n, self.time),
            lat=self.lat,
            lon=self.lon,
            north_shifts=points[:, 0],
            east_shifts=points[:, 1],
            depths=points[:, 2],
            pp=num.ones(n) / n)  # unit total pressure, evenly distributed
class Request(Object):
    '''
    Synthetic seismogram computation request.

    ::

        Request(**kwargs)
        Request(sources, targets, **kwargs)
    '''

    sources = List.T(
        Source.T(),
        help='list of sources for which to produce synthetics.')

    targets = List.T(
        Target.T(),
        help='list of targets for which to produce synthetics.')

    @classmethod
    def args2kwargs(cls, args):
        '''
        Translate positional call forms into keyword arguments.

        Only the two-argument form ``(sources, targets)`` carries
        information; zero and three positional arguments map to ``{}``.
        '''
        if len(args) not in (0, 2, 3):
            raise BadRequest('Invalid arguments.')

        if len(args) == 2:
            return dict(sources=args[0], targets=args[1])
        else:
            return {}

    def __init__(self, *args, **kwargs):
        kwargs.update(self.args2kwargs(args))
        sources = kwargs.pop('sources', [])
        targets = kwargs.pop('targets', [])

        # Allow single objects in place of lists for convenience.
        if isinstance(sources, Source):
            sources = [sources]

        if isinstance(targets, (Target, StaticTarget)):
            targets = [targets]

        Object.__init__(self, sources=sources, targets=targets, **kwargs)

    @property
    def targets_dynamic(self):
        # Waveform (time series) targets only.
        return [t for t in self.targets if isinstance(t, Target)]

    @property
    def targets_static(self):
        # Static displacement targets only.
        return [t for t in self.targets if isinstance(t, StaticTarget)]

    @property
    def has_dynamic(self):
        return bool(self.targets_dynamic)

    @property
    def has_statics(self):
        return bool(self.targets_static)

    def subsources_map(self):
        '''
        Group sources by their ``base_key()``.
        '''
        m = defaultdict(list)
        for source in self.sources:
            m[source.base_key()].append(source)

        return m

    def subtargets_map(self):
        '''
        Group targets by their ``base_key()``.
        '''
        m = defaultdict(list)
        for target in self.targets:
            m[target.base_key()].append(target)

        return m

    def subrequest_map(self):
        '''
        Cartesian product of source groups and target groups.
        '''
        ms = self.subsources_map()
        mt = self.subtargets_map()
        m = {}
        for (ks, ls) in ms.items():
            for (kt, lt) in mt.items():
                m[ks, kt] = (ls, lt)

        return m
class ProcessingStats(Object):
    '''
    Timing and accounting statistics of one processing run.

    The ``t_perc_*`` attributes hold percentages of the total dynamic or
    static processing time spent in the respective stage (filled in by
    the engine's ``process`` method).
    '''

    # Dynamic (waveform) processing stage percentages.
    t_perc_get_store_and_receiver = Float.T(default=0.)
    t_perc_discretize_source = Float.T(default=0.)
    t_perc_make_base_seismogram = Float.T(default=0.)
    t_perc_make_same_span = Float.T(default=0.)
    t_perc_post_process = Float.T(default=0.)
    t_perc_optimize = Float.T(default=0.)
    t_perc_stack = Float.T(default=0.)
    # Static processing stage percentages.
    t_perc_static_get_store = Float.T(default=0.)
    t_perc_static_discretize_basesource = Float.T(default=0.)
    t_perc_static_sum_statics = Float.T(default=0.)
    t_perc_static_post_process = Float.T(default=0.)
    # Total wallclock time [s] and CPU time [s] (CPU/IO figures only
    # available when the ``resource`` module could be imported).
    t_wallclock = Float.T(default=0.)
    t_cpu = Float.T(default=0.)
    n_read_blocks = Int.T(default=0)
    n_results = Int.T(default=0)
    n_subrequests = Int.T(default=0)
    n_stores = Int.T(default=0)
    n_records_stacked = Int.T(default=0)
class Response(Object):
    '''
    Response object to a synthetic seismogram computation request.
    '''

    request = Request.T()
    results_list = List.T(List.T(meta.SeismosizerResult.T()))
    stats = ProcessingStats.T()

    def pyrocko_traces(self):
        '''
        Return a list of requested
        :class:`~pyrocko.trace.Trace` instances.
        '''

        traces = []
        for results in self.results_list:
            for result in results:
                # Failed subrequests are stored as exception objects in
                # results_list; skip anything that is not a proper result.
                if not isinstance(result, meta.Result):
                    continue
                traces.append(result.trace.pyrocko_trace())

        return traces

    def kite_scenes(self):
        '''
        Return a list of requested
        :class:`~kite.scenes` instances.
        '''
        kite_scenes = []
        for results in self.results_list:
            for result in results:
                if isinstance(result, meta.KiteSceneResult):
                    sc = result.get_scene()
                    kite_scenes.append(sc)

        return kite_scenes

    def static_results(self):
        '''
        Return a list of requested
        :class:`~pyrocko.gf.meta.StaticResult` instances.
        '''
        statics = []
        for results in self.results_list:
            for result in results:
                if not isinstance(result, meta.StaticResult):
                    continue
                statics.append(result)

        return statics

    def iter_results(self, get='pyrocko_traces'):
        '''
        Generator function to iterate over results of request.

        Yields associated :py:class:`Source`,
        :class:`~pyrocko.gf.targets.Target`,
        :class:`~pyrocko.trace.Trace` instances in each iteration.
        '''

        for isource, source in enumerate(self.request.sources):
            for itarget, target in enumerate(self.request.targets):
                result = self.results_list[isource][itarget]
                if get == 'pyrocko_traces':
                    yield source, target, result.trace.pyrocko_trace()
                elif get == 'results':
                    yield source, target, result

    def snuffle(self, **kwargs):
        '''
        Open *snuffler* with requested traces.
        '''

        trace.snuffle(self.pyrocko_traces(), **kwargs)
class Engine(Object):
    '''
    Base class for synthetic seismogram calculators.
    '''

    def get_store_ids(self):
        '''
        Get list of available GF store IDs.
        '''

        return []
class Rule(object):
    '''
    Base class for rules mapping raw GF store components to a requested
    output quantity (see ``channel_rules`` / ``static_rules`` below).
    '''
    pass
class VectorRule(Rule):
    '''
    Rule to assemble a 3-component (north, east, down) vector quantity,
    projected onto the target's measurement direction.
    '''

    def __init__(self, quantity, differentiate=0, integrate=0):
        # Component channel names in the GF store, e.g.
        # ['displacement.n', 'displacement.e', 'displacement.d'].
        self.components = [quantity + '.' + c for c in 'ned']
        self.differentiate = differentiate
        self.integrate = integrate

    def required_components(self, target):
        # Only components with a non-vanishing projection factor need to
        # be computed by the store.
        n, e, d = self.components
        sa, ca, sd, cd = target.get_sin_cos_factors()

        comps = []
        if nonzero(ca * cd):
            comps.append(n)

        if nonzero(sa * cd):
            comps.append(e)

        if nonzero(sd):
            comps.append(d)

        return tuple(comps)

    def apply_(self, target, base_seismogram):
        n, e, d = self.components
        sa, ca, sd, cd = target.get_sin_cos_factors()

        # Sum up the weighted contributions; deltat is (re)bound in each
        # taken branch so it always refers to a component actually used.
        if nonzero(ca * cd):
            data = base_seismogram[n].data * (ca * cd)
            deltat = base_seismogram[n].deltat
        else:
            data = 0.0

        if nonzero(sa * cd):
            data = data + base_seismogram[e].data * (sa * cd)
            deltat = base_seismogram[e].deltat

        if nonzero(sd):
            data = data + base_seismogram[d].data * sd
            deltat = base_seismogram[d].deltat

        if self.differentiate:
            data = util.diff_fd(self.differentiate, 4, deltat, data)

        if self.integrate:
            raise NotImplementedError('Integration is not implemented yet.')

        return data
class HorizontalVectorRule(Rule):
    '''
    Rule to assemble a horizontal (north, east) vector quantity, projected
    onto the target's azimuth.
    '''

    def __init__(self, quantity, differentiate=0, integrate=0):
        # Component channel names in the GF store, e.g.
        # ['vertical_tilt.n', 'vertical_tilt.e'].
        self.components = [quantity + '.' + c for c in 'ne']
        self.differentiate = differentiate
        self.integrate = integrate

    def required_components(self, target):
        # Only components with a non-vanishing projection factor need to
        # be computed by the store.
        n, e = self.components
        sa, ca, _, _ = target.get_sin_cos_factors()

        comps = []
        if nonzero(ca):
            comps.append(n)

        if nonzero(sa):
            comps.append(e)

        return tuple(comps)

    def apply_(self, target, base_seismogram):
        n, e = self.components
        sa, ca, _, _ = target.get_sin_cos_factors()

        # Bug fix: ``deltat`` is now bound in whichever branch is taken
        # (mirroring VectorRule.apply_). Previously it was read
        # unconditionally from base_seismogram[e] in the differentiate
        # branch, raising KeyError for targets where the east component
        # was not required (sa == 0).
        if nonzero(ca):
            data = base_seismogram[n].data * ca
            deltat = base_seismogram[n].deltat
        else:
            data = 0.0

        if nonzero(sa):
            data = data + base_seismogram[e].data * sa
            deltat = base_seismogram[e].deltat

        if self.differentiate:
            data = util.diff_fd(self.differentiate, 4, deltat, data)

        if self.integrate:
            raise NotImplementedError('Integration is not implemented yet.')

        return data
class ScalarRule(Rule):
    '''
    Rule for scalar quantities (e.g. pore pressure) taken from a single
    GF store component.
    '''

    def __init__(self, quantity, differentiate=0):
        self.c = quantity
        # Bug fix: ``differentiate`` was accepted but never stored, so
        # ``apply_`` crashed with AttributeError when consulting
        # ``self.differentiate``.
        self.differentiate = differentiate

    def required_components(self, target):
        return (self.c, )

    def apply_(self, target, base_seismogram):
        # Copy so that differentiation cannot modify the cached component.
        data = base_seismogram[self.c].data.copy()
        deltat = base_seismogram[self.c].deltat
        if self.differentiate:
            data = util.diff_fd(self.differentiate, 4, deltat, data)

        return data
class StaticDisplacement(Rule):
    '''
    Rule producing static displacement fields; for satellite targets a
    line-of-sight component is added from the NED components.
    '''

    def required_components(self, target):
        # All three components are always needed.
        return tuple('displacement.' + c for c in 'ned')

    def apply_(self, target, base_statics):
        if isinstance(target, SatelliteTarget):
            los_fac = target.get_los_factors()
            down = base_statics['displacement.d']
            east = base_statics['displacement.e']
            north = base_statics['displacement.n']
            base_statics['displacement.los'] = (
                los_fac[:, 0] * -down
                + los_fac[:, 1] * east
                + los_fac[:, 2] * north)
        return base_statics
# Ordered rule lookup per requested quantity: the first rule whose required
# components are all provided by the target's GF store wins (see
# LocalEngine.get_rule). E.g. 'velocity' is taken natively if available,
# otherwise obtained by differentiating 'displacement'.
channel_rules = {
    'displacement': [VectorRule('displacement')],
    'rotation': [VectorRule('rotation')],
    'velocity': [
        VectorRule('velocity'),
        VectorRule('displacement', differentiate=1)],
    'acceleration': [
        VectorRule('acceleration'),
        VectorRule('velocity', differentiate=1),
        VectorRule('displacement', differentiate=2)],
    'pore_pressure': [ScalarRule('pore_pressure')],
    'vertical_tilt': [HorizontalVectorRule('vertical_tilt')],
    'darcy_velocity': [VectorRule('darcy_velocity')],
}

# Rules available for static (non-time-dependent) targets.
static_rules = {
    'displacement': [StaticDisplacement()]
}
class OutOfBoundsContext(Object):
    '''
    Diagnostic details attached to out-of-bounds errors raised during
    seismogram processing.
    '''

    source = Source.T()
    target = Target.T()
    distance = Float.T()
    components = List.T(String.T())
def process_dynamic_timeseries(work, psources, ptargets, engine, nthreads=0):
    '''
    Generator computing dynamic results via the batched
    ``engine.base_seismograms`` path, yielding one
    ``((isource, itarget, result), tcounters)`` tuple per source/target
    pair.
    '''
    dsource_cache = {}
    # Dummy stage counters; this code path does not time individual
    # stages, but yields the same tuple shape as process_dynamic.
    tcounters = list(range(6))

    store_ids = set()
    sources = set()
    targets = set()

    # Remember each target's position in the request for the yielded index.
    for itarget, target in enumerate(ptargets):
        target._id = itarget

    for w in work:
        _, _, isources, itargets = w

        sources.update([psources[isource] for isource in isources])
        targets.update([ptargets[itarget] for itarget in itargets])

    store_ids = set([t.store_id for t in targets])

    for isource, source in enumerate(psources):

        # Union of all GF components needed by any target for this source.
        components = set()
        for itarget, target in enumerate(targets):
            rule = engine.get_rule(source, target)
            components.update(rule.required_components(target))

        for store_id in store_ids:
            store_targets = [t for t in targets if t.store_id == store_id]

            sample_rates = set([t.sample_rate for t in store_targets])
            interpolations = set([t.interpolation for t in store_targets])

            base_seismograms = []
            store_targets_out = []

            # Batch targets with identical sample rate and interpolation.
            for samp_rate in sample_rates:
                for interp in interpolations:
                    engine_targets = [
                        t for t in store_targets if t.sample_rate == samp_rate
                        and t.interpolation == interp]

                    if not engine_targets:
                        continue

                    store_targets_out += engine_targets

                    base_seismograms += engine.base_seismograms(
                        source,
                        engine_targets,
                        components,
                        dsource_cache,
                        nthreads)

            for iseis, seismogram in enumerate(base_seismograms):
                for tr in seismogram.values():
                    if tr.err != store.SeismosizerErrorEnum.SUCCESS:
                        # NOTE(review): base_seismograms is aligned with
                        # store_targets_out, not store_targets — indexing
                        # store_targets[iseis] here may name the wrong
                        # target in the error context; confirm.
                        e = SeismosizerError(
                            'Seismosizer failed with return code %i\n%s' % (
                                tr.err, str(
                                    OutOfBoundsContext(
                                        source=source,
                                        target=store_targets[iseis],
                                        distance=source.distance_to(
                                            store_targets[iseis]),
                                        components=components))))
                        raise e

            for seismogram, target in zip(base_seismograms, store_targets_out):

                try:
                    result = engine._post_process_dynamic(
                        seismogram, source, target)
                except SeismosizerError as e:
                    # Errors are yielded as results, not raised.
                    result = e

                yield (isource, target._id, result), tcounters
def process_dynamic(work, psources, ptargets, engine, nthreads=0):
    '''
    Generator computing dynamic results one source/target pair at a time,
    yielding ``((isource, itarget, result), tcounters)`` tuples with
    per-stage timing checkpoints.
    '''
    dsource_cache = {}

    for w in work:
        _, _, isources, itargets = w

        sources = [psources[isource] for isource in isources]
        targets = [ptargets[itarget] for itarget in itargets]

        # Union of GF components required by any target of this subrequest.
        components = set()
        for target in targets:
            rule = engine.get_rule(sources[0], target)
            components.update(rule.required_components(target))

        for isource, source in zip(isources, sources):
            for itarget, target in zip(itargets, targets):

                try:
                    base_seismogram, tcounters = engine.base_seismogram(
                        source, target, components, dsource_cache, nthreads)
                except meta.OutOfBounds as e:
                    # NOTE(review): context reports the subrequest's first
                    # source/target, not necessarily the failing pair.
                    e.context = OutOfBoundsContext(
                        source=sources[0],
                        target=targets[0],
                        distance=sources[0].distance_to(targets[0]),
                        components=components)
                    raise

                n_records_stacked = 0
                t_optimize = 0.0
                t_stack = 0.0

                # Aggregate stacking statistics over all components.
                for _, tr in base_seismogram.items():
                    n_records_stacked += tr.n_records_stacked
                    t_optimize += tr.t_optimize
                    t_stack += tr.t_stack

                try:
                    result = engine._post_process_dynamic(
                        base_seismogram, source, target)
                    result.n_records_stacked = n_records_stacked
                    result.n_shared_stacking = len(sources) *\
                        len(targets)
                    result.t_optimize = t_optimize
                    result.t_stack = t_stack
                except SeismosizerError as e:
                    # Errors are yielded as results, not raised.
                    result = e

                tcounters.append(xtime())
                yield (isource, itarget, result), tcounters
def process_static(work, psources, ptargets, engine, nthreads=0):
    '''
    Generator computing static results one source/target pair at a time,
    yielding ``((isource, itarget, result), tcounters)`` tuples with
    per-stage timing checkpoints.
    '''
    for w in work:
        _, _, isources, itargets = w

        sources = [psources[isource] for isource in isources]
        targets = [ptargets[itarget] for itarget in itargets]

        for isource, source in zip(isources, sources):
            for itarget, target in zip(itargets, targets):
                components = engine.get_rule(source, target)\
                    .required_components(target)

                try:
                    base_statics, tcounters = engine.base_statics(
                        source, target, components, nthreads)
                except meta.OutOfBounds as e:
                    # NOTE(review): context reports the subrequest's first
                    # source/target, not necessarily the failing pair.
                    e.context = OutOfBoundsContext(
                        source=sources[0],
                        target=targets[0],
                        distance=float('nan'),
                        components=components)
                    raise
                result = engine._post_process_statics(
                    base_statics, source, target)
                tcounters.append(xtime())

                yield (isource, itarget, result), tcounters
class LocalEngine(Engine):
    '''
    Offline synthetic seismogram calculator.

    :param use_env: if ``True``, fill :py:attr:`store_superdirs` and
        :py:attr:`store_dirs` with paths set in environment variables
        GF_STORE_SUPERDIRS and GF_STORE_DIRS.
    :param use_config: if ``True``, fill :py:attr:`store_superdirs` and
        :py:attr:`store_dirs` with paths set in the user's config file.

        The config file can be found at :file:`~/.pyrocko/config.pf`

        .. code-block :: python

            gf_store_dirs: ['/home/pyrocko/gf_stores/ak135/']
            gf_store_superdirs: ['/home/pyrocko/gf_stores/']
    '''

    store_superdirs = List.T(
        String.T(),
        help="directories which are searched for Green's function stores")

    store_dirs = List.T(
        String.T(),
        help="additional individual Green's function store directories")

    default_store_id = String.T(
        optional=True,
        help='default store ID to be used when a request does not provide '
             'one')
    def __init__(self, **kwargs):
        # ``use_env`` and ``use_config`` are consumed here; they are not
        # guts attributes and must not reach Engine.__init__.
        use_env = kwargs.pop('use_env', False)
        use_config = kwargs.pop('use_config', False)
        Engine.__init__(self, **kwargs)
        if use_env:
            # Colon-separated path lists, like PATH.
            env_store_superdirs = os.environ.get('GF_STORE_SUPERDIRS', '')
            env_store_dirs = os.environ.get('GF_STORE_DIRS', '')
            if env_store_superdirs:
                self.store_superdirs.extend(env_store_superdirs.split(':'))

            if env_store_dirs:
                self.store_dirs.extend(env_store_dirs.split(':'))

        if use_config:
            c = config.config()
            self.store_superdirs.extend(c.gf_store_superdirs)
            self.store_dirs.extend(c.gf_store_dirs)

        self._check_store_dirs_type()
        # Lazily filled caches: store ID -> directory, store ID -> Store.
        self._id_to_store_dir = {}
        self._open_stores = {}
        self._effective_default_store_id = None
5042 def _check_store_dirs_type(self):
5043 for sdir in ['store_dirs', 'store_superdirs']:
5044 if not isinstance(self.__getattribute__(sdir), list):
5045 raise TypeError('{} of {} is not of type list'.format(
5046 sdir, self.__class__.__name__))
    def _get_store_id(self, store_dir):
        # Open the store only briefly to read its ID from the config.
        store_ = store.Store(store_dir)
        store_id = store_.config.id
        store_.close()
        return store_id
5054 def _looks_like_store_dir(self, store_dir):
5055 return os.path.isdir(store_dir) and \
5056 all(os.path.isfile(pjoin(store_dir, x)) for x in
5057 ('index', 'traces', 'config'))
5059 def iter_store_dirs(self):
5060 store_dirs = set()
5061 for d in self.store_superdirs:
5062 if not os.path.exists(d):
5063 logger.warning('store_superdir not available: %s' % d)
5064 continue
5066 for entry in os.listdir(d):
5067 store_dir = os.path.realpath(pjoin(d, entry))
5068 if self._looks_like_store_dir(store_dir):
5069 store_dirs.add(store_dir)
5071 for store_dir in self.store_dirs:
5072 store_dirs.add(os.path.realpath(store_dir))
5074 return store_dirs
    def _scan_stores(self):
        # Populate the store-ID -> directory mapping; the same ID may only
        # appear once (pointing at one unique real path).
        for store_dir in self.iter_store_dirs():
            store_id = self._get_store_id(store_dir)
            if store_id not in self._id_to_store_dir:
                self._id_to_store_dir[store_id] = store_dir
            else:
                if store_dir != self._id_to_store_dir[store_id]:
                    raise DuplicateStoreId(
                        'GF store ID %s is used in (at least) two '
                        'different stores. Locations are: %s and %s' %
                        (store_id, self._id_to_store_dir[store_id], store_dir))
5088 def get_store_dir(self, store_id):
5089 '''
5090 Lookup directory given a GF store ID.
5091 '''
5093 if store_id not in self._id_to_store_dir:
5094 self._scan_stores()
5096 if store_id not in self._id_to_store_dir:
5097 raise NoSuchStore(store_id, self.iter_store_dirs())
5099 return self._id_to_store_dir[store_id]
5101 def get_store_ids(self):
5102 '''
5103 Get list of available store IDs.
5104 '''
5106 self._scan_stores()
5107 return sorted(self._id_to_store_dir.keys())
5109 def effective_default_store_id(self):
5110 if self._effective_default_store_id is None:
5111 if self.default_store_id is None:
5112 store_ids = self.get_store_ids()
5113 if len(store_ids) == 1:
5114 self._effective_default_store_id = self.get_store_ids()[0]
5115 else:
5116 raise NoDefaultStoreSet()
5117 else:
5118 self._effective_default_store_id = self.default_store_id
5120 return self._effective_default_store_id
5122 def get_store(self, store_id=None):
5123 '''
5124 Get a store from the engine.
5126 :param store_id: identifier of the store (optional)
5127 :returns: :py:class:`~pyrocko.gf.store.Store` object
5129 If no ``store_id`` is provided the store
5130 associated with the :py:gattr:`default_store_id` is returned.
5131 Raises :py:exc:`NoDefaultStoreSet` if :py:gattr:`default_store_id` is
5132 undefined.
5133 '''
5135 if store_id is None:
5136 store_id = self.effective_default_store_id()
5138 if store_id not in self._open_stores:
5139 store_dir = self.get_store_dir(store_id)
5140 self._open_stores[store_id] = store.Store(store_dir)
5142 return self._open_stores[store_id]
5144 def get_store_config(self, store_id):
5145 store = self.get_store(store_id)
5146 return store.config
5148 def get_store_extra(self, store_id, key):
5149 store = self.get_store(store_id)
5150 return store.get_extra(key)
5152 def close_cashed_stores(self):
5153 '''
5154 Close and remove ids from cashed stores.
5155 '''
5156 store_ids = []
5157 for store_id, store_ in self._open_stores.items():
5158 store_.close()
5159 store_ids.append(store_id)
5161 for store_id in store_ids:
5162 self._open_stores.pop(store_id)
5164 def get_rule(self, source, target):
5165 cprovided = self.get_store(target.store_id).get_provided_components()
5167 if isinstance(target, StaticTarget):
5168 quantity = target.quantity
5169 available_rules = static_rules
5170 elif isinstance(target, Target):
5171 quantity = target.effective_quantity()
5172 available_rules = channel_rules
5174 try:
5175 for rule in available_rules[quantity]:
5176 cneeded = rule.required_components(target)
5177 if all(c in cprovided for c in cneeded):
5178 return rule
5180 except KeyError:
5181 pass
5183 raise BadRequest(
5184 'No rule to calculate "%s" with GFs from store "%s" '
5185 'for source model "%s".' % (
5186 target.effective_quantity(),
5187 target.store_id,
5188 source.__class__.__name__))
5190 def _cached_discretize_basesource(self, source, store, cache, target):
5191 if (source, store) not in cache:
5192 cache[source, store] = source.discretize_basesource(store, target)
5194 return cache[source, store]
    def base_seismograms(self, source, targets, components, dsource_cache,
                         nthreads=0):
        '''
        Compute raw component seismograms for one source and a batch of
        targets sharing store, sample rate and interpolation scheme.
        '''

        target = targets[0]

        interp = set([t.interpolation for t in targets])
        if len(interp) > 1:
            raise BadRequest('Targets have different interpolation schemes.')

        rates = set([t.sample_rate for t in targets])
        if len(rates) > 1:
            raise BadRequest('Targets have different sample rates.')

        store_ = self.get_store(target.store_id)
        receivers = [t.receiver(store_) for t in targets]

        if target.sample_rate is not None:
            deltat = 1. / target.sample_rate
            rate = target.sample_rate
        else:
            deltat = None
            rate = store_.config.sample_rate

        # Requested time windows per target; non-finite bounds mean
        # "no explicit window" and are masked out below.
        tmin = num.fromiter(
            (t.tmin for t in targets), dtype=float, count=len(targets))
        tmax = num.fromiter(
            (t.tmax for t in targets), dtype=float, count=len(targets))

        mask = num.logical_and(num.isfinite(tmin), num.isfinite(tmax))

        itmin = num.zeros_like(tmin, dtype=num.int64)
        itmax = num.zeros_like(tmin, dtype=num.int64)
        nsamples = num.full_like(tmin, -1, dtype=num.int64)

        itmin[mask] = num.floor(tmin[mask] * rate).astype(num.int64)
        itmax[mask] = num.ceil(tmax[mask] * rate).astype(num.int64)
        # nsamples == -1 signals "full span" for unmasked targets.
        nsamples = itmax - itmin + 1
        nsamples[num.logical_not(mask)] = -1

        base_source = self._cached_discretize_basesource(
            source, store_, dsource_cache, target)

        base_seismograms = store_.calc_seismograms(
            base_source, receivers, components,
            deltat=deltat,
            itmin=itmin, nsamples=nsamples,
            interpolation=target.interpolation,
            optimization=target.optimization,
            nthreads=nthreads)

        # Harmonize all component traces of each seismogram to a common
        # time span.
        for i, base_seismogram in enumerate(base_seismograms):
            base_seismograms[i] = store.make_same_span(base_seismogram)

        return base_seismograms
5251 def base_seismogram(self, source, target, components, dsource_cache,
5252 nthreads):
5254 tcounters = [xtime()]
5256 store_ = self.get_store(target.store_id)
5257 receiver = target.receiver(store_)
5259 if target.tmin and target.tmax is not None:
5260 rate = store_.config.sample_rate
5261 itmin = int(num.floor(target.tmin * rate))
5262 itmax = int(num.ceil(target.tmax * rate))
5263 nsamples = itmax - itmin + 1
5264 else:
5265 itmin = None
5266 nsamples = None
5268 tcounters.append(xtime())
5269 base_source = self._cached_discretize_basesource(
5270 source, store_, dsource_cache, target)
5272 tcounters.append(xtime())
5274 if target.sample_rate is not None:
5275 deltat = 1. / target.sample_rate
5276 else:
5277 deltat = None
5279 base_seismogram = store_.seismogram(
5280 base_source, receiver, components,
5281 deltat=deltat,
5282 itmin=itmin, nsamples=nsamples,
5283 interpolation=target.interpolation,
5284 optimization=target.optimization,
5285 nthreads=nthreads)
5287 tcounters.append(xtime())
5289 base_seismogram = store.make_same_span(base_seismogram)
5291 tcounters.append(xtime())
5293 return base_seismogram, tcounters
5295 def base_statics(self, source, target, components, nthreads):
5296 tcounters = [xtime()]
5297 store_ = self.get_store(target.store_id)
5299 if target.tsnapshot is not None:
5300 rate = store_.config.sample_rate
5301 itsnapshot = int(num.floor(target.tsnapshot * rate))
5302 else:
5303 itsnapshot = None
5304 tcounters.append(xtime())
5306 base_source = source.discretize_basesource(store_, target=target)
5308 tcounters.append(xtime())
5310 base_statics = store_.statics(
5311 base_source,
5312 target,
5313 itsnapshot,
5314 components,
5315 target.interpolation,
5316 nthreads)
5318 tcounters.append(xtime())
5320 return base_statics, tcounters
    def _post_process_dynamic(self, base_seismogram, source, target):
        '''
        Combine raw GF components into the requested quantity, apply scale
        factors and convolve with the post-STF of the source.
        '''
        base_any = next(iter(base_seismogram.values()))
        deltat = base_any.deltat
        itmin = base_any.itmin

        rule = self.get_rule(source, target)
        data = rule.apply_(target, base_seismogram)

        factor = source.get_factor() * target.get_factor()
        if factor != 1.0:
            data = data * factor

        stf = source.effective_stf_post()

        times, amplitudes = stf.discretize_t(
            deltat, 0.0)

        # repeat end point to prevent boundary effects
        padded_data = num.empty(data.size + amplitudes.size, dtype=float)
        padded_data[:data.size] = data
        padded_data[data.size:] = data[-1]
        data = num.convolve(amplitudes, padded_data)

        # Account for the STF's time offset relative to the raw trace.
        tmin = itmin * deltat + times[0]

        tr = meta.SeismosizerTrace(
            codes=target.codes,
            data=data[:-amplitudes.size],
            deltat=deltat,
            tmin=tmin)

        return target.post_process(self, source, tr)
    def _post_process_statics(self, base_statics, source, starget):
        '''
        Combine raw static components into the requested quantity and
        apply the source's scale factor.
        '''
        rule = self.get_rule(source, starget)
        data = rule.apply_(starget, base_statics)

        # Scaling happens in place. NOTE(review): for the static rules
        # currently defined, ``data`` aliases ``base_statics`` (see
        # StaticDisplacement.apply_); confirm this aliasing still holds if
        # a future rule returns a fresh dict, since ``base_statics`` is
        # what is passed on below.
        factor = source.get_factor()
        if factor != 1.0:
            for v in data.values():
                v *= factor

        return starget.post_process(self, source, base_statics)
5366 def process(self, *args, **kwargs):
5367 '''
5368 Process a request.
5370 ::
5372 process(**kwargs)
5373 process(request, **kwargs)
5374 process(sources, targets, **kwargs)
5376 The request can be given a a :py:class:`Request` object, or such an
5377 object is created using ``Request(**kwargs)`` for convenience.
5379 :returns: :py:class:`Response` object
5380 '''
5382 if len(args) not in (0, 1, 2):
5383 raise BadRequest('Invalid arguments.')
5385 if len(args) == 1:
5386 kwargs['request'] = args[0]
5388 elif len(args) == 2:
5389 kwargs.update(Request.args2kwargs(args))
5391 request = kwargs.pop('request', None)
5392 status_callback = kwargs.pop('status_callback', None)
5393 calc_timeseries = kwargs.pop('calc_timeseries', True)
5395 nprocs = kwargs.pop('nprocs', None)
5396 nthreads = kwargs.pop('nthreads', 1)
5397 if nprocs is not None:
5398 nthreads = nprocs
5400 if request is None:
5401 request = Request(**kwargs)
5403 if resource:
5404 rs0 = resource.getrusage(resource.RUSAGE_SELF)
5405 rc0 = resource.getrusage(resource.RUSAGE_CHILDREN)
5406 tt0 = xtime()
5408 # make sure stores are open before fork()
5409 store_ids = set(target.store_id for target in request.targets)
5410 for store_id in store_ids:
5411 self.get_store(store_id)
5413 source_index = dict((x, i) for (i, x) in
5414 enumerate(request.sources))
5415 target_index = dict((x, i) for (i, x) in
5416 enumerate(request.targets))
5418 m = request.subrequest_map()
5420 skeys = sorted(m.keys(), key=cmp_to_key(cmp_none_aware))
5421 results_list = []
5423 for i in range(len(request.sources)):
5424 results_list.append([None] * len(request.targets))
5426 tcounters_dyn_list = []
5427 tcounters_static_list = []
5428 nsub = len(skeys)
5429 isub = 0
5431 # Processing dynamic targets through
5432 # parimap(process_subrequest_dynamic)
5434 if calc_timeseries:
5435 _process_dynamic = process_dynamic_timeseries
5436 else:
5437 _process_dynamic = process_dynamic
5439 if request.has_dynamic:
5440 work_dynamic = [
5441 (i, nsub,
5442 [source_index[source] for source in m[k][0]],
5443 [target_index[target] for target in m[k][1]
5444 if not isinstance(target, StaticTarget)])
5445 for (i, k) in enumerate(skeys)]
5447 for ii_results, tcounters_dyn in _process_dynamic(
5448 work_dynamic, request.sources, request.targets, self,
5449 nthreads):
5451 tcounters_dyn_list.append(num.diff(tcounters_dyn))
5452 isource, itarget, result = ii_results
5453 results_list[isource][itarget] = result
5455 if status_callback:
5456 status_callback(isub, nsub)
5458 isub += 1
5460 # Processing static targets through process_static
5461 if request.has_statics:
5462 work_static = [
5463 (i, nsub,
5464 [source_index[source] for source in m[k][0]],
5465 [target_index[target] for target in m[k][1]
5466 if isinstance(target, StaticTarget)])
5467 for (i, k) in enumerate(skeys)]
5469 for ii_results, tcounters_static in process_static(
5470 work_static, request.sources, request.targets, self,
5471 nthreads=nthreads):
5473 tcounters_static_list.append(num.diff(tcounters_static))
5474 isource, itarget, result = ii_results
5475 results_list[isource][itarget] = result
5477 if status_callback:
5478 status_callback(isub, nsub)
5480 isub += 1
5482 if status_callback:
5483 status_callback(nsub, nsub)
5485 tt1 = time.time()
5486 if resource:
5487 rs1 = resource.getrusage(resource.RUSAGE_SELF)
5488 rc1 = resource.getrusage(resource.RUSAGE_CHILDREN)
5490 s = ProcessingStats()
5492 if request.has_dynamic:
5493 tcumu_dyn = num.sum(num.vstack(tcounters_dyn_list), axis=0)
5494 t_dyn = float(num.sum(tcumu_dyn))
5495 perc_dyn = map(float, tcumu_dyn / t_dyn * 100.)
5496 (s.t_perc_get_store_and_receiver,
5497 s.t_perc_discretize_source,
5498 s.t_perc_make_base_seismogram,
5499 s.t_perc_make_same_span,
5500 s.t_perc_post_process) = perc_dyn
5501 else:
5502 t_dyn = 0.
5504 if request.has_statics:
5505 tcumu_static = num.sum(num.vstack(tcounters_static_list), axis=0)
5506 t_static = num.sum(tcumu_static)
5507 perc_static = map(float, tcumu_static / t_static * 100.)
5508 (s.t_perc_static_get_store,
5509 s.t_perc_static_discretize_basesource,
5510 s.t_perc_static_sum_statics,
5511 s.t_perc_static_post_process) = perc_static
5513 s.t_wallclock = tt1 - tt0
5514 if resource:
5515 s.t_cpu = (
5516 (rs1.ru_utime + rs1.ru_stime + rc1.ru_utime + rc1.ru_stime) -
5517 (rs0.ru_utime + rs0.ru_stime + rc0.ru_utime + rc0.ru_stime))
5518 s.n_read_blocks = (
5519 (rs1.ru_inblock + rc1.ru_inblock) -
5520 (rs0.ru_inblock + rc0.ru_inblock))
5522 n_records_stacked = 0.
5523 for results in results_list:
5524 for result in results:
5525 if not isinstance(result, meta.Result):
5526 continue
5527 shr = float(result.n_shared_stacking)
5528 n_records_stacked += result.n_records_stacked / shr
5529 s.t_perc_optimize += result.t_optimize / shr
5530 s.t_perc_stack += result.t_stack / shr
5531 s.n_records_stacked = int(n_records_stacked)
5532 if t_dyn != 0.:
5533 s.t_perc_optimize /= t_dyn * 100
5534 s.t_perc_stack /= t_dyn * 100
5536 return Response(
5537 request=request,
5538 results_list=results_list,
5539 stats=s)
class RemoteEngine(Engine):
    '''
    Client for remote synthetic seismogram calculator.
    '''

    site = String.T(default=ws.g_default_site, optional=True)
    url = String.T(default=ws.g_url, optional=True)

    def process(self, request=None, status_callback=None, **kwargs):

        if request is None:
            request = Request(**kwargs)

        # NOTE(review): ``status_callback`` is accepted for interface
        # compatibility with LocalEngine.process but is not forwarded to
        # the web service call.
        return ws.seismosizer(url=self.url, site=self.site, request=request)
# Module-level singleton, created lazily by get_engine().
g_engine = None


def get_engine(store_superdirs=None):
    '''
    Get the shared :py:class:`LocalEngine` singleton, creating it on first
    use with environment and config defaults.

    :param store_superdirs: optional extra store superdirectories to add
        to the engine's search path (previously a mutable default ``[]``,
        which is an anti-pattern even though it was never mutated here).
    '''
    global g_engine
    if g_engine is None:
        g_engine = LocalEngine(use_env=True, use_config=True)

    for d in store_superdirs or []:
        if d not in g_engine.store_superdirs:
            g_engine.store_superdirs.append(d)

    return g_engine
class SourceGroup(Object):
    '''
    Base class for collections of sources with vectorized attribute
    access: ``group.depth`` yields a float array of all sources' depths.
    '''

    def __getattr__(self, k):
        # Gather attribute ``k`` from each contained source into a float
        # array (requires the attribute to be float-convertible).
        return num.fromiter((getattr(s, k) for s in self),
                            dtype=float)

    def __iter__(self):
        raise NotImplementedError(
            'This method should be implemented in subclass.')

    def __len__(self):
        raise NotImplementedError(
            'This method should be implemented in subclass.')
class SourceList(SourceGroup):
    '''
    Plain list-backed collection of sources.
    '''

    sources = List.T(Source.T())

    def __len__(self):
        return len(self.sources)

    def __iter__(self):
        return iter(self.sources)

    def append(self, s):
        # Add a single source to the collection.
        self.sources.append(s)
class SourceGrid(SourceGroup):
    '''
    Regular grid of sources, generated by varying parameters of a base
    source over given ranges.
    '''

    base = Source.T()
    variables = Dict.T(String.T(), Range.T())
    order = List.T(String.T())

    def __len__(self):
        # Total number of grid points: product of all coordinate lengths.
        n = 1
        for (k, v) in self.make_coords(self.base):
            n *= len(list(v))

        return n

    def __iter__(self):
        # Enumerate all parameter combinations; each yields a regularized
        # clone of the base source.
        for items in permudef(self.make_coords(self.base)):
            s = self.base.clone(**{k: v for (k, v) in items})
            s.regularize()
            yield s

    def ordered_params(self):
        # Yield variable names following the user-given ``order`` first,
        # then the base source's own parameter order; unknown names raise.
        ks = list(self.variables.keys())
        for k in self.order + list(self.base.keys()):
            if k in ks:
                yield k
                ks.remove(k)
        if ks:
            raise Exception('Invalid parameter "%s" for source type "%s".' %
                            (ks[0], self.base.__class__.__name__))

    def make_coords(self, base):
        # Materialize (parameter, coordinate values) pairs, expanding each
        # Range relative to the base source's value.
        return [(param, self.variables[param].make(base=base[param]))
                for param in self.ordered_params()]
# All source model classes exported by this module (also used to build
# __all__ below).
source_classes = [
    Source,
    SourceWithMagnitude,
    SourceWithDerivedMagnitude,
    ExplosionSource,
    RectangularExplosionSource,
    DCSource,
    CLVDSource,
    VLVDSource,
    MTSource,
    RectangularSource,
    PseudoDynamicRupture,
    DoubleDCSource,
    RingfaultSource,
    CombiSource,
    SFSource,
    PorePressurePointSource,
    PorePressureLineSource,
]

# All source-time function classes exported by this module.
stf_classes = [
    STF,
    BoxcarSTF,
    TriangularSTF,
    HalfSinusoidSTF,
    ResonatorSTF,
]

# Public API: exceptions and enums, all source and STF classes by name,
# plus the request/response machinery and engines.
__all__ = '''
SeismosizerError
BadRequest
NoSuchStore
DerivedMagnitudeError
STFMode
'''.split() + [S.__name__ for S in source_classes + stf_classes] + '''
Request
ProcessingStats
Response
Engine
LocalEngine
RemoteEngine
source_classes
get_engine
Range
SourceGroup
SourceList
SourceGrid
map_anchor
'''.split()