1# http://pyrocko.org - GPLv3
2#
3# The Pyrocko Developers, 21st Century
4# ---|P------/S----------~Lg----------
6'''
10.. _coordinate-system-names:
12Coordinate systems
13..................
15Coordinate system names commonly used in source models.
17================= ============================================
18Name Description
19================= ============================================
20``'xyz'`` northing, easting, depth in [m]
21``'xy'`` northing, easting in [m]
22``'latlon'`` latitude, longitude in [deg]
23``'lonlat'`` longitude, latitude in [deg]
24``'latlondepth'`` latitude, longitude in [deg], depth in [m]
25================= ============================================
26'''
29from __future__ import absolute_import, division, print_function
31from collections import defaultdict
32from functools import cmp_to_key
33import time
34import math
35import os
36import re
37import logging
38try:
39 import resource
40except ImportError:
41 resource = None
42from hashlib import sha1
44import numpy as num
45from scipy.interpolate import RegularGridInterpolator
47from pyrocko.guts import (Object, Float, String, StringChoice, List,
48 Timestamp, Int, SObject, ArgumentError, Dict,
49 ValidationError, Bool)
50from pyrocko.guts_array import Array
52from pyrocko import moment_tensor as pmt
53from pyrocko import trace, util, config, model, eikonal_ext
54from pyrocko.orthodrome import ne_to_latlon
55from pyrocko.model import Location
56from pyrocko.modelling import OkadaSource, make_okada_coefficient_matrix, \
57 okada_ext, invert_fault_dislocations_bem
59from . import meta, store, ws
60from .tractions import TractionField, DirectedTractions
61from .targets import Target, StaticTarget, SatelliteTarget
# Shorthand for building filesystem paths.
pjoin = os.path.join

# Prefix for guts type names defined in this module ('pf' namespace).
guts_prefix = 'pf'

# Unit conversion constants: degrees <-> radians, kilometer in meters.
d2r = math.pi / 180.
r2d = 180. / math.pi
km = 1e3

logger = logging.getLogger('pyrocko.gf.seismosizer')
def cmp_none_aware(a, b):
    '''
    Three-way comparison which tolerates ``None`` values.

    ``None`` sorts before any other value. Tuples are compared
    element-wise, recursively.

    :returns: -1, 0 or 1
    '''

    if isinstance(a, tuple) and isinstance(b, tuple):
        for ea, eb in zip(a, b):
            result = cmp_none_aware(ea, eb)
            if result:
                return result

        return 0

    if a is None:
        return 0 if b is None else -1

    if b is None:
        return 1

    return bool(a > b) - bool(a < b)
def xtime():
    '''
    Get current system time in seconds (indirection hook for testing).
    '''
    return time.time()
class SeismosizerError(Exception):
    '''
    Base class for errors raised by the seismosizer engine.
    '''
    pass
class BadRequest(SeismosizerError):
    '''
    Raised when an invalid request is submitted to the engine.
    '''
    pass
class DuplicateStoreId(Exception):
    '''
    Raised when two GF stores with the same id are encountered.
    '''
    pass
class NoDefaultStoreSet(Exception):
    '''
    Raised when a default GF store is needed but none is configured.
    '''
    pass
class ConversionError(Exception):
    '''
    Raised when a conversion between representations fails.
    '''
    pass
class NoSuchStore(BadRequest):
    '''
    Raised when a GF store cannot be found.

    :param store_id: id of the store which was looked for (may be ``None``)
    :param dirs: iterable of folder paths which were searched (may be
        ``None``)
    '''

    def __init__(self, store_id=None, dirs=None):
        BadRequest.__init__(self)
        self.store_id = store_id
        self.dirs = dirs

    def __str__(self):
        # Compose a human-readable message; include the searched folders
        # when they are known.
        if self.store_id is not None:
            rstr = 'no GF store with id "%s" found.' % self.store_id
        else:
            rstr = 'GF store not found.'

        if self.dirs is not None:
            rstr += ' Searched folders:\n %s' % '\n '.join(sorted(self.dirs))
        return rstr
def ufloat(s):
    '''
    Convert string to float, allowing metric unit suffixes.

    Supported suffixes are ``'k'`` (1e3) and ``'M'`` (1e6), e.g.
    ``'2.5k'`` -> ``2500.0``.

    :param s: string to convert
    :raises ValueError: if only a unit suffix without a number is given, or
        if the remainder is not a valid float literal.
    '''

    units = {
        'k': 1e3,
        'M': 1e6,
    }

    factor = 1.0
    stripped = s
    if s and s[-1] in units:
        factor = units[s[-1]]
        stripped = s[:-1]
        if not stripped:
            # Report the full original input, not the (now empty) stripped
            # remainder, so the error message is meaningful.
            raise ValueError('unit without a number: \'%s\'' % s)

    return float(stripped) * factor
def ufloat_or_none(s):
    '''
    Like :py:func:`ufloat`, but map empty or ``None`` input to ``None``.
    '''
    return ufloat(s) if s else None
def int_or_none(s):
    '''
    Convert to ``int``, mapping empty or ``None`` input to ``None``.
    '''
    return int(s) if s else None
def nonzero(x, eps=1e-15):
    '''
    Check whether ``x`` deviates from zero by more than ``eps``.
    '''
    return abs(x) > eps
def permudef(ln, j=0):
    '''
    Yield all value combinations of a definition list.

    ``ln`` is a list of ``(key, values)`` pairs. For every combination of
    picking one value per key, the list is temporarily rewritten in place
    to hold ``(key, value)`` pairs and then yielded.

    NOTE(review): the *same* list object is yielded on every iteration and
    restored to its original state afterwards -- consumers must copy it if
    they want to keep a snapshot.
    '''
    if j < len(ln):
        k, v = ln[j]
        for y in v:
            ln[j] = k, y
            for s in permudef(ln, j + 1):
                yield s

        # Restore the original (key, values) entry before unwinding.
        ln[j] = k, v
        return
    else:
        yield ln
def arr(x):
    '''
    Coerce ``x`` into a NumPy array with at least one dimension.
    '''
    converted = num.asarray(x)
    return num.atleast_1d(converted)
def discretize_rect_source(deltas, deltat, time, north, east, depth,
                           strike, dip, length, width,
                           anchor, velocity=None, stf=None,
                           nucleation_x=None, nucleation_y=None,
                           decimation_factor=1, pointsonly=False,
                           plane_coords=False,
                           aggressive_oversampling=False):
    '''
    Discretize a rectangular fault plane into point sources.

    The plane is sampled on a regular (length x width) grid, rotated by
    strike/dip, and optionally convolved in time with ``stf`` using rupture
    onset times computed from the nucleation point and ``velocity``.

    :param deltas: GF store grid spacings, used to choose the sub-source
        spacing
    :param deltat: sampling interval [s]
    :param anchor: anchor name, looked up in the module-level ``map_anchor``
    :param pointsonly: if ``True``, return only geometry (no times)
    :param plane_coords: if ``True``, return in-plane coordinates before
        rotation and translation
    '''

    if stf is None:
        stf = STF()

    if not velocity and not pointsonly:
        raise AttributeError('velocity is required in time mode')

    # Sub-source spacing must resolve both the GF store grid and the
    # distance travelled per sample at rupture velocity.
    mindeltagf = float(num.min(deltas))
    if velocity:
        mindeltagf = min(mindeltagf, deltat * velocity)

    ln = length
    wd = width

    if aggressive_oversampling:
        nl = int((2. / decimation_factor) * num.ceil(ln / mindeltagf)) + 1
        nw = int((2. / decimation_factor) * num.ceil(wd / mindeltagf)) + 1
    else:
        nl = int((1. / decimation_factor) * num.ceil(ln / mindeltagf)) + 1
        nw = int((1. / decimation_factor) * num.ceil(wd / mindeltagf)) + 1

    n = int(nl * nw)

    dl = ln / nl
    dw = wd / nw

    # Cell-centered grid in plane coordinates (along-strike, down-dip).
    xl = num.linspace(-0.5 * (ln - dl), 0.5 * (ln - dl), nl)
    xw = num.linspace(-0.5 * (wd - dw), 0.5 * (wd - dw), nw)

    points = num.zeros((n, 3))
    points[:, 0] = num.tile(xl, nw)
    points[:, 1] = num.repeat(xw, nl)

    # Distance of each sub-source from the nucleation point; defaults to
    # zero along a missing coordinate.
    if nucleation_x is not None:
        dist_x = num.abs(nucleation_x - points[:, 0])
    else:
        dist_x = num.zeros(n)

    if nucleation_y is not None:
        dist_y = num.abs(nucleation_y - points[:, 1])
    else:
        dist_y = num.zeros(n)

    dist = num.sqrt(dist_x**2 + dist_y**2)
    # NOTE(review): this division runs even when pointsonly=True and
    # velocity=None, which would raise a TypeError -- confirm callers
    # always pass velocity or rely on the plane_coords early return.
    times = dist / velocity

    anch_x, anch_y = map_anchor[anchor]

    points[:, 0] -= anch_x * 0.5 * length
    points[:, 1] -= anch_y * 0.5 * width

    if plane_coords:
        return points, dl, dw, nl, nw

    # Rotate plane coordinates into (north, east, down) and translate to
    # the source position.
    rotmat = pmt.euler_to_matrix(dip * d2r, strike * d2r, 0.0)
    points = num.dot(rotmat.T, points.T).T

    points[:, 0] += north
    points[:, 1] += east
    points[:, 2] += depth

    if pointsonly:
        return points, dl, dw, nl, nw

    # Convolve each sub-source's onset time with the STF discretization:
    # nt time samples per spatial point.
    xtau, amplitudes = stf.discretize_t(deltat, time)
    nt = xtau.size

    points2 = num.repeat(points, nt, axis=0)
    times2 = (times[:, num.newaxis] + xtau[num.newaxis, :]).ravel()
    amplitudes2 = num.tile(amplitudes, n)

    return points2, times2, amplitudes2, dl, dw, nl, nw
def check_rect_source_discretisation(points2, nl, nw, store):
    '''
    Check if a rectangular source discretisation is fine enough.

    Compares the sub-source spacing against half the minimum S-wavelength
    resolvable at the store's sampling rate.

    :returns: ``True`` if the discretisation is acceptable (or too small to
        judge), ``False`` otherwise.
    '''
    # We assume a non-rotated fault plane
    N_CRITICAL = 8
    points = points2.T.reshape((3, nl, nw))
    if points.size <= N_CRITICAL:
        # NOTE(review): ``points.size`` counts coordinates (3 * nl * nw),
        # not sub-sources -- the warning's number looks inflated; verify.
        logger.warning('RectangularSource is defined by only %d sub-sources!'
                       % points.size)
        return True

    # Spacing between neighbouring rows of sub-sources.
    distances = num.sqrt(
        (points[0, 0, :] - points[0, 1, :])**2 +
        (points[1, 0, :] - points[1, 1, :])**2 +
        (points[2, 0, :] - points[2, 1, :])**2)

    depths = points[2, 0, :]
    vs_profile = store.config.get_vs(
        lat=0., lon=0.,
        points=num.repeat(depths[:, num.newaxis], 3, axis=1),
        interpolation='multilinear')

    # Shortest resolvable wavelength at Nyquist: vs * 2 * deltat.
    min_wavelength = vs_profile * (store.config.deltat * 2)
    if not num.all(min_wavelength > distances / 2):
        return False
    return True
def outline_rect_source(strike, dip, length, width, anchor):
    '''
    Get the corner coordinates of a rectangular source outline.

    :returns: ``(5, 3)`` array of (north, east, down) coordinates tracing
        the rectangle's corners, with the first corner repeated to close
        the ring, shifted according to ``anchor`` and rotated by
        ``strike`` and ``dip``.
    '''

    hl = 0.5 * length
    hw = 0.5 * width
    corners = num.array([
        [-hl, -hw, 0.],
        [hl, -hw, 0.],
        [hl, hw, 0.],
        [-hl, hw, 0.],
        [-hl, -hw, 0.]])

    anch_x, anch_y = map_anchor[anchor]
    corners[:, 0] -= anch_x * hl
    corners[:, 1] -= anch_y * hw

    rotmat = pmt.euler_to_matrix(dip * d2r, strike * d2r, 0.0)

    return num.dot(rotmat.T, corners.T).T
def from_plane_coords(
        strike, dip, length, width, depth, x_plane_coords, y_plane_coords,
        lat=0., lon=0.,
        north_shift=0, east_shift=0,
        anchor='top', cs='xy'):
    '''
    Convert normalized in-plane coordinates to absolute coordinates.

    ``x_plane_coords``/``y_plane_coords`` are fractions in [-1, 1] of the
    fault plane's half-length/half-width. Scalars are promoted to
    single-element lists.

    :param cs: output coordinate system: ``'xy'``, ``'latlon'`` or
        ``'lonlat'``
    :returns: two lists (x-like, y-like), order swapped for ``'lonlat'``
    '''

    ln = length
    wd = width
    x_abs = []
    y_abs = []
    if not isinstance(x_plane_coords, list):
        x_plane_coords = [x_plane_coords]
        y_plane_coords = [y_plane_coords]

    for x_plane, y_plane in zip(x_plane_coords, y_plane_coords):
        points = num.array(
            [[-0.5 * ln * x_plane, -0.5 * wd * y_plane, 0.],
             [0.5 * ln * x_plane, -0.5 * wd * y_plane, 0.],
             [0.5 * ln * x_plane, 0.5 * wd * y_plane, 0.],
             [-0.5 * ln * x_plane, 0.5 * wd * y_plane, 0.],
             [-0.5 * ln * x_plane, -0.5 * wd * y_plane, 0.]])

        anch_x, anch_y = map_anchor[anchor]
        points[:, 0] -= anch_x * 0.5 * length
        points[:, 1] -= anch_y * 0.5 * width

        # Rotate into (north, east, down) and translate to the source
        # position.
        rotmat = pmt.euler_to_matrix(dip * d2r, strike * d2r, 0.0)

        points = num.dot(rotmat.T, points.T).T
        points[:, 0] += north_shift
        points[:, 1] += east_shift
        points[:, 2] += depth
        if cs in ('latlon', 'lonlat'):
            latlon = ne_to_latlon(lat, lon,
                                  points[:, 0], points[:, 1])
            latlon = num.array(latlon).T
            # NOTE(review): rows 1 and 2 of the 5-corner array are picked
            # here -- presumably to extract a representative corner pair;
            # confirm against callers that this indexing is intended.
            x_abs.append(latlon[1:2, 1])
            y_abs.append(latlon[2:3, 0])
        if cs == 'xy':
            x_abs.append(points[1:2, 1])
            y_abs.append(points[2:3, 0])

    if cs == 'lonlat':
        return y_abs, x_abs
    else:
        return x_abs, y_abs
def points_on_rect_source(
        strike, dip, length, width, anchor,
        discretized_basesource=None, points_x=None, points_y=None):
    '''
    Compute absolute (north, east, down) positions of points on a fault.

    Either the patch corner grid of ``discretized_basesource`` is used, or
    explicit normalized in-plane coordinates ``points_x``/``points_y``
    (fractions in [-1, 1] of the half-length/half-width).

    NOTE(review): if neither ``discretized_basesource`` nor non-empty
    ``points_x``/``points_y`` are given, ``points`` stays unbound and a
    ``NameError``/``AttributeError`` results -- confirm callers always
    supply one of them.
    '''

    ln = length
    wd = width

    # Promote scalars/lists to arrays so .shape works below.
    if isinstance(points_x, list) or isinstance(points_x, float):
        points_x = num.array([points_x])
    if isinstance(points_y, list) or isinstance(points_y, float):
        points_y = num.array([points_y])

    if discretized_basesource:
        ds = discretized_basesource

        # Patch corners: one more node than patches per direction.
        nl_patches = ds.nl + 1
        nw_patches = ds.nw + 1

        npoints = nl_patches * nw_patches
        points = num.zeros((npoints, 3))
        ln_patches = num.array([il for il in range(nl_patches)])
        wd_patches = num.array([iw for iw in range(nw_patches)])

        # Normalize node indices to [-1, 1].
        points_ln =\
            2 * ((ln_patches - num.min(ln_patches)) / num.ptp(ln_patches)) - 1
        points_wd =\
            2 * ((wd_patches - num.min(wd_patches)) / num.ptp(wd_patches)) - 1

        for il in range(nl_patches):
            for iw in range(nw_patches):
                points[il * nw_patches + iw, :] = num.array([
                    points_ln[il] * ln * 0.5,
                    points_wd[iw] * wd * 0.5, 0.0])

    elif points_x.shape[0] > 0 and points_y.shape[0] > 0:
        points = num.zeros(shape=((len(points_x), 3)))
        for i, (x, y) in enumerate(zip(points_x, points_y)):
            points[i, :] = num.array(
                [x * 0.5 * ln, y * 0.5 * wd, 0.0])

    anch_x, anch_y = map_anchor[anchor]

    points[:, 0] -= anch_x * 0.5 * ln
    points[:, 1] -= anch_y * 0.5 * wd

    # Rotate plane coordinates into (north, east, down).
    rotmat = pmt.euler_to_matrix(dip * d2r, strike * d2r, 0.0)

    return num.dot(rotmat.T, points.T).T
class InvalidGridDef(Exception):
    '''
    Raised when a grid definition string cannot be parsed.
    '''
    pass
class Range(SObject):
    '''
    Convenient range specification.

    Equivalent ways to specify the range [ 0., 1000., ... 10000. ]::

      Range('0 .. 10k : 1k')
      Range(start=0., stop=10e3, step=1e3)
      Range(0, 10e3, 1e3)
      Range('0 .. 10k @ 11')
      Range(start=0., stop=10*km, n=11)

      Range(0, 10e3, n=11)
      Range(values=[x*1e3 for x in range(11)])

    Depending on the use context, it can be possible to omit any part of the
    specification. E.g. in the context of extracting a subset of an already
    existing range, the existing range's specification values would be filled
    in where missing.

    The values are distributed with equal spacing, unless the ``spacing``
    argument is modified. The values can be created offset or relative to an
    external base value with the ``relative`` argument if the use context
    supports this.

    The range specification can be expressed with a short string
    representation::

        'start .. stop @ num | spacing, relative'
        'start .. stop : step | spacing, relative'

    most parts of the expression can be omitted if not needed. Whitespace is
    allowed for readability but can also be omitted.
    '''

    start = Float.T(optional=True)
    stop = Float.T(optional=True)
    step = Float.T(optional=True)
    n = Int.T(optional=True)
    values = Array.T(optional=True, dtype=float, shape=(None,))

    spacing = StringChoice.T(
        choices=['lin', 'log', 'symlog'],
        default='lin',
        optional=True)

    relative = StringChoice.T(
        choices=['', 'add', 'mult'],
        default='',
        optional=True)

    pattern = re.compile(r'^((?P<start>.*)\.\.(?P<stop>[^@|:]*))?'
                         r'(@(?P<n>[^|]+)|:(?P<step>[^|]+))?'
                         r'(\|(?P<stuff>.+))?$')

    def __init__(self, *args, **kwargs):
        d = {}
        if len(args) == 1:
            d = self.parse(args[0])
        elif len(args) in (2, 3):
            d['start'], d['stop'] = [float(x) for x in args[:2]]
            if len(args) == 3:
                d['step'] = float(args[2])

        for k, v in kwargs.items():
            if k in d:
                raise ArgumentError('%s specified more than once' % k)

            d[k] = v

        SObject.__init__(self, **d)

    def __str__(self):
        def sfloat(x):
            if x is not None:
                return '%g' % x
            else:
                return ''

        # Explicit check here: truth-testing a multi-element NumPy array
        # raises ValueError and a single-element array would be judged by
        # its value rather than its presence.
        if self.values is not None and self.values.size != 0:
            return ','.join('%g' % x for x in self.values)

        if self.start is None and self.stop is None:
            s0 = ''
        else:
            s0 = '%s .. %s' % (sfloat(self.start), sfloat(self.stop))

        s1 = ''
        if self.step is not None:
            s1 = [' : %g', ':%g'][s0 == ''] % self.step
        elif self.n is not None:
            s1 = [' @ %i', '@%i'][s0 == ''] % self.n

        if self.spacing == 'lin' and self.relative == '':
            s2 = ''
        else:
            x = []
            if self.spacing != 'lin':
                x.append(self.spacing)

            if self.relative != '':
                x.append(self.relative)

            s2 = ' | %s' % ','.join(x)

        return s0 + s1 + s2

    @classmethod
    def parse(cls, s):
        '''
        Parse a short string range specification.

        Falls back to a plain comma-separated list of values if the
        expression does not match the range pattern.

        :raises InvalidGridDef: if the string cannot be interpreted
        '''
        s = re.sub(r'\s+', '', s)
        m = cls.pattern.match(s)
        if not m:
            try:
                vals = [ufloat(x) for x in s.split(',')]
            except Exception:
                raise InvalidGridDef(
                    '"%s" is not a valid range specification' % s)

            return dict(values=num.array(vals, dtype=float))

        d = m.groupdict()
        try:
            start = ufloat_or_none(d['start'])
            stop = ufloat_or_none(d['stop'])
            step = ufloat_or_none(d['step'])
            n = int_or_none(d['n'])
        except Exception:
            raise InvalidGridDef(
                '"%s" is not a valid range specification' % s)

        spacing = 'lin'
        relative = ''

        if d['stuff'] is not None:
            t = d['stuff'].split(',')
            for x in t:
                if x in cls.spacing.choices:
                    spacing = x
                elif x and x in cls.relative.choices:
                    relative = x
                else:
                    raise InvalidGridDef(
                        '"%s" is not a valid range specification' % s)

        return dict(start=start, stop=stop, step=step, n=n, spacing=spacing,
                    relative=relative)

    def make(self, mi=None, ma=None, inc=None, base=None, eps=1e-5):
        '''
        Create the list of values from the range specification.

        Missing parts of the specification are filled in from ``mi``,
        ``ma`` and ``inc`` where needed; ``base`` is required when
        ``relative`` is ``'add'`` or ``'mult'``.
        '''
        # Explicit presence/size check (see __str__): avoid ambiguous
        # NumPy array truth value.
        if self.values is not None and self.values.size != 0:
            return self.values

        start = self.start
        stop = self.stop
        step = self.step
        n = self.n

        swap = step is not None and step < 0.
        if start is None:
            start = [mi, ma][swap]
        if stop is None:
            stop = [ma, mi][swap]
        if step is None and inc is not None:
            step = [inc, -inc][ma < mi]

        if start is None or stop is None:
            raise InvalidGridDef(
                'Cannot use range specification "%s" without start '
                'and stop in this context' % self)

        if step is None and n is None:
            step = stop - start

        if n is None:
            if (step < 0) != (stop - start < 0):
                raise InvalidGridDef(
                    'Range specification "%s" has inconsistent ordering '
                    '(step < 0 => stop > start)' % self)

            # Snap stop to an integer number of steps; if it is too far
            # off, truncate to the last value not exceeding stop.
            n = int(round((stop - start) / step)) + 1
            stop2 = start + (n - 1) * step
            if abs(stop - stop2) > eps:
                n = int(math.floor((stop - start) / step)) + 1
                stop = start + (n - 1) * step
            else:
                stop = stop2

        if start == stop:
            n = 1

        if self.spacing == 'lin':
            vals = num.linspace(start, stop, n)

        elif self.spacing in ('log', 'symlog'):
            if start > 0. and stop > 0.:
                vals = num.exp(num.linspace(num.log(start),
                                            num.log(stop), n))
            elif start < 0. and stop < 0.:
                vals = -num.exp(num.linspace(num.log(-start),
                                             num.log(-stop), n))
            else:
                raise InvalidGridDef(
                    'Log ranges should not include or cross zero '
                    '(in range specification "%s").' % self)

            if self.spacing == 'symlog':
                nvals = - vals
                vals = num.concatenate((nvals[::-1], vals))

        if self.relative in ('add', 'mult') and base is None:
            raise InvalidGridDef(
                'Cannot use relative range specification in this context.')

        vals = self.make_relative(base, vals)

        return list(map(float, vals))

    def make_relative(self, base, vals):
        '''
        Apply the ``relative`` mode to ``vals`` (in place), if configured.
        '''
        if self.relative == 'add':
            vals += base

        if self.relative == 'mult':
            vals *= base

        return vals
class GridDefElement(Object):
    '''
    Single grid dimension: a parameter name bound to a :py:class:`Range`.

    Can be created from a shorthand string of the form
    ``'param = range-expression'``.
    '''

    param = meta.StringID.T()
    rs = Range.T()

    def __init__(self, shorthand=None, **kwargs):
        if shorthand is not None:
            t = shorthand.split('=')
            if len(t) != 2:
                raise InvalidGridDef(
                    'Invalid grid specification element: %s' % shorthand)

            sp, sr = t[0].strip(), t[1].strip()

            kwargs['param'] = sp
            kwargs['rs'] = Range(sr)

        Object.__init__(self, **kwargs)

    def shorthand(self):
        # Inverse of the shorthand constructor argument: string form of
        # this element.
        return self.param + ' = ' + str(self.rs)
class GridDef(Object):
    '''
    Grid definition: a collection of :py:class:`GridDefElement` objects.

    Can be created from a shorthand string with one
    ``'param = range-expression'`` element per line, or several elements
    separated by ``';'``.
    '''

    elements = List.T(GridDefElement.T())

    def __init__(self, shorthand=None, **kwargs):
        if shorthand is not None:
            t = shorthand.splitlines()
            tt = []
            for x in t:
                x = x.strip()
                if x:
                    tt.extend(x.split(';'))

            elements = []
            for se in tt:
                # Each entry is a single 'param = range' element; the
                # ``elements`` field is typed List.T(GridDefElement.T()),
                # so appending nested GridDef instances would be invalid.
                elements.append(GridDefElement(se))

            kwargs['elements'] = elements

        Object.__init__(self, **kwargs)

    def shorthand(self):
        return '; '.join(str(x) for x in self.elements)
class Cloneable(object):
    '''
    Mix-in adding mapping-style attribute access and copying.

    Exposes the guts property names of the host class (``cls.T.propnames``)
    as keys, so instances can be read and updated with ``obj[key]`` syntax
    and duplicated via :py:meth:`clone`.
    '''

    def __iter__(self):
        return iter(self.T.propnames)

    def __getitem__(self, name):
        if name in self.keys():
            return getattr(self, name)

        raise KeyError(name)

    def __setitem__(self, name, value):
        if name in self.keys():
            return setattr(self, name, value)

        raise KeyError(name)

    def clone(self, **kwargs):
        '''
        Make a copy of the object.

        A new object of the same class is created and initialized with the
        parameters of the object on which this method is called on. If
        ``kwargs`` are given, these are used to override any of the
        initialization parameters.
        '''

        params = dict(self)
        for name in params:
            value = params[name]
            if isinstance(value, Cloneable):
                params[name] = value.clone()

        params.update(kwargs)
        return self.__class__(**params)

    @classmethod
    def keys(cls):
        '''
        Get list of the source model's parameter names.
        '''

        return cls.T.propnames
class STF(Object, Cloneable):

    '''
    Base class for source time functions.

    The base implementation is a unit pulse: all moment is released at the
    reference time ``tref``.
    '''

    def __init__(self, effective_duration=None, **kwargs):
        # Allow construction from an effective (RMS) duration; subclasses
        # provide the conversion factor.
        if effective_duration is not None:
            kwargs['duration'] = effective_duration / \
                self.factor_duration_to_effective()

        Object.__init__(self, **kwargs)

    @classmethod
    def factor_duration_to_effective(cls):
        # Ratio of effective duration to nominal duration (1 for the base
        # class).
        return 1.0

    def centroid_time(self, tref):
        # For a unit pulse the centroid coincides with the reference time.
        return tref

    @property
    def effective_duration(self):
        return self.duration * self.factor_duration_to_effective()

    def discretize_t(self, deltat, tref):
        '''
        Discretize the STF on the sampling grid.

        Returns times and unit-sum amplitudes. A pulse at ``tref`` is
        split linearly onto the two neighbouring grid samples.
        '''
        tl = math.floor(tref / deltat) * deltat
        th = math.ceil(tref / deltat) * deltat
        if tl == th:
            return num.array([tl], dtype=float), num.ones(1)
        else:
            return (
                num.array([tl, th], dtype=float),
                num.array([th - tref, tref - tl], dtype=float) / deltat)

    def base_key(self):
        # Used to decide GF stack sharing; subclasses must include every
        # parameter affecting discretize_t.
        return (type(self).__name__,)
# Shared unit-pulse STF instance, used when a source has no STF configured.
g_unit_pulse = STF()
def sshift(times, amplitudes, tshift, deltat):
    '''
    Shift a sampled signal by a sub-sample time offset.

    If ``tshift`` is an integer multiple of ``deltat``, the input arrays
    are returned unchanged. Otherwise the amplitudes are linearly
    redistributed onto the two neighbouring sample positions, which
    extends the output by one sample.
    '''

    t0 = math.floor(tshift / deltat) * deltat
    t1 = math.ceil(tshift / deltat) * deltat
    if t0 == t1:
        return times, amplitudes

    # Linear interpolation weights for the two neighbouring samples.
    weight_left = (t1 - tshift) / deltat
    weight_right = (tshift - t0) / deltat

    shifted_amplitudes = num.zeros(amplitudes.size + 1, dtype=float)
    shifted_amplitudes[:-1] += weight_left * amplitudes
    shifted_amplitudes[1:] += weight_right * amplitudes

    shifted_times = num.arange(times.size + 1, dtype=float) * \
        deltat + times[0] + t0

    return shifted_times, shifted_amplitudes
class BoxcarSTF(STF):

    '''
    Boxcar type source time function.

    .. figure :: /static/stf-BoxcarSTF.svg
        :width: 40%
        :align: center
        :alt: boxcar source time function
    '''

    duration = Float.T(
        default=0.0,
        help='duration of the boxcar')

    anchor = Float.T(
        default=0.0,
        help='anchor point with respect to source.time: ('
             '-1.0: left -> source duration [0, T] ~ hypocenter time, '
             ' 0.0: center -> source duration [-T/2, T/2] ~ centroid time, '
             '+1.0: right -> source duration [-T, 0] ~ rupture end time)')

    @classmethod
    def factor_duration_to_effective(cls):
        # For a boxcar, effective and nominal duration coincide.
        return 1.0

    def centroid_time(self, tref):
        return tref - 0.5 * self.duration * self.anchor

    def discretize_t(self, deltat, tref):
        '''
        Discretize the boxcar on the sampling grid.

        Amplitudes are cell integrals of the boxcar, normalized to unit
        sum, then sub-sample shifted so their weighted mean matches the
        centroid time exactly.
        '''
        tmin_stf = tref - self.duration * (self.anchor + 1.) * 0.5
        tmax_stf = tref + self.duration * (1. - self.anchor) * 0.5
        tmin = round(tmin_stf / deltat) * deltat
        tmax = round(tmax_stf / deltat) * deltat
        nt = int(round((tmax - tmin) / deltat)) + 1
        times = num.linspace(tmin, tmax, nt)
        amplitudes = num.ones_like(times)
        if times.size > 1:
            t_edges = num.linspace(
                tmin - 0.5 * deltat, tmax + 0.5 * deltat, nt + 1)
            t = tmin_stf + self.duration * num.array(
                [0.0, 0.0, 1.0, 1.0], dtype=float)
            f = num.array([0., 1., 1., 0.], dtype=float)
            amplitudes = util.plf_integrate_piecewise(t_edges, t, f)
            amplitudes /= num.sum(amplitudes)

        # Correct residual centroid offset caused by grid rounding.
        tshift = (num.sum(amplitudes * times) - self.centroid_time(tref))

        return sshift(times, amplitudes, -tshift, deltat)

    def base_key(self):
        return (type(self).__name__, self.duration, self.anchor)
class TriangularSTF(STF):

    '''
    Triangular type source time function.

    .. figure :: /static/stf-TriangularSTF.svg
        :width: 40%
        :align: center
        :alt: triangular source time function
    '''

    duration = Float.T(
        default=0.0,
        help='baseline of the triangle')

    peak_ratio = Float.T(
        default=0.5,
        help='fraction of time compared to duration, '
             'when the maximum amplitude is reached')

    anchor = Float.T(
        default=0.0,
        help='anchor point with respect to source.time: ('
             '-1.0: left -> source duration [0, T] ~ hypocenter time, '
             ' 0.0: center -> source duration [-T/2, T/2] ~ centroid time, '
             '+1.0: right -> source duration [-T, 0] ~ rupture end time)')

    @classmethod
    def factor_duration_to_effective(cls, peak_ratio=None):
        # Ratio of effective (RMS) duration to baseline duration for a
        # triangle with the given peak position.
        if peak_ratio is None:
            peak_ratio = cls.peak_ratio.default()

        return math.sqrt((peak_ratio**2 - peak_ratio + 1.0) * 2.0 / 3.0)

    def __init__(self, effective_duration=None, **kwargs):
        # Allow construction from effective duration; the conversion
        # depends on peak_ratio.
        if effective_duration is not None:
            kwargs['duration'] = effective_duration / \
                self.factor_duration_to_effective(
                    kwargs.get('peak_ratio', None))

        STF.__init__(self, **kwargs)

    @property
    def centroid_ratio(self):
        # Position of the centroid as fraction of the baseline duration.
        ra = self.peak_ratio
        rb = 1.0 - ra
        return self.peak_ratio + (rb**2 / 3. - ra**2 / 3.) / (ra + rb)

    def centroid_time(self, tref):
        # Anchor interpolates between hypocenter time (anchor=-1),
        # centroid time (anchor=0) and rupture end time (anchor=+1).
        ca = self.centroid_ratio
        cb = 1.0 - ca
        if self.anchor <= 0.:
            return tref - ca * self.duration * self.anchor
        else:
            return tref - cb * self.duration * self.anchor

    @property
    def effective_duration(self):
        return self.duration * self.factor_duration_to_effective(
            self.peak_ratio)

    def tminmax_stf(self, tref):
        # Time interval covered by the STF, depending on the anchor.
        ca = self.centroid_ratio
        cb = 1.0 - ca
        if self.anchor <= 0.:
            tmin_stf = tref - ca * self.duration * (self.anchor + 1.)
            tmax_stf = tmin_stf + self.duration
        else:
            tmax_stf = tref + cb * self.duration * (1. - self.anchor)
            tmin_stf = tmax_stf - self.duration

        return tmin_stf, tmax_stf

    def discretize_t(self, deltat, tref):
        '''
        Discretize the triangle on the sampling grid.

        Amplitudes are integrals of the piecewise-linear triangle over
        the sample cells, normalized to unit sum.
        '''
        tmin_stf, tmax_stf = self.tminmax_stf(tref)

        tmin = round(tmin_stf / deltat) * deltat
        tmax = round(tmax_stf / deltat) * deltat
        nt = int(round((tmax - tmin) / deltat)) + 1
        if nt > 1:
            t_edges = num.linspace(
                tmin - 0.5 * deltat, tmax + 0.5 * deltat, nt + 1)
            t = tmin_stf + self.duration * num.array(
                [0.0, self.peak_ratio, 1.0], dtype=float)
            f = num.array([0., 1., 0.], dtype=float)
            amplitudes = util.plf_integrate_piecewise(t_edges, t, f)
            amplitudes /= num.sum(amplitudes)
        else:
            amplitudes = num.ones(1)

        times = num.linspace(tmin, tmax, nt)
        return times, amplitudes

    def base_key(self):
        return (
            type(self).__name__, self.duration, self.peak_ratio, self.anchor)
class HalfSinusoidSTF(STF):

    '''
    Half sinusoid type source time function.

    .. figure :: /static/stf-HalfSinusoidSTF.svg
        :width: 40%
        :align: center
        :alt: half-sinusouid source time function
    '''

    duration = Float.T(
        default=0.0,
        help='duration of the half-sinusoid (baseline)')

    anchor = Float.T(
        default=0.0,
        help='anchor point with respect to source.time: ('
             '-1.0: left -> source duration [0, T] ~ hypocenter time, '
             ' 0.0: center -> source duration [-T/2, T/2] ~ centroid time, '
             '+1.0: right -> source duration [-T, 0] ~ rupture end time)')

    exponent = Int.T(
        default=1,
        help='set to 2 to use square of the half-period sinusoidal function.')

    def __init__(self, effective_duration=None, **kwargs):
        # Allow construction from effective duration; the conversion
        # depends on the exponent.
        if effective_duration is not None:
            kwargs['duration'] = effective_duration / \
                self.factor_duration_to_effective(
                    kwargs.get('exponent', 1))

        STF.__init__(self, **kwargs)

    @classmethod
    def factor_duration_to_effective(cls, exponent):
        # Ratio of effective (RMS) duration to baseline duration, derived
        # analytically for sin and sin**2 shapes.
        if exponent == 1:
            return math.sqrt(3.0 * math.pi**2 - 24.0) / math.pi
        elif exponent == 2:
            return math.sqrt(math.pi**2 - 6) / math.pi
        else:
            raise ValueError('Exponent for HalfSinusoidSTF must be 1 or 2.')

    @property
    def effective_duration(self):
        return self.duration * self.factor_duration_to_effective(self.exponent)

    def centroid_time(self, tref):
        return tref - 0.5 * self.duration * self.anchor

    def discretize_t(self, deltat, tref):
        '''
        Discretize the half-sinusoid on the sampling grid.

        Amplitudes are cell integrals of the (squared) half-sinusoid,
        computed from its analytic antiderivative and normalized to unit
        sum.
        '''
        tmin_stf = tref - self.duration * (self.anchor + 1.) * 0.5
        tmax_stf = tref + self.duration * (1. - self.anchor) * 0.5
        tmin = round(tmin_stf / deltat) * deltat
        tmax = round(tmax_stf / deltat) * deltat
        nt = int(round((tmax - tmin) / deltat)) + 1
        if nt > 1:
            # Clip cell edges to the STF support before evaluating the
            # antiderivative.
            t_edges = num.maximum(tmin_stf, num.minimum(tmax_stf, num.linspace(
                tmin - 0.5 * deltat, tmax + 0.5 * deltat, nt + 1)))

            if self.exponent == 1:
                fint = -num.cos(
                    (t_edges - tmin_stf) * (math.pi / self.duration))

            elif self.exponent == 2:
                fint = (t_edges - tmin_stf) / self.duration \
                    - 1.0 / (2.0 * math.pi) * num.sin(
                        (t_edges - tmin_stf) * (2.0 * math.pi / self.duration))
            else:
                raise ValueError(
                    'Exponent for HalfSinusoidSTF must be 1 or 2.')

            amplitudes = fint[1:] - fint[:-1]
            amplitudes /= num.sum(amplitudes)
        else:
            amplitudes = num.ones(1)

        times = num.linspace(tmin, tmax, nt)
        return times, amplitudes

    def base_key(self):
        # Include ``exponent``: the discretization depends on it, so STFs
        # differing only in exponent must not share GF stacking results.
        return (type(self).__name__, self.duration, self.anchor,
                self.exponent)
class SmoothRampSTF(STF):
    '''
    Smooth-ramp type source time function for near-field displacement.
    Based on moment function of double-couple point source proposed by Bruestle
    and Mueller (PEPI, 1983).

    .. [1] W. Bruestle, G. Mueller (1983), Moment and duration of shallow
        earthquakes from Love-wave modelling for regional distances, PEPI 32,
        312-324.

    .. figure :: /static/stf-SmoothRampSTF.svg
        :width: 40%
        :alt: smooth ramp source time function
    '''
    duration = Float.T(
        default=0.0,
        help='duration of the ramp (baseline)')

    rise_ratio = Float.T(
        default=0.5,
        help='fraction of time compared to duration, '
             'when the maximum amplitude is reached')

    anchor = Float.T(
        default=0.0,
        help='anchor point with respect to source.time: ('
             '-1.0: left -> source duration ``[0, T]`` ~ hypocenter time, '
             '0.0: center -> source duration ``[-T/2, T/2]`` ~ centroid time, '
             '+1.0: right -> source duration ``[-T, 0]`` ~ rupture end time)')

    def discretize_t(self, deltat, tref):
        '''
        Discretize the smooth ramp on the sampling grid.

        Samples before the end of the rise time follow the smooth-ramp
        polynomial-cosine shape; later samples stay at the plateau value.
        Amplitudes are normalized to unit sum.

        NOTE(review): a ``rise_ratio`` of zero leads to division by zero
        in the cosine arguments -- presumably callers never set it to
        zero; confirm.
        '''
        tmin_stf = tref - self.duration * (self.anchor + 1.) * 0.5
        tmax_stf = tref + self.duration * (1. - self.anchor) * 0.5
        tmin = round(tmin_stf / deltat) * deltat
        tmax = round(tmax_stf / deltat) * deltat
        D = round((tmax - tmin) / deltat) * deltat
        nt = int(round(D / deltat)) + 1
        times = num.linspace(tmin, tmax, nt)
        if nt > 1:
            rise_time = self.rise_ratio * self.duration
            amplitudes = num.ones_like(times)
            tp = tmin + rise_time
            ii = num.where(times <= tp)
            t_inc = times[ii]
            # Smooth ramp shape after Bruestle & Mueller (1983).
            a = num.cos(num.pi * (t_inc - tmin_stf) / rise_time)
            b = num.cos(3 * num.pi * (t_inc - tmin_stf) / rise_time) - 1.0
            amplitudes[ii] = (9. / 16.) * (1 - a + (1. / 9.) * b)

            amplitudes /= num.sum(amplitudes)
        else:
            amplitudes = num.ones(1)

        return times, amplitudes

    def base_key(self):
        return (type(self).__name__,
                self.duration, self.rise_ratio, self.anchor)
class ResonatorSTF(STF):
    '''
    Simple resonator like source time function.

    .. math ::

        f(t) = 0 for t < 0
        f(t) = e^{-t/tau} * sin(2 * pi * f * t)

    .. figure :: /static/stf-SmoothRampSTF.svg
        :width: 40%
        :alt: smooth ramp source time function

    NOTE(review): the figure above references the SmoothRampSTF image --
    looks like a copy-paste leftover; confirm whether a ResonatorSTF
    figure exists.
    '''

    duration = Float.T(
        default=0.0,
        help='decay time')

    frequency = Float.T(
        default=1.0,
        help='resonance frequency')

    def discretize_t(self, deltat, tref):
        # Sample the exponentially decaying sinusoid over three decay
        # times; amplitudes are NOT normalized here, unlike other STFs.
        tmin_stf = tref
        tmax_stf = tref + self.duration * 3
        tmin = math.floor(tmin_stf / deltat) * deltat
        tmax = math.ceil(tmax_stf / deltat) * deltat
        times = util.arange2(tmin, tmax, deltat)
        amplitudes = num.exp(-(times - tref) / self.duration) \
            * num.sin(2.0 * num.pi * self.frequency * (times - tref))

        return times, amplitudes

    def base_key(self):
        return (type(self).__name__,
                self.duration, self.frequency)
class STFMode(StringChoice):
    '''
    Whether to apply the STF before (``'pre'``) or after (``'post'``) the
    GF stacking step.
    '''
    choices = ['pre', 'post']
1137class Source(Location, Cloneable):
1138 '''
1139 Base class for all source models.
1140 '''
    # Guts property definitions; ``help`` strings appear in the generated
    # documentation.
    name = String.T(optional=True, default='')

    time = Timestamp.T(
        default=Timestamp.D('1970-01-01 00:00:00'),
        help='source origin time.')

    stf = STF.T(
        optional=True,
        help='source time function.')

    stf_mode = STFMode.T(
        default='post',
        help='whether to apply source time function in pre or '
             'post-processing.')
    def __init__(self, **kwargs):
        # All parameters are guts properties handled by Location.
        Location.__init__(self, **kwargs)
1160 def update(self, **kwargs):
1161 '''
1162 Change some of the source models parameters.
1164 Example::
1166 >>> from pyrocko import gf
1167 >>> s = gf.DCSource()
1168 >>> s.update(strike=66., dip=33.)
1169 >>> print(s)
1170 --- !pf.DCSource
1171 depth: 0.0
1172 time: 1970-01-01 00:00:00
1173 magnitude: 6.0
1174 strike: 66.0
1175 dip: 33.0
1176 rake: 0.0
1178 '''
1180 for (k, v) in kwargs.items():
1181 self[k] = v
    def grid(self, **variables):
        '''
        Create grid of source model variations.

        :param variables: keyword arguments mapping parameter names to
            :py:class:`Range` specifications
        :returns: :py:class:`SourceGrid` instance.

        Example::

            >>> from pyrocko import gf
            >>> base = DCSource()
            >>> R = gf.Range
            >>> for s in base.grid(R('
        '''
        return SourceGrid(base=self, variables=variables)
    def base_key(self):
        '''
        Get key to decide about source discretization / GF stack sharing.

        When two source models differ only in amplitude and origin time, the
        discretization and the GF stacking can be done only once for a unit
        amplitude and a zero origin time and the amplitude and origin times of
        the seismograms can be applied during post-processing of the synthetic
        seismogram.

        For any derived parameterized source model, this method is called to
        decide if discretization and stacking of the source should be shared.
        When two source models return an equal vector of values discretization
        is shared.
        '''
        # Pre-processing STF parameters affect discretization, so the STF's
        # own base key is appended.
        return (self.depth, self.lat, self.north_shift,
                self.lon, self.east_shift, self.time, type(self).__name__) + \
            self.effective_stf_pre().base_key()
    def get_factor(self):
        '''
        Get the scaling factor to be applied during post-processing.

        Discretization of the base seismogram is usually done for a unit
        amplitude, because a common factor can be efficiently multiplied to
        final seismograms. This eliminates the need to repeat the stacking
        when creating seismograms for a series of source models only differing
        in amplitude.

        This method should return the scaling factor to apply in the
        post-processing (often this is simply the scalar moment of the source).
        '''

        return 1.0
1234 def effective_stf_pre(self):
1235 '''
1236 Return the STF applied before stacking of the Green's functions.
1238 This STF is used during discretization of the parameterized source
1239 models, i.e. to produce a temporal distribution of point sources.
1241 Handling of the STF before stacking of the GFs is less efficient but
1242 allows to use different source time functions for different parts of
1243 the source.
1244 '''
1246 if self.stf is not None and self.stf_mode == 'pre':
1247 return self.stf
1248 else:
1249 return g_unit_pulse
1251 def effective_stf_post(self):
1252 '''
1253 Return the STF applied after stacking of the Green's fuctions.
1255 This STF is used in the post-processing of the synthetic seismograms.
1257 Handling of the STF after stacking of the GFs is usually more efficient
1258 but is only possible when a common STF is used for all subsources.
1259 '''
1261 if self.stf is not None and self.stf_mode == 'post':
1262 return self.stf
1263 else:
1264 return g_unit_pulse
1266 def _dparams_base(self):
1267 return dict(times=arr(self.time),
1268 lat=self.lat, lon=self.lon,
1269 north_shifts=arr(self.north_shift),
1270 east_shifts=arr(self.east_shift),
1271 depths=arr(self.depth))
1273 def _hash(self):
1274 sha = sha1()
1275 for k in self.base_key():
1276 sha.update(str(k).encode())
1277 return sha.hexdigest()
1279 def _dparams_base_repeated(self, times):
1280 if times is None:
1281 return self._dparams_base()
1283 nt = times.size
1284 north_shifts = num.repeat(self.north_shift, nt)
1285 east_shifts = num.repeat(self.east_shift, nt)
1286 depths = num.repeat(self.depth, nt)
1287 return dict(times=times,
1288 lat=self.lat, lon=self.lon,
1289 north_shifts=north_shifts,
1290 east_shifts=east_shifts,
1291 depths=depths)
1293 def pyrocko_event(self, store=None, target=None, **kwargs):
1294 duration = None
1295 if self.stf:
1296 duration = self.stf.effective_duration
1298 return model.Event(
1299 lat=self.lat,
1300 lon=self.lon,
1301 north_shift=self.north_shift,
1302 east_shift=self.east_shift,
1303 time=self.time,
1304 name=self.name,
1305 depth=self.depth,
1306 duration=duration,
1307 **kwargs)
1309 def outline(self, cs='xyz'):
1310 points = num.atleast_2d(num.zeros([1, 3]))
1312 points[:, 0] += self.north_shift
1313 points[:, 1] += self.east_shift
1314 points[:, 2] += self.depth
1315 if cs == 'xyz':
1316 return points
1317 elif cs == 'xy':
1318 return points[:, :2]
1319 elif cs in ('latlon', 'lonlat'):
1320 latlon = ne_to_latlon(
1321 self.lat, self.lon, points[:, 0], points[:, 1])
1323 latlon = num.array(latlon).T
1324 if cs == 'latlon':
1325 return latlon
1326 else:
1327 return latlon[:, ::-1]
1329 @classmethod
1330 def from_pyrocko_event(cls, ev, **kwargs):
1331 if ev.depth is None:
1332 raise ConversionError(
1333 'Cannot convert event object to source object: '
1334 'no depth information available')
1336 stf = None
1337 if ev.duration is not None:
1338 stf = HalfSinusoidSTF(effective_duration=ev.duration)
1340 d = dict(
1341 name=ev.name,
1342 time=ev.time,
1343 lat=ev.lat,
1344 lon=ev.lon,
1345 north_shift=ev.north_shift,
1346 east_shift=ev.east_shift,
1347 depth=ev.depth,
1348 stf=stf)
1349 d.update(kwargs)
1350 return cls(**d)
1352 def get_magnitude(self):
1353 raise NotImplementedError(
1354 '%s does not implement get_magnitude()'
1355 % self.__class__.__name__)
class SourceWithMagnitude(Source):
    '''
    Base class for sources containing a moment magnitude.
    '''

    magnitude = Float.T(
        default=6.0,
        help='Moment magnitude Mw as in [Hanks and Kanamori, 1979]')

    def __init__(self, **kwargs):
        # Allow ``moment`` as a constructor-only alias; it is converted to
        # ``magnitude`` and not stored as an attribute.
        if 'moment' in kwargs:
            mom = kwargs.pop('moment')
            if 'magnitude' not in kwargs:
                kwargs['magnitude'] = float(pmt.moment_to_magnitude(mom))

        Source.__init__(self, **kwargs)

    @property
    def moment(self):
        # Scalar moment M0 [Nm], derived from the magnitude.
        return float(pmt.magnitude_to_moment(self.magnitude))

    @moment.setter
    def moment(self, value):
        self.magnitude = float(pmt.moment_to_magnitude(value))

    def pyrocko_event(self, store=None, target=None, **kwargs):
        return Source.pyrocko_event(
            self, store, target,
            magnitude=self.magnitude,
            **kwargs)

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        d = {}
        # Fix: compare against None so a valid magnitude of 0.0 is not
        # silently dropped (0.0 is falsy).
        if ev.magnitude is not None:
            d.update(magnitude=ev.magnitude)

        d.update(kwargs)
        return super(SourceWithMagnitude, cls).from_pyrocko_event(ev, **d)

    def get_magnitude(self):
        return self.magnitude
class DerivedMagnitudeError(ValidationError):
    '''
    Raised when conflicting magnitude-defining parameters are set or a
    derived magnitude cannot be computed.
    '''
    pass
class SourceWithDerivedMagnitude(Source):
    '''
    Base class for sources whose magnitude is derived from other
    parameters (e.g. volume change or slip).
    '''

    class __T(Source.T):

        def validate_extra(self, val):
            Source.T.validate_extra(self, val)
            val.check_conflicts()

    def check_conflicts(self):
        '''
        Check for parameter conflicts.

        To be overloaded in subclasses. Raises :py:exc:`DerivedMagnitudeError`
        on conflicts.
        '''
        pass

    def get_magnitude(self, store=None, target=None):
        raise DerivedMagnitudeError('No magnitude set.')

    def get_moment(self, store=None, target=None):
        return float(pmt.magnitude_to_moment(
            self.get_magnitude(store, target)))

    def pyrocko_moment_tensor(self, store=None, target=None):
        raise NotImplementedError(
            '%s does not implement pyrocko_moment_tensor()'
            % self.__class__.__name__)

    def pyrocko_event(self, store=None, target=None, **kwargs):
        try:
            mt = self.pyrocko_moment_tensor(store, target)
            # Fix: forward store/target so magnitude derivation can use
            # the earth model. Previously get_magnitude() was called
            # without arguments, which raised DerivedMagnitudeError and
            # discarded the already computed moment tensor.
            magnitude = self.get_magnitude(store, target)
        except (DerivedMagnitudeError, NotImplementedError):
            mt = None
            magnitude = None

        return Source.pyrocko_event(
            self, store, target,
            moment_tensor=mt,
            magnitude=magnitude,
            **kwargs)
class ExplosionSource(SourceWithDerivedMagnitude):
    '''
    An isotropic explosion point source.

    The source strength may be given either as ``magnitude`` or as
    ``volume_change``; setting both is a conflict. Converting between the
    two requires elastic moduli from the earth model of the GF store.
    '''

    magnitude = Float.T(
        optional=True,
        help='moment magnitude Mw as in [Hanks and Kanamori, 1979]')

    volume_change = Float.T(
        optional=True,
        help='volume change of the explosion/implosion or '
             'the contracting/extending magmatic source. [m^3]')

    discretized_source_class = meta.DiscretizedExplosionSource

    def __init__(self, **kwargs):
        # Allow ``moment`` as a constructor-only alias for ``magnitude``.
        if 'moment' in kwargs:
            mom = kwargs.pop('moment')
            if 'magnitude' not in kwargs:
                kwargs['magnitude'] = float(pmt.moment_to_magnitude(mom))

        SourceWithDerivedMagnitude.__init__(self, **kwargs)

    def base_key(self):
        return SourceWithDerivedMagnitude.base_key(self) + \
            (self.volume_change,)

    def check_conflicts(self):
        if self.magnitude is not None and self.volume_change is not None:
            raise DerivedMagnitudeError(
                'Magnitude and volume_change are both defined.')

    def get_magnitude(self, store=None, target=None):
        '''
        Get the magnitude, deriving it from ``volume_change`` if needed.

        Without either parameter set, the magnitude of a unit moment
        (M0 = 1 Nm) is returned.
        '''
        self.check_conflicts()

        if self.magnitude is not None:
            return self.magnitude

        elif self.volume_change is not None:
            moment = self.volume_change * \
                self.get_moment_to_volume_change_ratio(store, target)

            return float(pmt.moment_to_magnitude(abs(moment)))
        else:
            return float(pmt.moment_to_magnitude(1.0))

    def get_volume_change(self, store=None, target=None):
        '''
        Get the volume change, deriving it from ``magnitude`` if needed.
        '''
        self.check_conflicts()

        if self.volume_change is not None:
            return self.volume_change

        elif self.magnitude is not None:
            moment = float(pmt.magnitude_to_moment(self.magnitude))
            return moment / self.get_moment_to_volume_change_ratio(
                store, target)

        else:
            # Fix: forward ``target`` so its interpolation setting is
            # honoured; it was dropped in this fallback branch only.
            return 1.0 / self.get_moment_to_volume_change_ratio(
                store, target)

    def get_moment_to_volume_change_ratio(self, store, target=None):
        '''
        Get the ratio M0 / dV (2*mu + kappa) at the source position.

        :raises DerivedMagnitudeError: if no store is given or the source
            position is outside the bounds of the earth model.
        '''
        if store is None:
            raise DerivedMagnitudeError(
                'Need earth model to convert between volume change and '
                'magnitude.')

        points = num.array(
            [[self.north_shift, self.east_shift, self.depth]], dtype=float)

        interpolation = target.interpolation if target else 'multilinear'
        try:
            shear_moduli = store.config.get_shear_moduli(
                self.lat, self.lon,
                points=points,
                interpolation=interpolation)[0]

            bulk_moduli = store.config.get_bulk_moduli(
                self.lat, self.lon,
                points=points,
                interpolation=interpolation)[0]
        except meta.OutOfBounds:
            raise DerivedMagnitudeError(
                'Could not get shear modulus at source position.')

        return float(2. * shear_moduli + bulk_moduli)

    def get_factor(self):
        # Moment is baked into the discretized amplitudes; no post-factor.
        return 1.0

    def discretize_basesource(self, store, target=None):
        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)

        amplitudes *= self.get_moment(store, target) * math.sqrt(2. / 3.)

        # Implosion: flip the sign of the isotropic amplitudes.
        if self.volume_change is not None:
            if self.volume_change < 0.:
                amplitudes *= -1

        return meta.DiscretizedExplosionSource(
            m0s=amplitudes,
            **self._dparams_base_repeated(times))

    def pyrocko_moment_tensor(self, store=None, target=None):
        a = self.get_moment(store, target) * math.sqrt(2. / 3.)
        return pmt.MomentTensor(m=pmt.symmat6(a, a, a, 0., 0., 0.))
class RectangularExplosionSource(ExplosionSource):
    '''
    Rectangular or line explosion source.
    '''

    discretized_source_class = meta.DiscretizedExplosionSource

    strike = Float.T(
        default=0.0,
        help='strike direction in [deg], measured clockwise from north')

    dip = Float.T(
        default=90.0,
        help='dip angle in [deg], measured downward from horizontal')

    length = Float.T(
        default=0.,
        help='length of rectangular source area [m]')

    width = Float.T(
        default=0.,
        help='width of rectangular source area [m]')

    anchor = StringChoice.T(
        choices=['top', 'top_left', 'top_right', 'center', 'bottom',
                 'bottom_left', 'bottom_right'],
        default='center',
        optional=True,
        help='Anchor point for positioning the plane, can be: top, center or'
             'bottom and also top_left, top_right,bottom_left,'
             'bottom_right, center_left and center right')

    # NOTE(review): the help text above mentions center_left/center_right,
    # which are not in ``choices`` — confirm whether they should be added.

    nucleation_x = Float.T(
        optional=True,
        help='horizontal position of rupture nucleation in normalized fault '
             'plane coordinates (-1 = left edge, +1 = right edge)')

    nucleation_y = Float.T(
        optional=True,
        help='down-dip position of rupture nucleation in normalized fault '
             'plane coordinates (-1 = upper edge, +1 = lower edge)')

    velocity = Float.T(
        default=3500.,
        help='speed of explosion front [m/s]')

    aggressive_oversampling = Bool.T(
        default=False,
        help='Aggressive oversampling for basesource discretization. '
             'When using \'multilinear\' interpolation oversampling has'
             ' practically no effect.')

    def base_key(self):
        # NOTE(review): calls Source.base_key, bypassing
        # ExplosionSource.base_key, so ``volume_change`` does not enter the
        # discretization-sharing key here — confirm this is intended.
        return Source.base_key(self) + (self.strike, self.dip, self.length,
                                        self.width, self.nucleation_x,
                                        self.nucleation_y, self.velocity,
                                        self.anchor)

    def discretize_basesource(self, store, target=None):
        # Discretize the rectangle into point sources; nucleation
        # coordinates are converted from normalized [-1, 1] to [m] offsets
        # from the plane center.

        if self.nucleation_x is not None:
            nucx = self.nucleation_x * 0.5 * self.length
        else:
            nucx = None

        if self.nucleation_y is not None:
            nucy = self.nucleation_y * 0.5 * self.width
        else:
            nucy = None

        stf = self.effective_stf_pre()

        points, times, amplitudes, dl, dw, nl, nw = discretize_rect_source(
            store.config.deltas, store.config.deltat,
            self.time, self.north_shift, self.east_shift, self.depth,
            self.strike, self.dip, self.length, self.width, self.anchor,
            self.velocity, stf=stf, nucleation_x=nucx, nucleation_y=nucy)

        # Normalize, then scale so the summed amplitudes equal the total
        # moment of the source.
        amplitudes /= num.sum(amplitudes)
        amplitudes *= self.get_moment(store, target)

        return meta.DiscretizedExplosionSource(
            lat=self.lat,
            lon=self.lon,
            times=times,
            north_shifts=points[:, 0],
            east_shifts=points[:, 1],
            depths=points[:, 2],
            m0s=amplitudes)

    def outline(self, cs='xyz'):
        # Corner coordinates of the source rectangle in the requested
        # coordinate system (see :ref:`coordinate-system-names`).
        points = outline_rect_source(self.strike, self.dip, self.length,
                                     self.width, self.anchor)

        points[:, 0] += self.north_shift
        points[:, 1] += self.east_shift
        points[:, 2] += self.depth
        if cs == 'xyz':
            return points
        elif cs == 'xy':
            return points[:, :2]
        elif cs in ('latlon', 'lonlat'):
            latlon = ne_to_latlon(
                self.lat, self.lon, points[:, 0], points[:, 1])

            latlon = num.array(latlon).T
            if cs == 'latlon':
                return latlon
            else:
                return latlon[:, ::-1]

    def get_nucleation_abs_coord(self, cs='xy'):
        # Absolute coordinates of the nucleation point, or (None, None)
        # when no nucleation position is set.

        if self.nucleation_x is None:
            return None, None

        coords = from_plane_coords(self.strike, self.dip, self.length,
                                   self.width, self.depth, self.nucleation_x,
                                   self.nucleation_y, lat=self.lat,
                                   lon=self.lon, north_shift=self.north_shift,
                                   east_shift=self.east_shift, cs=cs)
        return coords
class DCSource(SourceWithMagnitude):
    '''
    A double-couple point source.
    '''

    strike = Float.T(
        default=0.0,
        help='strike direction in [deg], measured clockwise from north')

    dip = Float.T(
        default=90.0,
        help='dip angle in [deg], measured downward from horizontal')

    rake = Float.T(
        default=0.0,
        help='rake angle in [deg], '
             'measured counter-clockwise from right-horizontal '
             'in on-plane view')

    discretized_source_class = meta.DiscretizedMTSource

    def base_key(self):
        # Only the orientation angles affect discretization; the amplitude
        # is applied in post-processing via get_factor().
        return Source.base_key(self) + (self.strike, self.dip, self.rake)

    def get_factor(self):
        # Scalar moment, applied to the unit-amplitude seismograms.
        return float(pmt.magnitude_to_moment(self.magnitude))

    def discretize_basesource(self, store, target=None):
        # Unit-moment tensor modulated by the pre-stack STF samples.
        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)

        m6 = pmt.MomentTensor(
            strike=self.strike, dip=self.dip, rake=self.rake).m6()

        return meta.DiscretizedMTSource(
            m6s=m6[num.newaxis, :] * amplitudes[:, num.newaxis],
            **self._dparams_base_repeated(times))

    def pyrocko_moment_tensor(self, store=None, target=None):
        return pmt.MomentTensor(
            strike=self.strike,
            dip=self.dip,
            rake=self.rake,
            scalar_moment=self.moment)

    def pyrocko_event(self, store=None, target=None, **kwargs):
        return SourceWithMagnitude.pyrocko_event(
            self, store, target,
            moment_tensor=self.pyrocko_moment_tensor(store, target),
            **kwargs)

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        d = {}
        mt = ev.moment_tensor
        if mt:
            (strike, dip, rake), _ = mt.both_strike_dip_rake()
            d.update(
                strike=float(strike),
                dip=float(dip),
                rake=float(rake),
                magnitude=float(mt.moment_magnitude()))

        d.update(kwargs)
        return super(DCSource, cls).from_pyrocko_event(ev, **d)
class CLVDSource(SourceWithMagnitude):
    '''
    A pure CLVD point source.
    '''

    discretized_source_class = meta.DiscretizedMTSource

    azimuth = Float.T(
        default=0.0,
        help='azimuth direction of largest dipole, clockwise from north [deg]')

    dip = Float.T(
        default=90.,
        help='dip direction of largest dipole, downward from horizontal [deg]')

    def base_key(self):
        return Source.base_key(self) + (self.azimuth, self.dip)

    def get_factor(self):
        # Scalar moment, applied in post-processing.
        return float(pmt.magnitude_to_moment(self.magnitude))

    @property
    def m6(self):
        # Full-moment CLVD tensor rotated into the (azimuth, dip)
        # orientation, as NED 6-vector.
        a = math.sqrt(4. / 3.) * self.get_factor()
        m = pmt.symmat6(-0.5 * a, -0.5 * a, a, 0., 0., 0.)
        rotmat1 = pmt.euler_to_matrix(
            d2r * (self.dip - 90.),
            d2r * (self.azimuth - 90.),
            0.)
        m = num.dot(rotmat1.T, num.dot(m, rotmat1))
        return pmt.to6(m)

    @property
    def m6_astuple(self):
        return tuple(self.m6.tolist())

    def discretize_basesource(self, store, target=None):
        factor = self.get_factor()
        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)
        return meta.DiscretizedMTSource(
            m6s=self.m6[num.newaxis, :] * amplitudes[:, num.newaxis] / factor,
            **self._dparams_base_repeated(times))

    def pyrocko_moment_tensor(self, store=None, target=None):
        return pmt.MomentTensor(m=pmt.symmat6(*self.m6_astuple))

    def pyrocko_event(self, store=None, target=None, **kwargs):
        mt = self.pyrocko_moment_tensor(store, target)
        return Source.pyrocko_event(
            self, store, target,
            # Fix: reuse ``mt`` — the tensor was previously computed a
            # second time here.
            moment_tensor=mt,
            magnitude=float(mt.moment_magnitude()),
            **kwargs)
class VLVDSource(SourceWithDerivedMagnitude):
    '''
    Volumetric linear vector dipole source.

    This source is a parameterization for a restricted moment tensor point
    source, useful to represent dyke or sill like inflation or deflation
    sources. The restriction is such that the moment tensor is rotational
    symmetric. It can be represented by a superposition of a linear vector
    dipole (here we use a CLVD for convenience) and an isotropic component. The
    restricted moment tensor has 4 degrees of freedom: 2 independent
    eigenvalues and 2 rotation angles orienting the the symmetry axis.

    In this parameterization, the isotropic component is controlled by
    ``volume_change``. To define the moment tensor, it must be converted to the
    scalar moment of the the MT's isotropic component. For the conversion, the
    shear modulus at the source's position must be known. This value is
    extracted from the earth model defined in the GF store in use.

    The CLVD part by controlled by its scalar moment :math:`M_0`:
    ``clvd_moment``. The sign of ``clvd_moment`` is used to switch between a
    positiv or negativ CLVD (the sign of the largest eigenvalue).
    '''

    discretized_source_class = meta.DiscretizedMTSource

    azimuth = Float.T(
        default=0.0,
        help='azimuth direction of symmetry axis, clockwise from north [deg].')

    dip = Float.T(
        default=90.,
        help='dip direction of symmetry axis, downward from horizontal [deg].')

    volume_change = Float.T(
        default=0.,
        help='volume change of the inflation/deflation [m^3].')

    clvd_moment = Float.T(
        default=0.,
        help='scalar moment :math:`M_0` of the CLVD component [Nm]. The sign '
             'controls the sign of the CLVD (the sign of its largest '
             'eigenvalue).')

    def get_moment_to_volume_change_ratio(self, store, target):
        # Ratio M0 / dV (2*mu + kappa) at the source position; requires
        # both store and target (unlike ExplosionSource, no interpolation
        # fallback is provided here).
        if store is None or target is None:
            raise DerivedMagnitudeError(
                'Need earth model to convert between volume change and '
                'magnitude.')

        points = num.array(
            [[self.north_shift, self.east_shift, self.depth]], dtype=float)

        try:
            shear_moduli = store.config.get_shear_moduli(
                self.lat, self.lon,
                points=points,
                interpolation=target.interpolation)[0]

            bulk_moduli = store.config.get_bulk_moduli(
                self.lat, self.lon,
                points=points,
                interpolation=target.interpolation)[0]
        except meta.OutOfBounds:
            raise DerivedMagnitudeError(
                'Could not get shear modulus at source position.')

        return float(2. * shear_moduli + bulk_moduli)

    def base_key(self):
        return Source.base_key(self) + \
            (self.azimuth, self.dip, self.volume_change, self.clvd_moment)

    def get_magnitude(self, store=None, target=None):
        # Magnitude of the combined (CLVD + isotropic) moment tensor.
        mt = self.pyrocko_moment_tensor(store, target)
        return float(pmt.moment_to_magnitude(mt.moment))

    def get_m6(self, store, target):
        # Superposition of the oriented CLVD part and the isotropic part
        # derived from the volume change, as NED 6-vector.
        a = math.sqrt(4. / 3.) * self.clvd_moment
        m_clvd = pmt.symmat6(-0.5 * a, -0.5 * a, a, 0., 0., 0.)

        rotmat1 = pmt.euler_to_matrix(
            d2r * (self.dip - 90.),
            d2r * (self.azimuth - 90.),
            0.)
        m_clvd = num.dot(rotmat1.T, num.dot(m_clvd, rotmat1))

        m_iso = self.volume_change * \
            self.get_moment_to_volume_change_ratio(store, target)

        m_iso = pmt.symmat6(m_iso, m_iso, m_iso, 0.,
                            0., 0.,) * math.sqrt(2. / 3)

        m = pmt.to6(m_clvd) + pmt.to6(m_iso)
        return m

    def get_moment(self, store=None, target=None):
        return float(pmt.magnitude_to_moment(
            self.get_magnitude(store, target)))

    def get_m6_astuple(self, store, target):
        m6 = self.get_m6(store, target)
        return tuple(m6.tolist())

    def discretize_basesource(self, store, target=None):
        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)

        # NOTE(review): ``m6 *= amplitudes`` assumes a single-sample STF
        # (amplitudes of size 1); a multi-sample STF would not broadcast
        # against the 6-vector as intended — confirm.
        m6 = self.get_m6(store, target)
        m6 *= amplitudes / self.get_factor()

        return meta.DiscretizedMTSource(
            m6s=m6[num.newaxis, :],
            **self._dparams_base_repeated(times))

    def pyrocko_moment_tensor(self, store=None, target=None):
        m6_astuple = self.get_m6_astuple(store, target)
        return pmt.MomentTensor(m=pmt.symmat6(*m6_astuple))
class MTSource(Source):
    '''
    A moment tensor point source.
    '''

    discretized_source_class = meta.DiscretizedMTSource

    mnn = Float.T(
        default=1.,
        help='north-north component of moment tensor in [Nm]')

    mee = Float.T(
        default=1.,
        help='east-east component of moment tensor in [Nm]')

    mdd = Float.T(
        default=1.,
        help='down-down component of moment tensor in [Nm]')

    mne = Float.T(
        default=0.,
        help='north-east component of moment tensor in [Nm]')

    mnd = Float.T(
        default=0.,
        help='north-down component of moment tensor in [Nm]')

    med = Float.T(
        default=0.,
        help='east-down component of moment tensor in [Nm]')

    def __init__(self, **kwargs):
        # Allow the six components to be passed at once as ``m6``.
        if 'm6' in kwargs:
            for (k, v) in zip('mnn mee mdd mne mnd med'.split(),
                              kwargs.pop('m6')):
                kwargs[k] = float(v)

        Source.__init__(self, **kwargs)

    @property
    def m6(self):
        # Components as NumPy 6-vector (NED ordering).
        return num.array(self.m6_astuple)

    @property
    def m6_astuple(self):
        return (self.mnn, self.mee, self.mdd, self.mne, self.mnd, self.med)

    @m6.setter
    def m6(self, value):
        self.mnn, self.mee, self.mdd, self.mne, self.mnd, self.med = value

    def base_key(self):
        return Source.base_key(self) + self.m6_astuple

    def discretize_basesource(self, store, target=None):
        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)
        return meta.DiscretizedMTSource(
            m6s=self.m6[num.newaxis, :] * amplitudes[:, num.newaxis],
            **self._dparams_base_repeated(times))

    def get_magnitude(self, store=None, target=None):
        # Mw from the Frobenius-norm based scalar moment of the tensor.
        m6 = self.m6
        return pmt.moment_to_magnitude(
            math.sqrt(num.sum(m6[0:3]**2) + 2.0 * num.sum(m6[3:6]**2)) /
            math.sqrt(2.))

    def pyrocko_moment_tensor(self, store=None, target=None):
        return pmt.MomentTensor(m=pmt.symmat6(*self.m6_astuple))

    def pyrocko_event(self, store=None, target=None, **kwargs):
        mt = self.pyrocko_moment_tensor(store, target)
        return Source.pyrocko_event(
            self, store, target,
            # Fix: reuse ``mt`` — the tensor was previously computed a
            # second time here.
            moment_tensor=mt,
            magnitude=float(mt.moment_magnitude()),
            **kwargs)

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        d = {}
        mt = ev.moment_tensor
        if mt:
            d.update(m6=tuple(map(float, mt.m6())))
        else:
            # No tensor available: fall back to an isotropic tensor of
            # equivalent magnitude, if one is given.
            if ev.magnitude is not None:
                mom = pmt.magnitude_to_moment(ev.magnitude)
                v = math.sqrt(2. / 3.) * mom
                d.update(m6=(v, v, v, 0., 0., 0.))

        d.update(kwargs)
        return super(MTSource, cls).from_pyrocko_event(ev, **d)
# Anchor name -> normalized (x, y) position on the fault plane:
# x runs along strike (-1 = left edge, +1 = right edge), y runs down dip
# (-1 = top edge, +1 = bottom edge).
map_anchor = {
    'center': (0.0, 0.0),
    'center_left': (-1.0, 0.0),
    'center_right': (1.0, 0.0),
    'top': (0.0, -1.0),
    'top_left': (-1.0, -1.0),
    'top_right': (1.0, -1.0),
    'bottom': (0.0, 1.0),
    'bottom_left': (-1.0, 1.0),
    'bottom_right': (1.0, 1.0)}
2030class RectangularSource(SourceWithDerivedMagnitude):
2031 '''
2032 Classical Haskell source model modified for bilateral rupture.
2033 '''
2035 discretized_source_class = meta.DiscretizedMTSource
2037 magnitude = Float.T(
2038 optional=True,
2039 help='moment magnitude Mw as in [Hanks and Kanamori, 1979]')
2041 strike = Float.T(
2042 default=0.0,
2043 help='strike direction in [deg], measured clockwise from north')
2045 dip = Float.T(
2046 default=90.0,
2047 help='dip angle in [deg], measured downward from horizontal')
2049 rake = Float.T(
2050 default=0.0,
2051 help='rake angle in [deg], '
2052 'measured counter-clockwise from right-horizontal '
2053 'in on-plane view')
2055 length = Float.T(
2056 default=0.,
2057 help='length of rectangular source area [m]')
2059 width = Float.T(
2060 default=0.,
2061 help='width of rectangular source area [m]')
2063 anchor = StringChoice.T(
2064 choices=['top', 'top_left', 'top_right', 'center', 'bottom',
2065 'bottom_left', 'bottom_right'],
2066 default='center',
2067 optional=True,
2068 help='Anchor point for positioning the plane, can be: ``top, center '
2069 'bottom, top_left, top_right,bottom_left,'
2070 'bottom_right, center_left, center right``.')
2072 nucleation_x = Float.T(
2073 optional=True,
2074 help='horizontal position of rupture nucleation in normalized fault '
2075 'plane coordinates (``-1.`` = left edge, ``+1.`` = right edge)')
2077 nucleation_y = Float.T(
2078 optional=True,
2079 help='down-dip position of rupture nucleation in normalized fault '
2080 'plane coordinates (``-1.`` = upper edge, ``+1.`` = lower edge)')
2082 velocity = Float.T(
2083 default=3500.,
2084 help='speed of rupture front [m/s]')
2086 slip = Float.T(
2087 optional=True,
2088 help='Slip on the rectangular source area [m]')
2090 opening_fraction = Float.T(
2091 default=0.,
2092 help='Determines fraction of slip related to opening. '
2093 '(``-1``: pure tensile closing, '
2094 '``0``: pure shear, '
2095 '``1``: pure tensile opening)')
2097 decimation_factor = Int.T(
2098 optional=True,
2099 default=1,
2100 help='Sub-source decimation factor, a larger decimation will'
2101 ' make the result inaccurate but shorten the necessary'
2102 ' computation time (use for testing puposes only).')
2104 aggressive_oversampling = Bool.T(
2105 default=False,
2106 help='Aggressive oversampling for basesource discretization. '
2107 'When using \'multilinear\' interpolation oversampling has'
2108 ' practically no effect.')
2110 def __init__(self, **kwargs):
2111 if 'moment' in kwargs:
2112 mom = kwargs.pop('moment')
2113 if 'magnitude' not in kwargs:
2114 kwargs['magnitude'] = float(pmt.moment_to_magnitude(mom))
2116 SourceWithDerivedMagnitude.__init__(self, **kwargs)
2118 def base_key(self):
2119 return SourceWithDerivedMagnitude.base_key(self) + (
2120 self.magnitude,
2121 self.slip,
2122 self.strike,
2123 self.dip,
2124 self.rake,
2125 self.length,
2126 self.width,
2127 self.nucleation_x,
2128 self.nucleation_y,
2129 self.velocity,
2130 self.decimation_factor,
2131 self.anchor)
2133 def check_conflicts(self):
2134 if self.magnitude is not None and self.slip is not None:
2135 raise DerivedMagnitudeError(
2136 'Magnitude and slip are both defined.')
    def get_magnitude(self, store=None, target=None):
        '''
        Get the moment magnitude of the source.

        Priority: an explicit ``magnitude`` wins; otherwise, if ``slip``
        is set, the magnitude is derived by summing the discretized
        moment amplitudes (this requires ``store`` and ``target``);
        otherwise the magnitude of a unit moment (M0 = 1 Nm) is returned.

        :raises DerivedMagnitudeError: if both ``magnitude`` and ``slip``
            are set, or if ``slip`` is set but ``store``/``target`` are
            missing.
        '''
        self.check_conflicts()
        if self.magnitude is not None:
            return self.magnitude

        elif self.slip is not None:
            if None in (store, target):
                raise DerivedMagnitudeError(
                    'Magnitude for a rectangular source with slip defined '
                    'can only be derived when earth model and target '
                    'interpolation method are available.')

            amplitudes = self._discretize(store, target)[2]
            if amplitudes.ndim == 2:
                # CLVD component has no net moment, leave out
                return float(pmt.moment_to_magnitude(
                    num.sum(num.abs(amplitudes[0:2, :]).sum())))
            else:
                return float(pmt.moment_to_magnitude(num.sum(amplitudes)))

        else:
            return float(pmt.moment_to_magnitude(1.0))
2161 def get_factor(self):
2162 return 1.0
2164 def get_slip_tensile(self):
2165 return self.slip * self.opening_fraction
2167 def get_slip_shear(self):
2168 return self.slip - abs(self.get_slip_tensile)
    def _discretize(self, store, target):
        # Discretize the rupture plane into point sources and compute
        # per-point moment amplitudes. Returns (points, times, amplitudes,
        # dl, dw, nl, nw). With ``slip`` set, amplitudes is stacked as
        # [shear] or [shear, iso, clvd] rows; with magnitude input, as
        # [shear] or [shear, tensile] rows.
        if self.nucleation_x is not None:
            nucx = self.nucleation_x * 0.5 * self.length
        else:
            nucx = None

        if self.nucleation_y is not None:
            nucy = self.nucleation_y * 0.5 * self.width
        else:
            nucy = None

        stf = self.effective_stf_pre()

        points, times, amplitudes, dl, dw, nl, nw = discretize_rect_source(
            store.config.deltas, store.config.deltat,
            self.time, self.north_shift, self.east_shift, self.depth,
            self.strike, self.dip, self.length, self.width, self.anchor,
            self.velocity, stf=stf, nucleation_x=nucx, nucleation_y=nucy,
            decimation_factor=self.decimation_factor,
            aggressive_oversampling=self.aggressive_oversampling)

        if self.slip is not None:
            # Slip input: moment per patch from elastic moduli at the
            # patch positions (M0 = mu * slip * area).
            if target is not None:
                interpolation = target.interpolation
            else:
                interpolation = 'nearest_neighbor'
                logger.warning(
                    'no target information available, will use '
                    '"nearest_neighbor" interpolation when extracting shear '
                    'modulus from earth model')

            shear_moduli = store.config.get_shear_moduli(
                self.lat, self.lon,
                points=points,
                interpolation=interpolation)

            tensile_slip = self.get_slip_tensile()
            shear_slip = self.slip - abs(tensile_slip)

            amplitudes_total = [shear_moduli * shear_slip]
            if tensile_slip != 0:
                bulk_moduli = store.config.get_bulk_moduli(
                    self.lat, self.lon,
                    points=points,
                    interpolation=interpolation)

                tensile_iso = bulk_moduli * tensile_slip
                tensile_clvd = (2. / 3.) * shear_moduli * tensile_slip

                amplitudes_total.extend([tensile_iso, tensile_clvd])

            amplitudes_total = num.vstack(amplitudes_total).squeeze() * \
                amplitudes * dl * dw

        else:
            # normalization to retain total moment
            amplitudes_norm = amplitudes / num.sum(amplitudes)
            moment = self.get_moment(store, target)

            amplitudes_total = [
                amplitudes_norm * moment * (1 - abs(self.opening_fraction))]
            if self.opening_fraction != 0.:
                amplitudes_total.append(
                    amplitudes_norm * self.opening_fraction * moment)

            amplitudes_total = num.vstack(amplitudes_total).squeeze()

        return points, times, num.atleast_1d(amplitudes_total), dl, dw, nl, nw
    def discretize_basesource(self, store, target=None):
        # Build the discretized moment tensor source: one m6 per
        # space-time point, combining shear, isotropic and CLVD parts
        # depending on how _discretize stacked the amplitudes.

        points, times, amplitudes, dl, dw, nl, nw = self._discretize(
            store, target)

        mot = pmt.MomentTensor(
            strike=self.strike, dip=self.dip, rake=self.rake)
        m6s = num.repeat(mot.m6()[num.newaxis, :], times.size, axis=0)

        if amplitudes.ndim == 1:
            # Pure shear: scale the double-couple tensor per point.
            m6s[:, :] *= amplitudes[:, num.newaxis]
        elif amplitudes.ndim == 2:
            # shear MT components
            rotmat1 = pmt.euler_to_matrix(
                d2r * self.dip, d2r * self.strike, d2r * -self.rake)
            m6s[:, :] *= amplitudes[0, :][:, num.newaxis]

            if amplitudes.shape[0] == 2:
                # tensile MT components - moment/magnitude input
                tensile = pmt.symmat6(1., 1., 3., 0., 0., 0.)
                rot_tensile = pmt.to6(
                    num.dot(rotmat1.T, num.dot(tensile, rotmat1)))

                m6s_tensile = rot_tensile[
                    num.newaxis, :] * amplitudes[1, :][:, num.newaxis]
                m6s += m6s_tensile

            elif amplitudes.shape[0] == 3:
                # tensile MT components - slip input
                iso = pmt.symmat6(1., 1., 1., 0., 0., 0.)
                clvd = pmt.symmat6(-1., -1., 2., 0., 0., 0.)

                rot_iso = pmt.to6(
                    num.dot(rotmat1.T, num.dot(iso, rotmat1)))
                rot_clvd = pmt.to6(
                    num.dot(rotmat1.T, num.dot(clvd, rotmat1)))

                m6s_iso = rot_iso[
                    num.newaxis, :] * amplitudes[1, :][:, num.newaxis]
                m6s_clvd = rot_clvd[
                    num.newaxis, :] * amplitudes[2, :][:, num.newaxis]
                m6s += m6s_iso + m6s_clvd
            else:
                raise ValueError('Unknwown amplitudes shape!')
        else:
            raise ValueError(
                'Unexpected dimension of {}'.format(amplitudes.ndim))

        ds = meta.DiscretizedMTSource(
            lat=self.lat,
            lon=self.lon,
            times=times,
            north_shifts=points[:, 0],
            east_shifts=points[:, 1],
            depths=points[:, 2],
            m6s=m6s,
            dl=dl,
            dw=dw,
            nl=nl,
            nw=nw)

        return ds
2302 def outline(self, cs='xyz'):
2303 points = outline_rect_source(self.strike, self.dip, self.length,
2304 self.width, self.anchor)
2306 points[:, 0] += self.north_shift
2307 points[:, 1] += self.east_shift
2308 points[:, 2] += self.depth
2309 if cs == 'xyz':
2310 return points
2311 elif cs == 'xy':
2312 return points[:, :2]
2313 elif cs in ('latlon', 'lonlat', 'latlondepth'):
2314 latlon = ne_to_latlon(
2315 self.lat, self.lon, points[:, 0], points[:, 1])
2317 latlon = num.array(latlon).T
2318 if cs == 'latlon':
2319 return latlon
2320 elif cs == 'lonlat':
2321 return latlon[:, ::-1]
2322 else:
2323 return num.concatenate(
2324 (latlon, points[:, 2].reshape((len(points), 1))),
2325 axis=1)
2327 def points_on_source(self, cs='xyz', **kwargs):
2329 points = points_on_rect_source(
2330 self.strike, self.dip, self.length, self.width,
2331 self.anchor, **kwargs)
2333 points[:, 0] += self.north_shift
2334 points[:, 1] += self.east_shift
2335 points[:, 2] += self.depth
2336 if cs == 'xyz':
2337 return points
2338 elif cs == 'xy':
2339 return points[:, :2]
2340 elif cs in ('latlon', 'lonlat', 'latlondepth'):
2341 latlon = ne_to_latlon(
2342 self.lat, self.lon, points[:, 0], points[:, 1])
2344 latlon = num.array(latlon).T
2345 if cs == 'latlon':
2346 return latlon
2347 elif cs == 'lonlat':
2348 return latlon[:, ::-1]
2349 else:
2350 return num.concatenate(
2351 (latlon, points[:, 2].reshape((len(points), 1))),
2352 axis=1)
2354 def get_nucleation_abs_coord(self, cs='xy'):
2356 if self.nucleation_x is None:
2357 return None, None
2359 coords = from_plane_coords(self.strike, self.dip, self.length,
2360 self.width, self.depth, self.nucleation_x,
2361 self.nucleation_y, lat=self.lat,
2362 lon=self.lon, north_shift=self.north_shift,
2363 east_shift=self.east_shift, cs=cs)
2364 return coords
2366 def pyrocko_moment_tensor(self, store=None, target=None):
2367 return pmt.MomentTensor(
2368 strike=self.strike,
2369 dip=self.dip,
2370 rake=self.rake,
2371 scalar_moment=self.get_moment(store, target))
    def pyrocko_event(self, store=None, target=None, **kwargs):
        # Plain delegation to the base class implementation.
        return SourceWithDerivedMagnitude.pyrocko_event(
            self, store, target,
            **kwargs)
2378 @classmethod
2379 def from_pyrocko_event(cls, ev, **kwargs):
2380 d = {}
2381 mt = ev.moment_tensor
2382 if mt:
2383 (strike, dip, rake), _ = mt.both_strike_dip_rake()
2384 d.update(
2385 strike=float(strike),
2386 dip=float(dip),
2387 rake=float(rake),
2388 magnitude=float(mt.moment_magnitude()))
2390 d.update(kwargs)
2391 return super(RectangularSource, cls).from_pyrocko_event(ev, **d)
class PseudoDynamicRupture(SourceWithDerivedMagnitude):
    '''
    Combined Eikonal and Okada quasi-dynamic rupture model.

    Details are described in :doc:`/topics/pseudo-dynamic-rupture`.
    Note: attribute `stf` is not used so far, but kept for future applications.
    '''

    discretized_source_class = meta.DiscretizedMTSource

    strike = Float.T(
        default=0.0,
        help='Strike direction in [deg], measured clockwise from north.')

    dip = Float.T(
        default=0.0,
        help='Dip angle in [deg], measured downward from horizontal.')

    length = Float.T(
        default=10. * km,
        help='Length of rectangular source area in [m].')

    width = Float.T(
        default=5. * km,
        help='Width of rectangular source area in [m].')

    # NOTE(review): the help text lists ``center_left``/``center_right``
    # which are missing from ``choices`` -- confirm which list is intended.
    anchor = StringChoice.T(
        choices=['top', 'top_left', 'top_right', 'center', 'bottom',
                 'bottom_left', 'bottom_right'],
        default='center',
        optional=True,
        help='Anchor point for positioning the plane, can be: ``top, center, '
             'bottom, top_left, top_right, bottom_left, '
             'bottom_right, center_left, center_right``.')

    # Trailing double-underscore attributes are backing fields; they are
    # exposed through same-named properties (defined further down) which
    # normalize scalars and lists to numpy arrays.
    nucleation_x__ = Array.T(
        default=num.array([0.]),
        dtype=num.float64,
        serialize_as='list',
        help='Horizontal position of rupture nucleation in normalized fault '
             'plane coordinates (``-1.`` = left edge, ``+1.`` = right edge).')

    nucleation_y__ = Array.T(
        default=num.array([0.]),
        dtype=num.float64,
        serialize_as='list',
        help='Down-dip position of rupture nucleation in normalized fault '
             'plane coordinates (``-1.`` = upper edge, ``+1.`` = lower edge).')

    nucleation_time__ = Array.T(
        optional=True,
        help='Time in [s] after origin, when nucleation points defined by '
             '``nucleation_x`` and ``nucleation_y`` rupture.',
        dtype=num.float64,
        serialize_as='list')

    gamma = Float.T(
        default=0.8,
        help='Scaling factor between rupture velocity and S-wave velocity: '
             r':math:`v_r = \gamma * v_s`.')

    nx = Int.T(
        default=2,
        help='Number of discrete source patches in x direction (along '
             'strike).')

    ny = Int.T(
        default=2,
        help='Number of discrete source patches in y direction (down dip).')

    slip = Float.T(
        optional=True,
        help='Maximum slip of the rectangular source [m]. '
             'Setting the slip the tractions/stress field '
             'will be normalized to accomodate the desired maximum slip.')

    rake = Float.T(
        optional=True,
        help='Rake angle in [deg], '
             'measured counter-clockwise from right-horizontal '
             'in on-plane view. Rake is translated into homogenous tractions '
             'in strike and up-dip direction. ``rake`` is mutually exclusive '
             'with tractions parameter.')

    patches = List.T(
        OkadaSource.T(),
        optional=True,
        help='List of all boundary elements/sub faults/fault patches.')

    patch_mask__ = Array.T(
        dtype=bool,
        serialize_as='list',
        shape=(None,),
        optional=True,
        help='Mask for all boundary elements/sub faults/fault patches. True '
             'leaves the patch in the calculation, False excludes the patch.')

    tractions = TractionField.T(
        optional=True,
        help='Traction field the rupture plane is exposed to. See the'
             ':py:mod:`pyrocko.gf.tractions` module for more details. '
             'If ``tractions=None`` and ``rake`` is given'
             ' :py:class:`~pyrocko.gf.tractions.DirectedTractions` will'
             ' be used.')

    coef_mat = Array.T(
        optional=True,
        help='Coefficient matrix linking traction and dislocation field.',
        dtype=num.float64,
        shape=(None, None))

    eikonal_decimation = Int.T(
        optional=True,
        default=1,
        help='Sub-source eikonal factor, a smaller eikonal factor will'
             ' increase the accuracy of rupture front calculation but'
             ' increases also the computation time.')

    decimation_factor = Int.T(
        optional=True,
        default=1,
        help='Sub-source decimation factor, a larger decimation will'
             ' make the result inaccurate but shorten the necessary'
             ' computation time (use for testing puposes only).')

    nthreads = Int.T(
        optional=True,
        default=1,
        help='Number of threads for Okada forward modelling, '
             'matrix inversion and calculation of point subsources. '
             'Note: for small/medium matrices 1 thread is most efficient.')

    pure_shear = Bool.T(
        optional=True,
        default=False,
        help='Calculate only shear tractions and omit tensile tractions.')

    smooth_rupture = Bool.T(
        default=True,
        help='Smooth the tractions by weighting partially ruptured'
             ' fault patches.')

    aggressive_oversampling = Bool.T(
        default=False,
        help='Aggressive oversampling for basesource discretization. '
             'When using \'multilinear\' interpolation oversampling has'
             ' practically no effect.')
    def __init__(self, **kwargs):
        SourceWithDerivedMagnitude.__init__(self, **kwargs)
        # Cache for rupture time/velocity interpolators, keyed by
        # interpolation method (see get_vr_time_interpolators).
        self._interpolators = {}
        self.check_conflicts()
2547 @property
2548 def nucleation_x(self):
2549 return self.nucleation_x__
2551 @nucleation_x.setter
2552 def nucleation_x(self, nucleation_x):
2553 if isinstance(nucleation_x, list):
2554 nucleation_x = num.array(nucleation_x)
2556 elif not isinstance(
2557 nucleation_x, num.ndarray) and nucleation_x is not None:
2559 nucleation_x = num.array([nucleation_x])
2560 self.nucleation_x__ = nucleation_x
2562 @property
2563 def nucleation_y(self):
2564 return self.nucleation_y__
2566 @nucleation_y.setter
2567 def nucleation_y(self, nucleation_y):
2568 if isinstance(nucleation_y, list):
2569 nucleation_y = num.array(nucleation_y)
2571 elif not isinstance(nucleation_y, num.ndarray) \
2572 and nucleation_y is not None:
2573 nucleation_y = num.array([nucleation_y])
2575 self.nucleation_y__ = nucleation_y
2577 @property
2578 def nucleation(self):
2579 nucl_x, nucl_y = self.nucleation_x, self.nucleation_y
2581 if (nucl_x is None) or (nucl_y is None):
2582 return None
2584 assert nucl_x.shape[0] == nucl_y.shape[0]
2586 return num.concatenate(
2587 (nucl_x[:, num.newaxis], nucl_y[:, num.newaxis]), axis=1)
2589 @nucleation.setter
2590 def nucleation(self, nucleation):
2591 if isinstance(nucleation, list):
2592 nucleation = num.array(nucleation)
2594 assert nucleation.shape[1] == 2
2596 self.nucleation_x = nucleation[:, 0]
2597 self.nucleation_y = nucleation[:, 1]
2599 @property
2600 def nucleation_time(self):
2601 return self.nucleation_time__
2603 @nucleation_time.setter
2604 def nucleation_time(self, nucleation_time):
2605 if not isinstance(nucleation_time, num.ndarray) \
2606 and nucleation_time is not None:
2607 nucleation_time = num.array([nucleation_time])
2609 self.nucleation_time__ = nucleation_time
2611 @property
2612 def patch_mask(self):
2613 if (self.patch_mask__ is not None and
2614 self.patch_mask__.shape == (self.nx * self.ny,)):
2616 return self.patch_mask__
2617 else:
2618 return num.ones(self.nx * self.ny, dtype=bool)
2620 @patch_mask.setter
2621 def patch_mask(self, patch_mask):
2622 if isinstance(patch_mask, list):
2623 patch_mask = num.array(patch_mask)
2625 self.patch_mask__ = patch_mask
2627 def get_tractions(self):
2628 '''
2629 Get source traction vectors.
2631 If :py:attr:`rake` is given, unit length directed traction vectors
2632 (:py:class:`~pyrocko.gf.tractions.DirectedTractions`) are returned,
2633 else the given :py:attr:`tractions` are used.
2635 :returns:
2636 Traction vectors per patch.
2637 :rtype:
2638 :py:class:`~numpy.ndarray`: ``(n_patches, 3)``.
2639 '''
2641 if self.rake is not None:
2642 if num.isnan(self.rake):
2643 raise ValueError('Rake must be a real number, not NaN.')
2645 logger.warning(
2646 'Tractions are derived based on the given source rake.')
2647 tractions = DirectedTractions(rake=self.rake)
2648 else:
2649 tractions = self.tractions
2650 return tractions.get_tractions(self.nx, self.ny, self.patches)
    def base_key(self):
        # Cache key covering every attribute that influences the
        # discretization; used to decide whether cached results apply.
        return SourceWithDerivedMagnitude.base_key(self) + (
            self.slip,
            self.strike,
            self.dip,
            self.rake,
            self.length,
            self.width,
            float(self.nucleation_x.mean()),
            float(self.nucleation_y.mean()),
            self.decimation_factor,
            self.anchor,
            self.pure_shear,
            self.gamma,
            tuple(self.patch_mask))
    def check_conflicts(self):
        '''
        Check that ``tractions`` and ``rake`` are not both given.

        If neither is given, ``rake`` is set to the default of ``0.``.
        '''
        # NOTE(review): truthiness is used here, so ``rake=0.`` together
        # with a traction field does NOT raise, although the two are
        # documented as mutually exclusive. Changing this to explicit
        # ``is not None`` checks would also break re-validation after the
        # default rake has been assigned below -- confirm intended behavior
        # before tightening.
        if self.tractions and self.rake:
            raise AttributeError(
                'Tractions and rake are mutually exclusive.')
        if self.tractions is None and self.rake is None:
            self.rake = 0.
    def get_magnitude(self, store=None, target=None):
        '''
        Get total moment magnitude of the rupture.

        If slip or tractions are defined, the magnitude is derived by
        integrating the moment rate of the discretized source over time;
        this requires a Green's function ``store`` for the earth model.
        Otherwise the magnitude of a unit moment (1.0 Nm) is returned.
        '''
        self.check_conflicts()
        if self.slip is not None or self.tractions is not None:
            if store is None:
                raise DerivedMagnitudeError(
                    'Magnitude for a rectangular source with slip or '
                    'tractions defined can only be derived when earth model '
                    'is set.')

            moment_rate, calc_times = self.discretize_basesource(
                store, target=target).get_moment_rate(store.config.deltat)

            # Time step per sample; the first interval is replicated so
            # deltat has the same length as moment_rate.
            deltat = num.concatenate((
                (num.diff(calc_times)[0],),
                num.diff(calc_times)))

            return float(pmt.moment_to_magnitude(
                num.sum(moment_rate * deltat)))

        else:
            return float(pmt.moment_to_magnitude(1.0))
    def get_factor(self):
        # Amplitude scaling factor applied to the discretized source;
        # always unity for this source type.
        return 1.0
2700 def outline(self, cs='xyz'):
2701 '''
2702 Get source outline corner coordinates.
2704 :param cs:
2705 :ref:`Output coordinate system <coordinate-system-names>`.
2706 :type cs:
2707 optional, str
2709 :returns:
2710 Corner points in desired coordinate system.
2711 :rtype:
2712 :py:class:`~numpy.ndarray`: ``(5, [2, 3])``.
2713 '''
2714 points = outline_rect_source(self.strike, self.dip, self.length,
2715 self.width, self.anchor)
2717 points[:, 0] += self.north_shift
2718 points[:, 1] += self.east_shift
2719 points[:, 2] += self.depth
2720 if cs == 'xyz':
2721 return points
2722 elif cs == 'xy':
2723 return points[:, :2]
2724 elif cs in ('latlon', 'lonlat', 'latlondepth'):
2725 latlon = ne_to_latlon(
2726 self.lat, self.lon, points[:, 0], points[:, 1])
2728 latlon = num.array(latlon).T
2729 if cs == 'latlon':
2730 return latlon
2731 elif cs == 'lonlat':
2732 return latlon[:, ::-1]
2733 else:
2734 return num.concatenate(
2735 (latlon, points[:, 2].reshape((len(points), 1))),
2736 axis=1)
2738 def points_on_source(self, cs='xyz', **kwargs):
2739 '''
2740 Convert relative plane coordinates to geographical coordinates.
2742 Given x and y coordinates (relative source coordinates between -1.
2743 and 1.) are converted to desired geographical coordinates. Coordinates
2744 need to be given as :py:class:`~numpy.ndarray` arguments ``points_x``
2745 and ``points_y``.
2747 :param cs:
2748 :ref:`Output coordinate system <coordinate-system-names>`.
2749 :type cs:
2750 optional, str
2752 :returns:
2753 Point coordinates in desired coordinate system.
2754 :rtype:
2755 :py:class:`~numpy.ndarray`: ``(n_points, [2, 3])``.
2756 '''
2757 points = points_on_rect_source(
2758 self.strike, self.dip, self.length, self.width,
2759 self.anchor, **kwargs)
2761 points[:, 0] += self.north_shift
2762 points[:, 1] += self.east_shift
2763 points[:, 2] += self.depth
2764 if cs == 'xyz':
2765 return points
2766 elif cs == 'xy':
2767 return points[:, :2]
2768 elif cs in ('latlon', 'lonlat', 'latlondepth'):
2769 latlon = ne_to_latlon(
2770 self.lat, self.lon, points[:, 0], points[:, 1])
2772 latlon = num.array(latlon).T
2773 if cs == 'latlon':
2774 return latlon
2775 elif cs == 'lonlat':
2776 return latlon[:, ::-1]
2777 else:
2778 return num.concatenate(
2779 (latlon, points[:, 2].reshape((len(points), 1))),
2780 axis=1)
2782 def pyrocko_moment_tensor(self, store=None, target=None):
2783 if store is not None:
2784 if not self.patches:
2785 self.discretize_patches(store)
2787 data = self.get_slip()
2788 else:
2789 data = self.get_tractions()
2791 weights = num.linalg.norm(data, axis=1)
2792 weights /= weights.sum()
2794 rakes = num.arctan2(data[:, 1], data[:, 0]) * r2d
2795 rake = num.average(rakes, weights=weights)
2797 return pmt.MomentTensor(
2798 strike=self.strike,
2799 dip=self.dip,
2800 rake=rake,
2801 scalar_moment=self.get_moment(store, target))
    def pyrocko_event(self, store=None, target=None, **kwargs):
        # Plain delegation to the base class implementation.
        return SourceWithDerivedMagnitude.pyrocko_event(
            self, store, target,
            **kwargs)
2808 @classmethod
2809 def from_pyrocko_event(cls, ev, **kwargs):
2810 d = {}
2811 mt = ev.moment_tensor
2812 if mt:
2813 (strike, dip, rake), _ = mt.both_strike_dip_rake()
2814 d.update(
2815 strike=float(strike),
2816 dip=float(dip),
2817 rake=float(rake))
2819 d.update(kwargs)
2820 return super(PseudoDynamicRupture, cls).from_pyrocko_event(ev, **d)
    def _discretize_points(self, store, *args, **kwargs):
        '''
        Discretize source plane with equal vertical and horizontal spacing.

        Additional ``*args`` and ``**kwargs`` are passed to
        :py:meth:`points_on_source`.

        :param store:
            Green's function database (needs to cover whole region of the
            source).
        :type store:
            :py:class:`~pyrocko.gf.store.Store`

        :returns:
            Number of points in strike and dip direction, distance
            between adjacent points, coordinates (latlondepth) and coordinates
            (xy on fault) for discrete points.
        :rtype:
            (int, int, float, :py:class:`~numpy.ndarray`,
            :py:class:`~numpy.ndarray`).
        '''
        anch_x, anch_y = map_anchor[self.anchor]

        # Sample a down-dip profile through the source (roughly one point
        # per km of width) to probe S-wave speeds along depth.
        npoints = int(self.width // km) + 1
        points = num.zeros((npoints, 3))
        points[:, 1] = num.linspace(-1., 1., npoints)
        points[:, 1] = (points[:, 1] - anch_y) * self.width/2

        rotmat = pmt.euler_to_matrix(self.dip*d2r, self.strike*d2r, 0.0)
        points = num.dot(rotmat.T, points.T).T
        points[:, 2] += self.depth

        # Lower bound on rupture velocity from the slowest vs on the
        # profile (floored at 0.5 km/s), scaled by gamma.
        vs_min = store.config.get_vs(
            self.lat, self.lon, points,
            interpolation='nearest_neighbor')
        vr_min = max(vs_min.min(), .5*km) * self.gamma

        oversampling = 10.
        delta_l = self.length / (self.nx * oversampling)
        delta_w = self.width / (self.ny * oversampling)

        # Grid spacing limited by GF sampling, patch oversampling and the
        # store's spatial deltas; scaled by the eikonal decimation factor.
        delta = self.eikonal_decimation * num.min([
            store.config.deltat * vr_min / oversampling,
            delta_l, delta_w] + [
            deltas for deltas in store.config.deltas])

        # Snap delta so the width divides into an integer number of steps.
        delta = delta_w / num.ceil(delta_w / delta)

        nx = int(num.ceil(self.length / delta)) + 1
        ny = int(num.ceil(self.width / delta)) + 1

        # Widen the normalized x-range slightly so the along-strike extent
        # is fully covered by the snapped grid.
        rem_l = (nx-1)*delta - self.length
        lim_x = rem_l / self.length

        points_xy = num.zeros((nx * ny, 2))
        points_xy[:, 0] = num.repeat(
            num.linspace(-1.-lim_x, 1.+lim_x, nx), ny)
        points_xy[:, 1] = num.tile(
            num.linspace(-1., 1., ny), nx)

        points = self.points_on_source(
            points_x=points_xy[:, 0],
            points_y=points_xy[:, 1],
            **kwargs)

        return nx, ny, delta, points, points_xy
2889 def _discretize_rupture_v(self, store, interpolation='nearest_neighbor',
2890 points=None):
2891 '''
2892 Get rupture velocity for discrete points on source plane.
2894 :param store:
2895 Green's function database (needs to cover the whole region of the
2896 source)
2897 :type store:
2898 optional, :py:class:`~pyrocko.gf.store.Store`
2900 :param interpolation:
2901 Interpolation method to use (choose between ``'nearest_neighbor'``
2902 and ``'multilinear'``).
2903 :type interpolation:
2904 optional, str
2906 :param points:
2907 Coordinates on fault (-1.:1.) of discrete points.
2908 :type points:
2909 optional, :py:class:`~numpy.ndarray`: ``(n_points, 2)``
2911 :returns:
2912 Rupture velocity assumed as :math:`v_s * \\gamma` for discrete
2913 points.
2914 :rtype:
2915 :py:class:`~numpy.ndarray`: ``(n_points, )``.
2916 '''
2918 if points is None:
2919 _, _, _, points, _ = self._discretize_points(store, cs='xyz')
2921 return store.config.get_vs(
2922 self.lat, self.lon,
2923 points=points,
2924 interpolation=interpolation) * self.gamma
2926 def discretize_time(
2927 self, store, interpolation='nearest_neighbor',
2928 vr=None, times=None, *args, **kwargs):
2929 '''
2930 Get rupture start time for discrete points on source plane.
2932 :param store:
2933 Green's function database (needs to cover whole region of the
2934 source)
2935 :type store:
2936 :py:class:`~pyrocko.gf.store.Store`
2938 :param interpolation:
2939 Interpolation method to use (choose between ``'nearest_neighbor'``
2940 and ``'multilinear'``).
2941 :type interpolation:
2942 optional, str
2944 :param vr:
2945 Array, containing rupture user defined rupture velocity values.
2946 :type vr:
2947 optional, :py:class:`~numpy.ndarray`
2949 :param times:
2950 Array, containing zeros, where rupture is starting, real positive
2951 numbers at later secondary nucleation points and -1, where time
2952 will be calculated. If not given, rupture starts at nucleation_x,
2953 nucleation_y. Times are given for discrete points with equal
2954 horizontal and vertical spacing.
2955 :type times:
2956 optional, :py:class:`~numpy.ndarray`
2958 :returns:
2959 Coordinates (latlondepth), coordinates (xy), rupture velocity,
2960 rupture propagation time of discrete points.
2961 :rtype:
2962 :py:class:`~numpy.ndarray`: ``(n_points, 3)``,
2963 :py:class:`~numpy.ndarray`: ``(n_points, 2)``,
2964 :py:class:`~numpy.ndarray`: ``(n_points_dip, n_points_strike)``,
2965 :py:class:`~numpy.ndarray`: ``(n_points_dip, n_points_strike)``.
2966 '''
2967 nx, ny, delta, points, points_xy = self._discretize_points(
2968 store, cs='xyz')
2970 if vr is None or vr.shape != tuple((nx, ny)):
2971 if vr:
2972 logger.warning(
2973 'Given rupture velocities are not in right shape: '
2974 '(%i, %i), but needed is (%i, %i).', *vr.shape + (nx, ny))
2975 vr = self._discretize_rupture_v(store, interpolation, points)\
2976 .reshape(nx, ny)
2978 if vr.shape != tuple((nx, ny)):
2979 logger.warning(
2980 'Given rupture velocities are not in right shape. Therefore'
2981 ' standard rupture velocity array is used.')
2983 def initialize_times():
2984 nucl_x, nucl_y = self.nucleation_x, self.nucleation_y
2986 if nucl_x.shape != nucl_y.shape:
2987 raise ValueError(
2988 'Nucleation coordinates have different shape.')
2990 dist_points = num.array([
2991 num.linalg.norm(points_xy - num.array([x, y]).ravel(), axis=1)
2992 for x, y in zip(nucl_x, nucl_y)])
2993 nucl_indices = num.argmin(dist_points, axis=1)
2995 if self.nucleation_time is None:
2996 nucl_times = num.zeros_like(nucl_indices)
2997 else:
2998 if self.nucleation_time.shape == nucl_x.shape:
2999 nucl_times = self.nucleation_time
3000 else:
3001 raise ValueError(
3002 'Nucleation coordinates and times have different '
3003 'shapes')
3005 t = num.full(nx * ny, -1.)
3006 t[nucl_indices] = nucl_times
3007 return t.reshape(nx, ny)
3009 if times is None:
3010 times = initialize_times()
3011 elif times.shape != tuple((nx, ny)):
3012 times = initialize_times()
3013 logger.warning(
3014 'Given times are not in right shape. Therefore standard time'
3015 ' array is used.')
3017 eikonal_ext.eikonal_solver_fmm_cartesian(
3018 speeds=vr, times=times, delta=delta)
3020 return points, points_xy, vr, times
    def get_vr_time_interpolators(
            self, store, interpolation='nearest_neighbor', force=False,
            **kwargs):
        '''
        Get interpolators for rupture velocity and rupture time.

        Additional ``**kwargs`` are passed to :py:meth:`discretize_time`.

        :param store:
            Green's function database (needs to cover whole region of the
            source).
        :type store:
            :py:class:`~pyrocko.gf.store.Store`

        :param interpolation:
            Interpolation method to use (choose between ``'nearest_neighbor'``
            and ``'multilinear'``).
        :type interpolation:
            optional, str

        :param force:
            Force recalculation of the interpolators (e.g. after change of
            nucleation point locations/times). Default is ``False``.
        :type force:
            optional, bool

        :returns:
            Tuple of ``(nx, ny, times, vr, time_interpolator,
            vr_interpolator)``; results are cached per interpolation method.
        '''
        interp_map = {'multilinear': 'linear', 'nearest_neighbor': 'nearest'}
        if interpolation not in interp_map:
            raise TypeError(
                'Interpolation method %s not available' % interpolation)

        if not self._interpolators.get(interpolation, False) or force:
            _, points_xy, vr, times = self.discretize_time(
                store, **kwargs)

            if self.length <= 0.:
                raise ValueError(
                    'length must be larger then 0. not %g' % self.length)

            if self.width <= 0.:
                raise ValueError(
                    'width must be larger then 0. not %g' % self.width)

            nx, ny = times.shape
            anch_x, anch_y = map_anchor[self.anchor]

            # Scale normalized fault coordinates to metric plane
            # coordinates relative to the anchor.
            points_xy[:, 0] = (points_xy[:, 0] - anch_x) * self.length / 2.
            points_xy[:, 1] = (points_xy[:, 1] - anch_y) * self.width / 2.

            # NOTE(review): ``ascontiguousarray`` over a tuple of arrays of
            # lengths nx and ny only forms a regular 2-D array when
            # nx == ny; for nx != ny recent numpy versions reject the
            # ragged input -- verify against supported numpy versions.
            self._interpolators[interpolation] = (
                nx, ny, times, vr,
                RegularGridInterpolator(
                    num.ascontiguousarray(
                        (points_xy[::ny, 0], points_xy[:ny, 1])),
                    times,
                    method=interp_map[interpolation]),
                RegularGridInterpolator(
                    num.ascontiguousarray(
                        (points_xy[::ny, 0], points_xy[:ny, 1])),
                    vr,
                    method=interp_map[interpolation]))
        return self._interpolators[interpolation]
    def discretize_patches(
            self, store, interpolation='nearest_neighbor', force=False,
            grid_shape=(),
            **kwargs):
        '''
        Get rupture start time and OkadaSource elements for points on rupture.

        All source elements and their corresponding center points are
        calculated and stored in the :py:attr:`patches` attribute.

        Additional ``**kwargs`` are passed to :py:meth:`discretize_time`.

        :param store:
            Green's function database (needs to cover whole region of the
            source).
        :type store:
            :py:class:`~pyrocko.gf.store.Store`

        :param interpolation:
            Interpolation method to use (choose between ``'nearest_neighbor'``
            and ``'multilinear'``).
        :type interpolation:
            optional, str

        :param force:
            Force recalculation of the vr and time interpolators ( e.g. after
            change of nucleation point locations/times). Default is ``False``.
        :type force:
            optional, bool

        :param grid_shape:
            Desired sub fault patch grid size (nlength, nwidth). Either factor
            or grid_shape should be set.
        :type grid_shape:
            optional, tuple of int
        '''
        nx, ny, times, vr, time_interpolator, vr_interpolator = \
            self.get_vr_time_interpolators(
                store,
                interpolation=interpolation, force=force, **kwargs)
        anch_x, anch_y = map_anchor[self.anchor]

        # Plane half-extents relative to the anchor point (Okada
        # convention: al = along strike, aw = along dip).
        al = self.length / 2.
        aw = self.width / 2.
        al1 = -(al + anch_x * al)
        al2 = al - anch_x * al
        aw1 = -aw + anch_y * aw
        aw2 = aw + anch_y * aw
        # NOTE(review): exact float equality here relies on the anchor
        # offsets producing representable values -- confirm robustness.
        assert num.abs([al1, al2]).sum() == self.length
        assert num.abs([aw1, aw2]).sum() == self.width

        def get_lame(*a, **kw):
            # Shear modulus and Poisson ratio from vp, rho and mu of the
            # earth model at the given coordinates.
            shear_mod = store.config.get_shear_moduli(*a, **kw)
            lamb = store.config.get_vp(*a, **kw)**2 \
                * store.config.get_rho(*a, **kw) - 2. * shear_mod
            return shear_mod, lamb / (2. * (lamb + shear_mod))

        shear_mod, poisson = get_lame(
            self.lat, self.lon,
            num.array([[self.north_shift, self.east_shift, self.depth]]),
            interpolation=interpolation)

        okada_src = OkadaSource(
            lat=self.lat, lon=self.lon,
            strike=self.strike, dip=self.dip,
            north_shift=self.north_shift, east_shift=self.east_shift,
            depth=self.depth,
            al1=al1, al2=al2, aw1=aw1, aw2=aw2,
            poisson=poisson.mean(),
            shearmod=shear_mod.mean(),
            opening=kwargs.get('opening', 0.))

        if not (self.nx and self.ny):
            if grid_shape:
                self.nx, self.ny = grid_shape
            else:
                self.nx = nx
                self.ny = ny

        source_disc, source_points = okada_src.discretize(self.nx, self.ny)

        # NOTE(review): these per-patch values are recomputed but not used
        # below -- possibly leftover code; verify before removing.
        shear_mod, poisson = get_lame(
            self.lat, self.lon,
            num.array([src.source_patch()[:3] for src in source_disc]),
            interpolation=interpolation)

        # Rupture times/velocities: interpolate onto the patch grid when
        # it differs from the eikonal grid, else reuse the solved fields.
        if (self.nx, self.ny) != (nx, ny):
            times_interp = time_interpolator(
                num.ascontiguousarray(source_points[:, :2]))
            vr_interp = vr_interpolator(
                num.ascontiguousarray(source_points[:, :2]))
        else:
            times_interp = times.T.ravel()
            vr_interp = vr.T.ravel()

        for isrc, src in enumerate(source_disc):
            src.vr = vr_interp[isrc]
            src.time = times_interp[isrc] + self.time

        self.patches = source_disc
3186 def discretize_basesource(self, store, target=None):
3187 '''
3188 Prepare source for synthetic waveform calculation.
3190 :param store:
3191 Green's function database (needs to cover whole region of the
3192 source).
3193 :type store:
3194 :py:class:`~pyrocko.gf.store.Store`
3196 :param target:
3197 Target information.
3198 :type target:
3199 optional, :py:class:`~pyrocko.gf.targets.Target`
3201 :returns:
3202 Source discretized by a set of moment tensors and times.
3203 :rtype:
3204 :py:class:`~pyrocko.gf.meta.DiscretizedMTSource`
3205 '''
3206 if not target:
3207 interpolation = 'nearest_neighbor'
3208 else:
3209 interpolation = target.interpolation
3211 if not self.patches:
3212 self.discretize_patches(store, interpolation)
3214 if self.coef_mat is None:
3215 self.calc_coef_mat()
3217 delta_slip, slip_times = self.get_delta_slip(store)
3218 npatches = self.nx * self.ny
3219 ntimes = slip_times.size
3221 anch_x, anch_y = map_anchor[self.anchor]
3223 pln = self.length / self.nx
3224 pwd = self.width / self.ny
3226 patch_coords = num.array([
3227 (p.ix, p.iy)
3228 for p in self.patches]).reshape(self.nx, self.ny, 2)
3230 # boundary condition is zero-slip
3231 # is not valid to avoid unwished interpolation effects
3232 slip_grid = num.zeros((self.nx + 2, self.ny + 2, ntimes, 3))
3233 slip_grid[1:-1, 1:-1, :, :] = \
3234 delta_slip.reshape(self.nx, self.ny, ntimes, 3)
3236 slip_grid[0, 0, :, :] = slip_grid[1, 1, :, :]
3237 slip_grid[0, -1, :, :] = slip_grid[1, -2, :, :]
3238 slip_grid[-1, 0, :, :] = slip_grid[-2, 1, :, :]
3239 slip_grid[-1, -1, :, :] = slip_grid[-2, -2, :, :]
3241 slip_grid[1:-1, 0, :, :] = slip_grid[1:-1, 1, :, :]
3242 slip_grid[1:-1, -1, :, :] = slip_grid[1:-1, -2, :, :]
3243 slip_grid[0, 1:-1, :, :] = slip_grid[1, 1:-1, :, :]
3244 slip_grid[-1, 1:-1, :, :] = slip_grid[-2, 1:-1, :, :]
3246 def make_grid(patch_parameter):
3247 grid = num.zeros((self.nx + 2, self.ny + 2))
3248 grid[1:-1, 1:-1] = patch_parameter.reshape(self.nx, self.ny)
3250 grid[0, 0] = grid[1, 1]
3251 grid[0, -1] = grid[1, -2]
3252 grid[-1, 0] = grid[-2, 1]
3253 grid[-1, -1] = grid[-2, -2]
3255 grid[1:-1, 0] = grid[1:-1, 1]
3256 grid[1:-1, -1] = grid[1:-1, -2]
3257 grid[0, 1:-1] = grid[1, 1:-1]
3258 grid[-1, 1:-1] = grid[-2, 1:-1]
3260 return grid
3262 lamb = self.get_patch_attribute('lamb')
3263 mu = self.get_patch_attribute('shearmod')
3265 lamb_grid = make_grid(lamb)
3266 mu_grid = make_grid(mu)
3268 coords_x = num.zeros(self.nx + 2)
3269 coords_x[1:-1] = patch_coords[:, 0, 0]
3270 coords_x[0] = coords_x[1] - pln / 2
3271 coords_x[-1] = coords_x[-2] + pln / 2
3273 coords_y = num.zeros(self.ny + 2)
3274 coords_y[1:-1] = patch_coords[0, :, 1]
3275 coords_y[0] = coords_y[1] - pwd / 2
3276 coords_y[-1] = coords_y[-2] + pwd / 2
3278 slip_interp = RegularGridInterpolator(
3279 (coords_x, coords_y, slip_times),
3280 slip_grid, method='nearest')
3282 lamb_interp = RegularGridInterpolator(
3283 (coords_x, coords_y),
3284 lamb_grid, method='nearest')
3286 mu_interp = RegularGridInterpolator(
3287 (coords_x, coords_y),
3288 mu_grid, method='nearest')
3290 # discretize basesources
3291 mindeltagf = min(tuple(
3292 (self.length / self.nx, self.width / self.ny) +
3293 tuple(store.config.deltas)))
3295 nl = int((1. / self.decimation_factor) *
3296 num.ceil(pln / mindeltagf)) + 1
3297 nw = int((1. / self.decimation_factor) *
3298 num.ceil(pwd / mindeltagf)) + 1
3299 nsrc_patch = int(nl * nw)
3300 dl = pln / nl
3301 dw = pwd / nw
3303 patch_area = dl * dw
3305 xl = num.linspace(-0.5 * (pln - dl), 0.5 * (pln - dl), nl)
3306 xw = num.linspace(-0.5 * (pwd - dw), 0.5 * (pwd - dw), nw)
3308 base_coords = num.zeros((nsrc_patch, 3))
3309 base_coords[:, 0] = num.tile(xl, nw)
3310 base_coords[:, 1] = num.repeat(xw, nl)
3311 base_coords = num.tile(base_coords, (npatches, 1))
3313 center_coords = num.zeros((npatches, 3))
3314 center_coords[:, 0] = num.repeat(
3315 num.arange(self.nx) * pln + pln / 2, self.ny) - self.length / 2
3316 center_coords[:, 1] = num.tile(
3317 num.arange(self.ny) * pwd + pwd / 2, self.nx) - self.width / 2
3319 base_coords -= center_coords.repeat(nsrc_patch, axis=0)
3320 nbaselocs = base_coords.shape[0]
3322 base_interp = base_coords.repeat(ntimes, axis=0)
3324 base_times = num.tile(slip_times, nbaselocs)
3325 base_interp[:, 0] -= anch_x * self.length / 2
3326 base_interp[:, 1] -= anch_y * self.width / 2
3327 base_interp[:, 2] = base_times
3329 _, _, _, _, time_interpolator, _ = self.get_vr_time_interpolators(
3330 store, interpolation=interpolation)
3332 time_eikonal_max = time_interpolator.values.max()
3334 nbasesrcs = base_interp.shape[0]
3335 delta_slip = slip_interp(base_interp).reshape(nbaselocs, ntimes, 3)
3336 lamb = lamb_interp(base_interp[:, :2]).ravel()
3337 mu = mu_interp(base_interp[:, :2]).ravel()
3339 if False:
3340 try:
3341 import matplotlib.pyplot as plt
3342 coords = base_coords.copy()
3343 norm = num.sum(num.linalg.norm(delta_slip, axis=2), axis=1)
3344 plt.scatter(coords[:, 0], coords[:, 1], c=norm)
3345 plt.show()
3346 except AttributeError:
3347 pass
3349 base_interp[:, 2] = 0.
3350 rotmat = pmt.euler_to_matrix(self.dip * d2r, self.strike * d2r, 0.0)
3351 base_interp = num.dot(rotmat.T, base_interp.T).T
3352 base_interp[:, 0] += self.north_shift
3353 base_interp[:, 1] += self.east_shift
3354 base_interp[:, 2] += self.depth
3356 slip_strike = delta_slip[:, :, 0].ravel()
3357 slip_dip = delta_slip[:, :, 1].ravel()
3358 slip_norm = delta_slip[:, :, 2].ravel()
3360 slip_shear = num.linalg.norm([slip_strike, slip_dip], axis=0)
3361 slip_rake = r2d * num.arctan2(slip_dip, slip_strike)
3363 m6s = okada_ext.patch2m6(
3364 strikes=num.full(nbasesrcs, self.strike, dtype=float),
3365 dips=num.full(nbasesrcs, self.dip, dtype=float),
3366 rakes=slip_rake,
3367 disl_shear=slip_shear,
3368 disl_norm=slip_norm,
3369 lamb=lamb,
3370 mu=mu,
3371 nthreads=self.nthreads)
3373 m6s *= patch_area
3375 dl = -self.patches[0].al1 + self.patches[0].al2
3376 dw = -self.patches[0].aw1 + self.patches[0].aw2
3378 base_times[base_times > time_eikonal_max] = time_eikonal_max
3380 ds = meta.DiscretizedMTSource(
3381 lat=self.lat,
3382 lon=self.lon,
3383 times=base_times + self.time,
3384 north_shifts=base_interp[:, 0],
3385 east_shifts=base_interp[:, 1],
3386 depths=base_interp[:, 2],
3387 m6s=m6s,
3388 dl=dl,
3389 dw=dw,
3390 nl=self.nx,
3391 nw=self.ny)
3393 return ds
3395 def calc_coef_mat(self):
3396 '''
3397 Calculate coefficients connecting tractions and dislocations.
3398 '''
3399 if not self.patches:
3400 raise ValueError(
3401 'Patches are needed. Please calculate them first.')
3403 self.coef_mat = make_okada_coefficient_matrix(
3404 self.patches, nthreads=self.nthreads, pure_shear=self.pure_shear)
3406 def get_patch_attribute(self, attr):
3407 '''
3408 Get patch attributes.
3410 :param attr:
3411 Name of selected attribute (see
3412 :py:class`pyrocko.modelling.okada.OkadaSource`).
3413 :type attr:
3414 str
3416 :returns:
3417 Array with attribute value for each fault patch.
3418 :rtype:
3419 :py:class:`~numpy.ndarray`
3421 '''
3422 if not self.patches:
3423 raise ValueError(
3424 'Patches are needed. Please calculate them first.')
3425 return num.array([getattr(p, attr) for p in self.patches])
    def get_slip(
            self,
            time=None,
            scale_slip=True,
            interpolation='nearest_neighbor',
            **kwargs):
        '''
        Get slip per subfault patch for given time after rupture start.

        :param time:
            Time after origin [s], for which slip is computed. If not
            given, final static slip is returned.
        :type time:
            optional, float > 0.

        :param scale_slip:
            If ``True`` and :py:attr:`slip` given, all slip values are scaled
            to fit the given maximum slip.
        :type scale_slip:
            optional, bool

        :param interpolation:
            Interpolation method to use (choose between ``'nearest_neighbor'``
            and ``'multilinear'``).
        :type interpolation:
            optional, str

        :returns:
            Inverted dislocations (:math:`u_{strike}, u_{dip}, u_{tensile}`)
            for each source patch.
        :rtype:
            :py:class:`~numpy.ndarray`: ``(n_sources, 3)``
        '''

        if self.patches is None:
            raise ValueError(
                'Please discretize the source first (discretize_patches())')
        npatches = len(self.patches)
        tractions = self.get_tractions()
        # Last patch onset relative to the source origin time.
        time_patch_max = self.get_patch_attribute('time').max() - self.time

        time_patch = time
        if time is None:
            time_patch = time_patch_max

        if self.coef_mat is None:
            self.calc_coef_mat()

        if tractions.shape != (npatches, 3):
            raise AttributeError(
                'The traction vector is of invalid shape.'
                ' Required shape is (npatches, 3)')

        patch_mask = num.ones(npatches, dtype=bool)
        if self.patch_mask is not None:
            patch_mask = self.patch_mask

        times = self.get_patch_attribute('time') - self.time
        times[~patch_mask] = time_patch + 1.  # masked-out patches never rupture
        relevant_sources = num.nonzero(times <= time_patch)[0]
        disloc_est = num.zeros_like(tractions)

        if self.smooth_rupture:
            patch_activation = num.zeros(npatches)

            # NOTE(review): 'store' here resolves to the module imported at
            # the top of this file, not a Store instance; this only works if
            # the interpolators have been computed/cached before — confirm.
            nx, ny, times, vr, time_interpolator, vr_interpolator = \
                self.get_vr_time_interpolators(
                    store, interpolation=interpolation)

            # Getting the native Eikonal grid, bit hackish
            points_x = num.round(time_interpolator.grid[0], decimals=2)
            points_y = num.round(time_interpolator.grid[1], decimals=2)
            times_eikonal = time_interpolator.values

            time_max = time
            if time is None:
                time_max = times_eikonal.max()

            for ip, p in enumerate(self.patches):
                # Patch corner coordinates (upper-left, lower-right) on the
                # eikonal grid.
                ul = num.round((p.ix + p.al1, p.iy + p.aw1), decimals=2)
                lr = num.round((p.ix + p.al2, p.iy + p.aw2), decimals=2)

                idx_length = num.logical_and(
                    points_x >= ul[0], points_x <= lr[0])
                idx_width = num.logical_and(
                    points_y >= ul[1], points_y <= lr[1])

                times_patch = times_eikonal[num.ix_(idx_length, idx_width)]
                if times_patch.size == 0:
                    raise AttributeError('could not use smooth_rupture')

                # Fraction of the patch area already ruptured at time_max.
                patch_activation[ip] = \
                    (times_patch <= time_max).sum() / times_patch.size

                if time_patch == 0 and time_patch != time_patch_max:
                    patch_activation[ip] = 0.

            patch_activation[~patch_mask] = 0.  # deactivate masked-out patches

            relevant_sources = num.nonzero(patch_activation > 0.)[0]

        if relevant_sources.size == 0:
            return disloc_est

        # Expand patch indices to the three dislocation components per patch.
        indices_disl = num.repeat(relevant_sources * 3, 3)
        indices_disl[1::3] += 1
        indices_disl[2::3] += 2

        disloc_est[relevant_sources] = invert_fault_dislocations_bem(
            stress_field=tractions[relevant_sources, :].ravel(),
            coef_mat=self.coef_mat[indices_disl, :][:, indices_disl],
            pure_shear=self.pure_shear, nthreads=self.nthreads,
            epsilon=None,
            **kwargs)

        if self.smooth_rupture:
            disloc_est *= patch_activation[:, num.newaxis]

        if scale_slip and self.slip is not None:
            # Invert the final (t = t_max) dislocations to obtain the
            # reference maximum slip used for scaling.
            disloc_tmax = num.zeros(npatches)

            indices_disl = num.repeat(num.nonzero(patch_mask)[0] * 3, 3)
            indices_disl[1::3] += 1
            indices_disl[2::3] += 2

            disloc_tmax[patch_mask] = num.linalg.norm(
                invert_fault_dislocations_bem(
                    stress_field=tractions[patch_mask, :].ravel(),
                    coef_mat=self.coef_mat[indices_disl, :][:, indices_disl],
                    pure_shear=self.pure_shear, nthreads=self.nthreads,
                    epsilon=None,
                    **kwargs), axis=1)

            disloc_tmax_max = disloc_tmax.max()
            if disloc_tmax_max == 0.:
                logger.warning(
                    'slip scaling not performed. Maximum slip is 0.')

            # NOTE(review): if disloc_tmax_max is 0. the division below still
            # executes and produces inf/nan — confirm intended behavior.
            disloc_est *= self.slip / disloc_tmax_max

        return disloc_est
3569 def get_delta_slip(
3570 self,
3571 store=None,
3572 deltat=None,
3573 delta=True,
3574 interpolation='nearest_neighbor',
3575 **kwargs):
3576 '''
3577 Get slip change snapshots.
3579 The time interval, within which the slip changes are computed is
3580 determined by the sampling rate of the Green's function database or
3581 ``deltat``. Additional ``**kwargs`` are passed to :py:meth:`get_slip`.
3583 :param store:
3584 Green's function database (needs to cover whole region of of the
3585 source). Its sampling interval is used as time increment for slip
3586 difference calculation. Either ``deltat`` or ``store`` should be
3587 given.
3588 :type store:
3589 optional, :py:class:`~pyrocko.gf.store.Store`
3591 :param deltat:
3592 Time interval for slip difference calculation [s]. Either
3593 ``deltat`` or ``store`` should be given.
3594 :type deltat:
3595 optional, float
3597 :param delta:
3598 If ``True``, slip differences between two time steps are given. If
3599 ``False``, cumulative slip for all time steps.
3600 :type delta:
3601 optional, bool
3603 :param interpolation:
3604 Interpolation method to use (choose between ``'nearest_neighbor'``
3605 and ``'multilinear'``).
3606 :type interpolation:
3607 optional, str
3609 :returns:
3610 Displacement changes(:math:`\\Delta u_{strike},
3611 \\Delta u_{dip} , \\Delta u_{tensile}`) for each source patch and
3612 time; corner times, for which delta slip is computed. The order of
3613 displacement changes array is:
3615 .. math::
3617 &[[\\\\
3618 &[\\Delta u_{strike, patch1, t1},
3619 \\Delta u_{dip, patch1, t1},
3620 \\Delta u_{tensile, patch1, t1}],\\\\
3621 &[\\Delta u_{strike, patch1, t2},
3622 \\Delta u_{dip, patch1, t2},
3623 \\Delta u_{tensile, patch1, t2}]\\\\
3624 &], [\\\\
3625 &[\\Delta u_{strike, patch2, t1}, ...],\\\\
3626 &[\\Delta u_{strike, patch2, t2}, ...]]]\\\\
3628 :rtype: :py:class:`~numpy.ndarray`: ``(n_sources, n_times, 3)``,
3629 :py:class:`~numpy.ndarray`: ``(n_times, )``
3630 '''
3631 if store and deltat:
3632 raise AttributeError(
3633 'Argument collision. '
3634 'Please define only the store or the deltat argument.')
3636 if store:
3637 deltat = store.config.deltat
3639 if not deltat:
3640 raise AttributeError('Please give a GF store or set deltat.')
3642 npatches = len(self.patches)
3644 _, _, _, _, time_interpolator, _ = self.get_vr_time_interpolators(
3645 store, interpolation=interpolation)
3646 tmax = time_interpolator.values.max()
3648 calc_times = num.arange(0., tmax + deltat, deltat)
3649 calc_times[calc_times > tmax] = tmax
3651 disloc_est = num.zeros((npatches, calc_times.size, 3))
3653 for itime, t in enumerate(calc_times):
3654 disloc_est[:, itime, :] = self.get_slip(
3655 time=t, scale_slip=False, **kwargs)
3657 if self.slip:
3658 disloc_tmax = num.linalg.norm(
3659 self.get_slip(scale_slip=False, time=tmax),
3660 axis=1)
3662 disloc_tmax_max = disloc_tmax.max()
3663 if disloc_tmax_max == 0.:
3664 logger.warning(
3665 'Slip scaling not performed. Maximum slip is 0.')
3666 else:
3667 disloc_est *= self.slip / disloc_tmax_max
3669 if not delta:
3670 return disloc_est, calc_times
3672 # if we have only one timestep there is no gradient
3673 if calc_times.size > 1:
3674 disloc_init = disloc_est[:, 0, :]
3675 disloc_est = num.diff(disloc_est, axis=1)
3676 disloc_est = num.concatenate((
3677 disloc_init[:, num.newaxis, :], disloc_est), axis=1)
3679 calc_times = calc_times
3681 return disloc_est, calc_times
3683 def get_slip_rate(self, *args, **kwargs):
3684 '''
3685 Get slip rate inverted from patches.
3687 The time interval, within which the slip rates are computed is
3688 determined by the sampling rate of the Green's function database or
3689 ``deltat``. Additional ``*args`` and ``**kwargs`` are passed to
3690 :py:meth:`get_delta_slip`.
3692 :returns:
3693 Slip rates (:math:`\\Delta u_{strike}/\\Delta t`,
3694 :math:`\\Delta u_{dip}/\\Delta t, \\Delta u_{tensile}/\\Delta t`)
3695 for each source patch and time; corner times, for which slip rate
3696 is computed. The order of sliprate array is:
3698 .. math::
3700 &[[\\\\
3701 &[\\Delta u_{strike, patch1, t1}/\\Delta t,
3702 \\Delta u_{dip, patch1, t1}/\\Delta t,
3703 \\Delta u_{tensile, patch1, t1}/\\Delta t],\\\\
3704 &[\\Delta u_{strike, patch1, t2}/\\Delta t,
3705 \\Delta u_{dip, patch1, t2}/\\Delta t,
3706 \\Delta u_{tensile, patch1, t2}/\\Delta t]], [\\\\
3707 &[\\Delta u_{strike, patch2, t1}/\\Delta t, ...],\\\\
3708 &[\\Delta u_{strike, patch2, t2}/\\Delta t, ...]]]\\\\
3710 :rtype: :py:class:`~numpy.ndarray`: ``(n_sources, n_times, 3)``,
3711 :py:class:`~numpy.ndarray`: ``(n_times, )``
3712 '''
3713 ddisloc_est, calc_times = self.get_delta_slip(
3714 *args, delta=True, **kwargs)
3716 dt = num.concatenate(
3717 [(num.diff(calc_times)[0], ), num.diff(calc_times)])
3718 slip_rate = num.linalg.norm(ddisloc_est, axis=2) / dt
3720 return slip_rate, calc_times
3722 def get_moment_rate_patches(self, *args, **kwargs):
3723 '''
3724 Get scalar seismic moment rate for each patch individually.
3726 Additional ``*args`` and ``**kwargs`` are passed to
3727 :py:meth:`get_slip_rate`.
3729 :returns:
3730 Seismic moment rate for each source patch and time; corner times,
3731 for which patch moment rate is computed based on slip rate. The
3732 order of the moment rate array is:
3734 .. math::
3736 &[\\\\
3737 &[(\\Delta M / \\Delta t)_{patch1, t1},
3738 (\\Delta M / \\Delta t)_{patch1, t2}, ...],\\\\
3739 &[(\\Delta M / \\Delta t)_{patch2, t1},
3740 (\\Delta M / \\Delta t)_{patch, t2}, ...],\\\\
3741 &[...]]\\\\
3743 :rtype: :py:class:`~numpy.ndarray`: ``(n_sources, n_times)``,
3744 :py:class:`~numpy.ndarray`: ``(n_times, )``
3745 '''
3746 slip_rate, calc_times = self.get_slip_rate(*args, **kwargs)
3748 shear_mod = self.get_patch_attribute('shearmod')
3749 p_length = self.get_patch_attribute('length')
3750 p_width = self.get_patch_attribute('width')
3752 dA = p_length * p_width
3754 mom_rate = shear_mod[:, num.newaxis] * slip_rate * dA[:, num.newaxis]
3756 return mom_rate, calc_times
3758 def get_moment_rate(self, store, target=None, deltat=None):
3759 '''
3760 Get seismic source moment rate for the total source (STF).
3762 :param store:
3763 Green's function database (needs to cover whole region of of the
3764 source). Its ``deltat`` [s] is used as time increment for slip
3765 difference calculation. Either ``deltat`` or ``store`` should be
3766 given.
3767 :type store:
3768 :py:class:`~pyrocko.gf.store.Store`
3770 :param target:
3771 Target information, needed for interpolation method.
3772 :type target:
3773 optional, :py:class:`~pyrocko.gf.targets.Target`
3775 :param deltat:
3776 Time increment for slip difference calculation [s]. If not given
3777 ``store.deltat`` is used.
3778 :type deltat:
3779 optional, float
3781 :return:
3782 Seismic moment rate [Nm/s] for each time; corner times, for which
3783 moment rate is computed. The order of the moment rate array is:
3785 .. math::
3787 &[\\\\
3788 &(\\Delta M / \\Delta t)_{t1},\\\\
3789 &(\\Delta M / \\Delta t)_{t2},\\\\
3790 &...]\\\\
3792 :rtype:
3793 :py:class:`~numpy.ndarray`: ``(n_times, )``,
3794 :py:class:`~numpy.ndarray`: ``(n_times, )``
3795 '''
3796 if not deltat:
3797 deltat = store.config.deltat
3798 return self.discretize_basesource(
3799 store, target=target).get_moment_rate(deltat)
3801 def get_moment(self, *args, **kwargs):
3802 '''
3803 Get seismic cumulative moment.
3805 Additional ``*args`` and ``**kwargs`` are passed to
3806 :py:meth:`get_magnitude`.
3808 :returns:
3809 Cumulative seismic moment in [Nm].
3810 :rtype:
3811 float
3812 '''
3813 return float(pmt.magnitude_to_moment(self.get_magnitude(
3814 *args, **kwargs)))
3816 def rescale_slip(self, magnitude=None, moment=None, **kwargs):
3817 '''
3818 Rescale source slip based on given target magnitude or seismic moment.
3820 Rescale the maximum source slip to fit the source moment magnitude or
3821 seismic moment to the given target values. Either ``magnitude`` or
3822 ``moment`` need to be given. Additional ``**kwargs`` are passed to
3823 :py:meth:`get_moment`.
3825 :param magnitude:
3826 Target moment magnitude :math:`M_\\mathrm{w}` as in
3827 [Hanks and Kanamori, 1979]
3828 :type magnitude:
3829 optional, float
3831 :param moment:
3832 Target seismic moment :math:`M_0` [Nm].
3833 :type moment:
3834 optional, float
3835 '''
3836 if self.slip is None:
3837 self.slip = 1.
3838 logger.warning('No slip found for rescaling. '
3839 'An initial slip of 1 m is assumed.')
3841 if magnitude is None and moment is None:
3842 raise ValueError(
3843 'Either target magnitude or moment need to be given.')
3845 moment_init = self.get_moment(**kwargs)
3847 if magnitude is not None:
3848 moment = pmt.magnitude_to_moment(magnitude)
3850 self.slip *= moment / moment_init
3852 def get_centroid(self, store, *args, **kwargs):
3853 '''
3854 Centroid of the pseudo dynamic rupture model.
3856 The centroid location and time are derived from the locations and times
3857 of the individual patches weighted with their moment contribution.
3858 Additional ``**kwargs`` are passed to :py:meth:`pyrocko_moment_tensor`.
3860 :param store:
3861 Green's function database (needs to cover whole region of of the
3862 source). Its ``deltat`` [s] is used as time increment for slip
3863 difference calculation. Either ``deltat`` or ``store`` should be
3864 given.
3865 :type store:
3866 :py:class:`~pyrocko.gf.store.Store`
3868 :returns:
3869 The centroid location and associated moment tensor.
3870 :rtype:
3871 :py:class:`pyrocko.model.Event`
3872 '''
3873 _, _, _, _, time, _ = self.get_vr_time_interpolators(store)
3874 t_max = time.values.max()
3876 moment_rate, times = self.get_moment_rate_patches(deltat=t_max)
3878 moment = num.sum(moment_rate * times, axis=1)
3879 weights = moment / moment.sum()
3881 norths = self.get_patch_attribute('north_shift')
3882 easts = self.get_patch_attribute('east_shift')
3883 depths = self.get_patch_attribute('depth')
3885 centroid_n = num.sum(weights * norths)
3886 centroid_e = num.sum(weights * easts)
3887 centroid_d = num.sum(weights * depths)
3889 centroid_lat, centroid_lon = ne_to_latlon(
3890 self.lat, self.lon, centroid_n, centroid_e)
3892 moment_rate_, times = self.get_moment_rate(store)
3893 delta_times = num.concatenate((
3894 [times[1] - times[0]],
3895 num.diff(times)))
3896 moment_src = delta_times * moment_rate
3898 centroid_t = num.sum(
3899 moment_src / num.sum(moment_src) * times) + self.time
3901 mt = self.pyrocko_moment_tensor(store, *args, **kwargs)
3903 return model.Event(
3904 lat=centroid_lat,
3905 lon=centroid_lon,
3906 depth=centroid_d,
3907 time=centroid_t,
3908 moment_tensor=mt,
3909 magnitude=mt.magnitude,
3910 duration=t_max)
class DoubleDCSource(SourceWithMagnitude):
    '''
    Two double-couple point sources separated in space and time.

    Moment share between the sub-sources is controlled by the parameter
    :py:gattr:`mix`. The position of the subsources is dependent on the
    moment distribution between the two sources. Depth, east and north shift
    are given for the centroid between the two double-couples. The
    subsources will be positioned according to their moment shares around
    this centroid position. This is done according to their delta parameters,
    which are therefore in relation to that centroid. Note that the depth of
    the subsources can therefore be ``depth +/- delta_depth``. For shallow
    earthquakes the depth therefore has to be chosen deep enough to avoid
    sampling above the surface.
    '''

    strike1 = Float.T(
        default=0.0,
        help='strike direction in [deg], measured clockwise from north')

    dip1 = Float.T(
        default=90.0,
        help='dip angle in [deg], measured downward from horizontal')

    azimuth = Float.T(
        default=0.0,
        help='azimuth to second double-couple [deg], '
             'measured at first, clockwise from north')

    rake1 = Float.T(
        default=0.0,
        help='rake angle in [deg], '
             'measured counter-clockwise from right-horizontal '
             'in on-plane view')

    strike2 = Float.T(
        default=0.0,
        help='strike direction in [deg], measured clockwise from north')

    dip2 = Float.T(
        default=90.0,
        help='dip angle in [deg], measured downward from horizontal')

    rake2 = Float.T(
        default=0.0,
        help='rake angle in [deg], '
             'measured counter-clockwise from right-horizontal '
             'in on-plane view')

    delta_time = Float.T(
        default=0.0,
        help='separation of double-couples in time (t2-t1) [s]')

    delta_depth = Float.T(
        default=0.0,
        help='difference in depth (z2-z1) [m]')

    distance = Float.T(
        default=0.0,
        help='distance between the two double-couples [m]')

    mix = Float.T(
        default=0.5,
        help='how to distribute the moment to the two doublecouples '
             'mix=0 -> m1=1 and m2=0; mix=1 -> m1=0, m2=1')

    stf1 = STF.T(
        optional=True,
        help='Source time function of subsource 1 '
             '(if given, overrides STF from attribute :py:gattr:`Source.stf`)')

    stf2 = STF.T(
        optional=True,
        help='Source time function of subsource 2 '
             '(if given, overrides STF from attribute :py:gattr:`Source.stf`)')

    discretized_source_class = meta.DiscretizedMTSource

    def base_key(self):
        # GF cache key: both sub-source STFs and all geometry/mechanism
        # parameters influence the discretized source.
        return (
            self.time, self.depth, self.lat, self.north_shift,
            self.lon, self.east_shift, type(self).__name__) + \
            self.effective_stf1_pre().base_key() + \
            self.effective_stf2_pre().base_key() + (
            self.strike1, self.dip1, self.rake1,
            self.strike2, self.dip2, self.rake2,
            self.delta_time, self.delta_depth,
            self.azimuth, self.distance, self.mix)

    def get_factor(self):
        return self.moment

    def effective_stf1_pre(self):
        # STF of subsource 1, with fallback to the common STF or a unit pulse.
        return self.stf1 or self.stf or g_unit_pulse

    def effective_stf2_pre(self):
        # STF of subsource 2, with fallback to the common STF or a unit pulse.
        return self.stf2 or self.stf or g_unit_pulse

    def effective_stf_post(self):
        return g_unit_pulse

    def split(self):
        '''
        Split into the two individual :py:class:`DCSource` sub-sources.
        '''
        # Moment shares of subsource 1 and 2.
        a1 = 1.0 - self.mix
        a2 = self.mix
        # Horizontal offset vector between the sub-sources.
        delta_north = math.cos(self.azimuth * d2r) * self.distance
        delta_east = math.sin(self.azimuth * d2r) * self.distance

        dc1 = DCSource(
            lat=self.lat,
            lon=self.lon,
            time=self.time - self.delta_time * a2,
            north_shift=self.north_shift - delta_north * a2,
            east_shift=self.east_shift - delta_east * a2,
            depth=self.depth - self.delta_depth * a2,
            moment=self.moment * a1,
            strike=self.strike1,
            dip=self.dip1,
            rake=self.rake1,
            stf=self.stf1 or self.stf)

        dc2 = DCSource(
            lat=self.lat,
            lon=self.lon,
            time=self.time + self.delta_time * a1,
            north_shift=self.north_shift + delta_north * a1,
            east_shift=self.east_shift + delta_east * a1,
            depth=self.depth + self.delta_depth * a1,
            moment=self.moment * a2,
            strike=self.strike2,
            dip=self.dip2,
            rake=self.rake2,
            stf=self.stf2 or self.stf)

        return [dc1, dc2]

    def discretize_basesource(self, store, target=None):
        # Moment shares of subsource 1 and 2.
        a1 = 1.0 - self.mix
        a2 = self.mix
        mot1 = pmt.MomentTensor(strike=self.strike1, dip=self.dip1,
                                rake=self.rake1, scalar_moment=a1)
        mot2 = pmt.MomentTensor(strike=self.strike2, dip=self.dip2,
                                rake=self.rake2, scalar_moment=a2)

        delta_north = math.cos(self.azimuth * d2r) * self.distance
        delta_east = math.sin(self.azimuth * d2r) * self.distance

        # Each sub-source gets its own discretized STF, shifted by its share
        # of delta_time.
        times1, amplitudes1 = self.effective_stf1_pre().discretize_t(
            store.config.deltat, self.time - self.delta_time * a2)

        times2, amplitudes2 = self.effective_stf2_pre().discretize_t(
            store.config.deltat, self.time + self.delta_time * a1)

        nt1 = times1.size
        nt2 = times2.size

        ds = meta.DiscretizedMTSource(
            lat=self.lat,
            lon=self.lon,
            times=num.concatenate((times1, times2)),
            north_shifts=num.concatenate((
                num.repeat(self.north_shift - delta_north * a2, nt1),
                num.repeat(self.north_shift + delta_north * a1, nt2))),
            east_shifts=num.concatenate((
                num.repeat(self.east_shift - delta_east * a2, nt1),
                num.repeat(self.east_shift + delta_east * a1, nt2))),
            depths=num.concatenate((
                num.repeat(self.depth - self.delta_depth * a2, nt1),
                num.repeat(self.depth + self.delta_depth * a1, nt2))),
            m6s=num.vstack((
                mot1.m6()[num.newaxis, :] * amplitudes1[:, num.newaxis],
                mot2.m6()[num.newaxis, :] * amplitudes2[:, num.newaxis])))

        return ds

    def pyrocko_moment_tensor(self, store=None, target=None):
        # Combined moment tensor: moment-weighted sum of the two DCs.
        a1 = 1.0 - self.mix
        a2 = self.mix
        mot1 = pmt.MomentTensor(strike=self.strike1, dip=self.dip1,
                                rake=self.rake1,
                                scalar_moment=a1 * self.moment)
        mot2 = pmt.MomentTensor(strike=self.strike2, dip=self.dip2,
                                rake=self.rake2,
                                scalar_moment=a2 * self.moment)
        return pmt.MomentTensor(m=mot1.m() + mot2.m())

    def pyrocko_event(self, store=None, target=None, **kwargs):
        return SourceWithMagnitude.pyrocko_event(
            self, store, target,
            moment_tensor=self.pyrocko_moment_tensor(store, target),
            **kwargs)

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        d = {}
        mt = ev.moment_tensor
        if mt:
            # A single moment tensor is mapped to subsource 1 only
            # (mix=0.0); subsource 2 gets the same mechanism but no moment.
            (strike, dip, rake), _ = mt.both_strike_dip_rake()
            d.update(
                strike1=float(strike),
                dip1=float(dip),
                rake1=float(rake),
                strike2=float(strike),
                dip2=float(dip),
                rake2=float(rake),
                mix=0.0,
                magnitude=float(mt.moment_magnitude()))

        d.update(kwargs)
        source = super(DoubleDCSource, cls).from_pyrocko_event(ev, **d)
        source.stf1 = source.stf
        source.stf2 = HalfSinusoidSTF(effective_duration=0.)
        source.stf = None
        return source
class RingfaultSource(SourceWithMagnitude):
    '''
    A ring fault with vertical doublecouples.

    The ring is discretized into :py:gattr:`npointsources` vertical
    double-couple point sources distributed along its circumference.
    '''

    diameter = Float.T(
        default=1.0,
        help='diameter of the ring in [m]')

    sign = Float.T(
        default=1.0,
        help='inside of the ring moves up (+1) or down (-1)')

    strike = Float.T(
        default=0.0,
        help='strike direction of the ring plane, clockwise from north,'
             ' in [deg]')

    dip = Float.T(
        default=0.0,
        help='dip angle of the ring plane from horizontal in [deg]')

    npointsources = Int.T(
        default=360,
        help='number of point sources to use')

    discretized_source_class = meta.DiscretizedMTSource

    def base_key(self):
        return Source.base_key(self) + (
            self.strike, self.dip, self.diameter, self.npointsources)

    def get_factor(self):
        return self.sign * self.moment

    def discretize_basesource(self, store=None, target=None):
        # NOTE(review): despite the default of None, 'store' is required
        # below for store.config.deltat — confirm callers always pass it.
        n = self.npointsources
        phi = num.linspace(0, 2.0 * num.pi, n, endpoint=False)

        # Point positions on the ring in the (unrotated) fault plane.
        points = num.zeros((n, 3))
        points[:, 0] = num.cos(phi) * 0.5 * self.diameter
        points[:, 1] = num.sin(phi) * 0.5 * self.diameter

        # Rotate the ring plane by dip and strike.
        rotmat = pmt.euler_to_matrix(self.dip * d2r, self.strike * d2r, 0.0)
        # NOTE(review): the transposed rotation matrix is applied here; the
        # original comment ('!!! ?') questioned this convention — confirm.
        points = num.dot(rotmat.T, points.T).T

        points[:, 0] += self.north_shift
        points[:, 1] += self.east_shift
        points[:, 2] += self.depth

        # Unit vertical double-couple, moment split evenly over n points.
        m = num.array(pmt.MomentTensor(strike=90., dip=90., rake=-90.,
                                       scalar_moment=1.0 / n).m())

        # Per-point rotation about the vertical axis by the ring angle phi.
        rotmats = num.transpose(
            [[num.cos(phi), num.sin(phi), num.zeros(n)],
             [-num.sin(phi), num.cos(phi), num.zeros(n)],
             [num.zeros(n), num.zeros(n), num.ones(n)]], (2, 0, 1))

        ms = num.zeros((n, 3, 3))
        for i in range(n):
            mtemp = num.dot(rotmats[i].T, num.dot(m, rotmats[i]))
            ms[i, :, :] = num.dot(rotmat.T, num.dot(mtemp, rotmat))

        # Pack the tensors into 6-component (mnn, mee, mdd, mne, mnd, med)
        # form.
        m6s = num.vstack((ms[:, 0, 0], ms[:, 1, 1], ms[:, 2, 2],
                          ms[:, 0, 1], ms[:, 0, 2], ms[:, 1, 2])).T

        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)

        nt = times.size

        return meta.DiscretizedMTSource(
            times=num.tile(times, n),
            lat=self.lat,
            lon=self.lon,
            north_shifts=num.repeat(points[:, 0], nt),
            east_shifts=num.repeat(points[:, 1], nt),
            depths=num.repeat(points[:, 2], nt),
            m6s=num.repeat(m6s, nt, axis=0) * num.tile(
                amplitudes, n)[:, num.newaxis])
class CombiSource(Source):
    '''
    Composite source model.

    Wraps a list of arbitrary sub-sources which are discretized together
    into one combined :py:class:`~pyrocko.gf.meta.DiscretizedMTSource`.
    Origin, depth and time of the combined source are set to the mean values
    of the sub-sources.
    '''

    discretized_source_class = meta.DiscretizedMTSource

    subsources = List.T(Source.T())

    def __init__(self, subsources=None, **kwargs):
        # Avoid a mutable default argument; None behaves like the old [].
        subsources = subsources or []
        if not subsources:
            raise BadRequest(
                'Need at least one sub-source to create a CombiSource object.')

        lats = num.array(
            [subsource.lat for subsource in subsources], dtype=float)
        lons = num.array(
            [subsource.lon for subsource in subsources], dtype=float)

        lat, lon = lats[0], lons[0]

        # Move all sub-sources to a common reference origin if any of them
        # deviates. Bug fix: the condition was previously
        # `not num.all(lats == lat) and num.all(lons == lon)` which, due to
        # operator precedence, applied `not` only to the first term.
        if not (num.all(lats == lat) and num.all(lons == lon)):
            subsources = [s.clone() for s in subsources]
            for subsource in subsources[1:]:
                subsource.set_origin(lat, lon)

        # Combined source location/time: mean over all sub-sources.
        depth = float(num.mean([p.depth for p in subsources]))
        time = float(num.mean([p.time for p in subsources]))
        north_shift = float(num.mean([p.north_shift for p in subsources]))
        east_shift = float(num.mean([p.east_shift for p in subsources]))
        kwargs.update(
            time=time,
            lat=float(lat),
            lon=float(lon),
            north_shift=north_shift,
            east_shift=east_shift,
            depth=depth)

        Source.__init__(self, subsources=subsources, **kwargs)

    def get_factor(self):
        # Sub-source factors are applied individually in
        # discretize_basesource.
        return 1.0

    def discretize_basesource(self, store, target=None):
        dsources = []
        for sf in self.subsources:
            ds = sf.discretize_basesource(store, target)
            ds.m6s *= sf.get_factor()
            dsources.append(ds)

        return meta.DiscretizedMTSource.combine(dsources)
class SFSource(Source):
    '''
    A single force point source.

    Supported GF schemes: `'elastic5'`.
    '''

    discretized_source_class = meta.DiscretizedSFSource

    fn = Float.T(
        default=0.,
        help='northward component of single force [N]')

    fe = Float.T(
        default=0.,
        help='eastward component of single force [N]')

    fd = Float.T(
        default=0.,
        help='downward component of single force [N]')

    def __init__(self, **kwargs):
        Source.__init__(self, **kwargs)

    def base_key(self):
        # Force components are part of the GF lookup key.
        return Source.base_key(self) + (self.fn, self.fe, self.fd)

    def get_factor(self):
        return 1.0

    def discretize_basesource(self, store, target=None):
        # Distribute the constant force vector over the discretized STF
        # samples.
        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)
        forces = amplitudes[:, num.newaxis] * num.array(
            [[self.fn, self.fe, self.fd]], dtype=float)

        return meta.DiscretizedSFSource(forces=forces,
                                        **self._dparams_base_repeated(times))

    def pyrocko_event(self, store=None, target=None, **kwargs):
        return Source.pyrocko_event(
            self, store, target,
            **kwargs)

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        # No force information can be derived from a pyrocko event; only
        # common source attributes are taken over by the base class.
        d = {}
        d.update(kwargs)
        return super(SFSource, cls).from_pyrocko_event(ev, **d)
class PorePressurePointSource(Source):
    '''
    Excess pore pressure point source.

    For poro-elastic initial value problem where an excess pore pressure is
    brought into a small source volume.
    '''

    discretized_source_class = meta.DiscretizedPorePressureSource

    pp = Float.T(
        default=1.0,
        help='initial excess pore pressure in [Pa]')

    def base_key(self):
        return Source.base_key(self)

    def get_factor(self):
        # The pore pressure acts as the overall amplitude factor.
        return self.pp

    def discretize_basesource(self, store, target=None):
        # Unit pressure; the absolute level is applied via get_factor().
        # ('arr' is a helper defined elsewhere in this module.)
        return meta.DiscretizedPorePressureSource(pp=arr(1.0),
                                                  **self._dparams_base())
class PorePressureLineSource(Source):
    '''
    Excess pore pressure line source.

    The line source is centered at (north_shift, east_shift, depth).
    '''

    discretized_source_class = meta.DiscretizedPorePressureSource

    pp = Float.T(
        default=1.0,
        help='initial excess pore pressure in [Pa]')

    length = Float.T(
        default=0.0,
        help='length of the line source [m]')

    azimuth = Float.T(
        default=0.0,
        help='azimuth direction, clockwise from north [deg]')

    dip = Float.T(
        default=90.,
        help='dip direction, downward from horizontal [deg]')

    def base_key(self):
        return Source.base_key(self) + (self.azimuth, self.dip, self.length)

    def get_factor(self):
        # The pore pressure acts as the overall amplitude factor.
        return self.pp

    def discretize_basesource(self, store, target=None):

        # Use enough point sources to sample the line finer than the store's
        # spatial sampling.
        n = 2 * int(math.ceil(self.length / num.min(store.config.deltas))) + 1

        # Positions along the line, centered around 0.
        a = num.linspace(-0.5 * self.length, 0.5 * self.length, n)

        sa = math.sin(self.azimuth * d2r)
        ca = math.cos(self.azimuth * d2r)
        sd = math.sin(self.dip * d2r)
        cd = math.cos(self.dip * d2r)

        # Project line coordinates onto north/east/depth axes.
        points = num.zeros((n, 3))
        points[:, 0] = self.north_shift + a * ca * cd
        points[:, 1] = self.east_shift + a * sa * cd
        points[:, 2] = self.depth + a * sd

        # Unit total pressure split evenly over the points; the absolute
        # level is presumably applied via get_factor() downstream — confirm.
        return meta.DiscretizedPorePressureSource(
            times=util.num_full(n, self.time),
            lat=self.lat,
            lon=self.lon,
            north_shifts=points[:, 0],
            east_shifts=points[:, 1],
            depths=points[:, 2],
            pp=num.ones(n) / n)
class Request(Object):
    '''
    Synthetic seismogram computation request.

    ::

        Request(**kwargs)
        Request(sources, targets, **kwargs)
    '''

    sources = List.T(
        Source.T(),
        help='list of sources for which to produce synthetics.')

    targets = List.T(
        Target.T(),
        help='list of targets for which to produce synthetics.')

    @classmethod
    def args2kwargs(cls, args):
        '''
        Map positional constructor arguments to keyword arguments.

        Supports the ``Request(sources, targets, ...)`` call form.
        '''
        # NOTE(review): 3 positional args are accepted but ignored here —
        # presumably for backward compatibility; confirm.
        if len(args) not in (0, 2, 3):
            raise BadRequest('Invalid arguments.')

        if len(args) == 2:
            return dict(sources=args[0], targets=args[1])
        else:
            return {}

    def __init__(self, *args, **kwargs):
        kwargs.update(self.args2kwargs(args))
        sources = kwargs.pop('sources', [])
        targets = kwargs.pop('targets', [])

        # Allow single source/target objects as a convenience.
        if isinstance(sources, Source):
            sources = [sources]

        if isinstance(targets, (Target, StaticTarget)):
            targets = [targets]

        Object.__init__(self, sources=sources, targets=targets, **kwargs)

    @property
    def targets_dynamic(self):
        # Subset of targets which are dynamic (waveform) targets.
        return [t for t in self.targets if isinstance(t, Target)]

    @property
    def targets_static(self):
        # Subset of targets which are static displacement targets.
        return [t for t in self.targets if isinstance(t, StaticTarget)]

    @property
    def has_dynamic(self):
        return bool(self.targets_dynamic)

    @property
    def has_statics(self):
        return bool(self.targets_static)

    def subsources_map(self):
        # Group sources sharing the same base key, so their GF traces can be
        # computed together.
        m = defaultdict(list)
        for source in self.sources:
            m[source.base_key()].append(source)

        return m

    def subtargets_map(self):
        # Group targets sharing the same base key.
        m = defaultdict(list)
        for target in self.targets:
            m[target.base_key()].append(target)

        return m

    def subrequest_map(self):
        # Cartesian product of source groups and target groups.
        ms = self.subsources_map()
        mt = self.subtargets_map()
        m = {}
        for (ks, ls) in ms.items():
            for (kt, lt) in mt.items():
                m[ks, kt] = (ls, lt)

        return m
class ProcessingStats(Object):
    '''
    Timing and resource statistics collected while processing a request.

    ``t_perc_*`` attributes hold percentages of the total processing time
    spent in the respective stage; ``t_wallclock`` and ``t_cpu`` are in
    seconds.
    '''

    # dynamic (waveform) stage percentages
    t_perc_get_store_and_receiver = Float.T(default=0.)
    t_perc_discretize_source = Float.T(default=0.)
    t_perc_make_base_seismogram = Float.T(default=0.)
    t_perc_make_same_span = Float.T(default=0.)
    t_perc_post_process = Float.T(default=0.)
    t_perc_optimize = Float.T(default=0.)
    t_perc_stack = Float.T(default=0.)
    # static stage percentages
    t_perc_static_get_store = Float.T(default=0.)
    t_perc_static_discretize_basesource = Float.T(default=0.)
    t_perc_static_sum_statics = Float.T(default=0.)
    t_perc_static_post_process = Float.T(default=0.)
    t_wallclock = Float.T(default=0.)
    t_cpu = Float.T(default=0.)
    # counters
    n_read_blocks = Int.T(default=0)
    n_results = Int.T(default=0)
    n_subrequests = Int.T(default=0)
    n_stores = Int.T(default=0)
    n_records_stacked = Int.T(default=0)
class Response(Object):
    '''
    Response object to a synthetic seismogram computation request.
    '''

    request = Request.T()
    results_list = List.T(List.T(meta.SeismosizerResult.T()))
    stats = ProcessingStats.T()

    def pyrocko_traces(self):
        '''
        Return a list of requested
        :class:`~pyrocko.trace.Trace` instances.
        '''

        traces = []
        for results in self.results_list:
            for result in results:
                # failed computations are stored as exception objects in
                # results_list; skip anything that is not a proper result
                if not isinstance(result, meta.Result):
                    continue
                traces.append(result.trace.pyrocko_trace())

        return traces

    def kite_scenes(self):
        '''
        Return a list of requested
        :class:`~kite.scenes` instances.
        '''
        kite_scenes = []
        for results in self.results_list:
            for result in results:
                if isinstance(result, meta.KiteSceneResult):
                    sc = result.get_scene()
                    kite_scenes.append(sc)

        return kite_scenes

    def static_results(self):
        '''
        Return a list of requested
        :class:`~pyrocko.gf.meta.StaticResult` instances.
        '''
        statics = []
        for results in self.results_list:
            for result in results:
                if not isinstance(result, meta.StaticResult):
                    continue
                statics.append(result)

        return statics

    def iter_results(self, get='pyrocko_traces'):
        '''
        Generator function to iterate over results of request.

        Yields associated :py:class:`Source`,
        :class:`~pyrocko.gf.targets.Target`,
        :class:`~pyrocko.trace.Trace` instances in each iteration.

        :param get: ``'pyrocko_traces'`` to yield traces, ``'results'`` to
            yield the raw result objects.
        '''

        for isource, source in enumerate(self.request.sources):
            for itarget, target in enumerate(self.request.targets):
                result = self.results_list[isource][itarget]
                if get == 'pyrocko_traces':
                    yield source, target, result.trace.pyrocko_trace()
                elif get == 'results':
                    yield source, target, result

    def snuffle(self, **kwargs):
        '''
        Open *snuffler* with requested traces.
        '''

        trace.snuffle(self.pyrocko_traces(), **kwargs)
class Engine(Object):
    '''
    Base class for synthetic seismogram calculators.
    '''

    def get_store_ids(self):
        '''
        Get list of available GF store IDs.

        The base implementation knows no stores; subclasses override this.
        '''

        return []
class Rule(object):
    '''
    Base class for rules mapping GF store components to target quantities.
    '''
    pass
class VectorRule(Rule):
    '''
    Rule to project a 3-component (north, east, down) vector quantity onto
    a target's receiver orientation, with optional time differentiation.
    '''

    def __init__(self, quantity, differentiate=0, integrate=0):
        # component names as stored in the base seismogram mapping,
        # e.g. 'displacement.n', 'displacement.e', 'displacement.d'
        self.components = [quantity + '.' + c for c in 'ned']
        self.differentiate = differentiate
        self.integrate = integrate

    def required_components(self, target):
        '''
        Return only the components with non-vanishing projection factors.
        '''
        n, e, d = self.components
        sa, ca, sd, cd = target.get_sin_cos_factors()

        comps = []
        if nonzero(ca * cd):
            comps.append(n)

        if nonzero(sa * cd):
            comps.append(e)

        if nonzero(sd):
            comps.append(d)

        return tuple(comps)

    def apply_(self, target, base_seismogram):
        '''
        Combine the base seismogram components into the target channel.
        '''
        n, e, d = self.components
        sa, ca, sd, cd = target.get_sin_cos_factors()

        # accumulate the weighted sum over contributing components;
        # deltat is (re)bound from whichever component contributes
        if nonzero(ca * cd):
            data = base_seismogram[n].data * (ca * cd)
            deltat = base_seismogram[n].deltat
        else:
            data = 0.0

        if nonzero(sa * cd):
            data = data + base_seismogram[e].data * (sa * cd)
            deltat = base_seismogram[e].deltat

        if nonzero(sd):
            data = data + base_seismogram[d].data * sd
            deltat = base_seismogram[d].deltat

        # NOTE(review): if all three factors were zero and differentiate
        # is set, deltat would be unbound here — presumably such targets
        # never occur; confirm against target construction.
        if self.differentiate:
            data = util.diff_fd(self.differentiate, 4, deltat, data)

        if self.integrate:
            raise NotImplementedError('Integration is not implemented yet.')

        return data
class HorizontalVectorRule(Rule):
    '''
    Rule to project a horizontal (north, east) vector quantity onto a
    target's receiver orientation, with optional time differentiation.
    '''

    def __init__(self, quantity, differentiate=0, integrate=0):
        self.components = [quantity + '.' + c for c in 'ne']
        self.differentiate = differentiate
        self.integrate = integrate

    def required_components(self, target):
        '''
        Return only the components with non-vanishing projection factors.
        '''
        n, e = self.components
        sa, ca, _, _ = target.get_sin_cos_factors()

        comps = []
        if nonzero(ca):
            comps.append(n)

        if nonzero(sa):
            comps.append(e)

        return tuple(comps)

    def apply_(self, target, base_seismogram):
        '''
        Combine the base seismogram components into the target channel.
        '''
        n, e = self.components
        sa, ca, _, _ = target.get_sin_cos_factors()

        # Bug fix: bind deltat from whichever component actually
        # contributes (mirroring VectorRule.apply_). Previously deltat was
        # read unconditionally from the east component, which raised
        # KeyError when sa == 0 (east not required, hence not computed)
        # and differentiation was requested.
        if nonzero(ca):
            data = base_seismogram[n].data * ca
            deltat = base_seismogram[n].deltat
        else:
            data = 0.0

        if nonzero(sa):
            data = data + base_seismogram[e].data * sa
            deltat = base_seismogram[e].deltat

        if self.differentiate:
            data = util.diff_fd(self.differentiate, 4, deltat, data)

        if self.integrate:
            raise NotImplementedError('Integration is not implemented yet.')

        return data
class ScalarRule(Rule):
    '''
    Rule for a scalar channel quantity (e.g. pore pressure), with optional
    time differentiation.
    '''

    def __init__(self, quantity, differentiate=0):
        self.c = quantity
        # Bug fix: ``differentiate`` was accepted but never stored, so
        # ``apply_`` crashed with AttributeError on ``self.differentiate``.
        self.differentiate = differentiate

    def required_components(self, target):
        return (self.c, )

    def apply_(self, target, base_seismogram):
        # copy so differentiation does not mutate the base seismogram
        data = base_seismogram[self.c].data.copy()
        deltat = base_seismogram[self.c].deltat
        if self.differentiate:
            data = util.diff_fd(self.differentiate, 4, deltat, data)

        return data
class StaticDisplacement(Rule):
    '''
    Rule for static displacement targets.

    Passes the north/east/down components through unchanged and, for
    satellite targets, additionally computes a line-of-sight projection.
    '''

    def required_components(self, target):
        return tuple('displacement.%s' % comp for comp in 'ned')

    def apply_(self, target, base_statics):
        if isinstance(target, SatelliteTarget):
            los_fac = target.get_los_factors()
            down = base_statics['displacement.d']
            east = base_statics['displacement.e']
            north = base_statics['displacement.n']
            # project (up, east, north) onto the line of sight
            base_statics['displacement.los'] = (
                los_fac[:, 0] * -down
                + los_fac[:, 1] * east
                + los_fac[:, 2] * north)
        return base_statics
# Mapping of dynamic target quantities to candidate rules. The first rule
# whose required components are all provided by the GF store is used (see
# LocalEngine.get_rule). Derivative rules allow e.g. velocity to be
# obtained by differentiating displacement GFs.
channel_rules = {
    'displacement': [VectorRule('displacement')],
    'rotation': [VectorRule('rotation')],
    'velocity': [
        VectorRule('velocity'),
        VectorRule('displacement', differentiate=1)],
    'acceleration': [
        VectorRule('acceleration'),
        VectorRule('velocity', differentiate=1),
        VectorRule('displacement', differentiate=2)],
    'pore_pressure': [ScalarRule('pore_pressure')],
    'vertical_tilt': [HorizontalVectorRule('vertical_tilt')],
    'darcy_velocity': [VectorRule('darcy_velocity')],
}

# Rules for static (displacement-only) targets.
static_rules = {
    'displacement': [StaticDisplacement()]
}
class OutOfBoundsContext(Object):
    '''
    Details about the source-target pair for which a GF store lookup went
    out of bounds; attached to the raised exception for error reporting.
    '''

    source = Source.T()
    target = Target.T()
    distance = Float.T()
    components = List.T(String.T())
def process_dynamic_timeseries(work, psources, ptargets, engine, nthreads=0):
    '''
    Process dynamic subrequests in batched (timeseries) mode.

    Generator yielding ``((isource, itarget, result), tcounters)`` tuples.
    Targets sharing store, sample rate and interpolation are computed
    together via ``engine.base_seismograms``.
    '''
    dsource_cache = {}
    # placeholder stage counters; this code path does not measure per-stage
    # timings (compare process_dynamic which appends xtime() values)
    tcounters = list(range(6))

    store_ids = set()
    sources = set()
    targets = set()

    # remember each target's position in ptargets so results can be
    # addressed after the targets have been shuffled through sets
    for itarget, target in enumerate(ptargets):
        target._id = itarget

    for w in work:
        _, _, isources, itargets = w

        sources.update([psources[isource] for isource in isources])
        targets.update([ptargets[itarget] for itarget in itargets])

    store_ids = set([t.store_id for t in targets])

    for isource, source in enumerate(psources):

        # union of components needed by any target for this source
        components = set()
        for itarget, target in enumerate(targets):
            rule = engine.get_rule(source, target)
            components.update(rule.required_components(target))

        for store_id in store_ids:
            store_targets = [t for t in targets if t.store_id == store_id]

            sample_rates = set([t.sample_rate for t in store_targets])
            interpolations = set([t.interpolation for t in store_targets])

            base_seismograms = []
            store_targets_out = []

            # batch targets by (sample_rate, interpolation) pairs
            for samp_rate in sample_rates:
                for interp in interpolations:
                    engine_targets = [
                        t for t in store_targets if t.sample_rate == samp_rate
                        and t.interpolation == interp]

                    if not engine_targets:
                        continue

                    store_targets_out += engine_targets

                    base_seismograms += engine.base_seismograms(
                        source,
                        engine_targets,
                        components,
                        dsource_cache,
                        nthreads)

            # check per-trace error codes before post-processing
            for iseis, seismogram in enumerate(base_seismograms):
                for tr in seismogram.values():
                    if tr.err != store.SeismosizerErrorEnum.SUCCESS:
                        # NOTE(review): base_seismograms is parallel to
                        # store_targets_out, not store_targets — the error
                        # context below may name the wrong target when the
                        # two orderings differ; confirm.
                        e = SeismosizerError(
                            'Seismosizer failed with return code %i\n%s' % (
                                tr.err, str(
                                    OutOfBoundsContext(
                                        source=source,
                                        target=store_targets[iseis],
                                        distance=source.distance_to(
                                            store_targets[iseis]),
                                        components=components))))
                        raise e

            for seismogram, target in zip(base_seismograms, store_targets_out):

                try:
                    result = engine._post_process_dynamic(
                        seismogram, source, target)
                except SeismosizerError as e:
                    # failures are reported as result objects, not raised
                    result = e

                yield (isource, target._id, result), tcounters
def process_dynamic(work, psources, ptargets, engine, nthreads=0):
    '''
    Process dynamic subrequests one source-target pair at a time.

    Generator yielding ``((isource, itarget, result), tcounters)`` tuples,
    where ``tcounters`` are timestamps delimiting the processing stages.
    '''
    dsource_cache = {}

    for w in work:
        _, _, isources, itargets = w

        sources = [psources[isource] for isource in isources]
        targets = [ptargets[itarget] for itarget in itargets]

        # union of components needed by any target of this subrequest;
        # sources in a subrequest share a base_key, so the first source is
        # representative for rule lookup
        components = set()
        for target in targets:
            rule = engine.get_rule(sources[0], target)
            components.update(rule.required_components(target))

        for isource, source in zip(isources, sources):
            for itarget, target in zip(itargets, targets):

                try:
                    base_seismogram, tcounters = engine.base_seismogram(
                        source, target, components, dsource_cache, nthreads)
                except meta.OutOfBounds as e:
                    # attach context of the first pair of this subrequest
                    e.context = OutOfBoundsContext(
                        source=sources[0],
                        target=targets[0],
                        distance=sources[0].distance_to(targets[0]),
                        components=components)
                    raise

                # accumulate stacking statistics over all components
                n_records_stacked = 0
                t_optimize = 0.0
                t_stack = 0.0

                for _, tr in base_seismogram.items():
                    n_records_stacked += tr.n_records_stacked
                    t_optimize += tr.t_optimize
                    t_stack += tr.t_stack

                try:
                    result = engine._post_process_dynamic(
                        base_seismogram, source, target)
                    result.n_records_stacked = n_records_stacked
                    # divisor used later to de-duplicate shared stacking
                    # costs across the subrequest
                    result.n_shared_stacking = len(sources) *\
                        len(targets)
                    result.t_optimize = t_optimize
                    result.t_stack = t_stack
                except SeismosizerError as e:
                    # failures are reported as result objects, not raised
                    result = e

                tcounters.append(xtime())
                yield (isource, itarget, result), tcounters
def process_static(work, psources, ptargets, engine, nthreads=0):
    '''
    Process static-displacement subrequests.

    Generator yielding ``((isource, itarget, result), tcounters)`` tuples,
    mirroring :py:func:`process_dynamic`.
    '''
    for w in work:
        _, _, isources, itargets = w

        sources = [psources[isource] for isource in isources]
        targets = [ptargets[itarget] for itarget in itargets]

        for isource, source in zip(isources, sources):
            for itarget, target in zip(itargets, targets):
                components = engine.get_rule(source, target)\
                    .required_components(target)

                try:
                    base_statics, tcounters = engine.base_statics(
                        source, target, components, nthreads)
                except meta.OutOfBounds as e:
                    # attach context of the first pair of this subrequest;
                    # distance is not meaningful for static targets
                    e.context = OutOfBoundsContext(
                        source=sources[0],
                        target=targets[0],
                        distance=float('nan'),
                        components=components)
                    raise
                result = engine._post_process_statics(
                    base_statics, source, target)
                tcounters.append(xtime())

                yield (isource, itarget, result), tcounters
class LocalEngine(Engine):
    '''
    Offline synthetic seismogram calculator.

    :param use_env: if ``True``, fill :py:attr:`store_superdirs` and
        :py:attr:`store_dirs` with paths set in environment variables
        GF_STORE_SUPERDIRS and GF_STORE_DIRS.
    :param use_config: if ``True``, fill :py:attr:`store_superdirs` and
        :py:attr:`store_dirs` with paths set in the user's config file.

        The config file can be found at :file:`~/.pyrocko/config.pf`

        .. code-block :: python

            gf_store_dirs: ['/home/pyrocko/gf_stores/ak135/']
            gf_store_superdirs: ['/home/pyrocko/gf_stores/']
    '''

    store_superdirs = List.T(
        String.T(),
        help='directories which are searched for Green\'s function stores')

    store_dirs = List.T(
        String.T(),
        help='additional individual Green\'s function store directories')

    default_store_id = String.T(
        optional=True,
        help='default store ID to be used when a request does not provide '
             'one')

    def __init__(self, **kwargs):
        use_env = kwargs.pop('use_env', False)
        use_config = kwargs.pop('use_config', False)
        Engine.__init__(self, **kwargs)
        if use_env:
            env_store_superdirs = os.environ.get('GF_STORE_SUPERDIRS', '')
            env_store_dirs = os.environ.get('GF_STORE_DIRS', '')

            if env_store_superdirs:
                self.store_superdirs.extend(env_store_superdirs.split(':'))

            if env_store_dirs:
                self.store_dirs.extend(env_store_dirs.split(':'))

        if use_config:
            c = config.config()
            self.store_superdirs.extend(c.gf_store_superdirs)
            self.store_dirs.extend(c.gf_store_dirs)

        self._check_store_dirs_type()
        # caches: store ID -> directory, store ID -> open Store object
        self._id_to_store_dir = {}
        self._open_stores = {}
        self._effective_default_store_id = None

    def _check_store_dirs_type(self):
        # guard against a single string being passed where a list of paths
        # is expected
        for sdir in ['store_dirs', 'store_superdirs']:
            if not isinstance(self.__getattribute__(sdir), list):
                raise TypeError("{} of {} is not of type list".format(
                    sdir, self.__class__.__name__))

    def _get_store_id(self, store_dir):
        # open the store just long enough to read its configured ID
        store_ = store.Store(store_dir)
        store_id = store_.config.id
        store_.close()
        return store_id

    def _looks_like_store_dir(self, store_dir):
        # a GF store directory must contain these three files
        return os.path.isdir(store_dir) and \
            all(os.path.isfile(pjoin(store_dir, x)) for x in
                ('index', 'traces', 'config'))

    def iter_store_dirs(self):
        '''
        Collect all candidate store directories from superdirs and
        explicitly configured store dirs.
        '''
        store_dirs = set()
        for d in self.store_superdirs:
            if not os.path.exists(d):
                logger.warning('store_superdir not available: %s' % d)
                continue

            for entry in os.listdir(d):
                store_dir = os.path.realpath(pjoin(d, entry))
                if self._looks_like_store_dir(store_dir):
                    store_dirs.add(store_dir)

        for store_dir in self.store_dirs:
            store_dirs.add(os.path.realpath(store_dir))

        return store_dirs

    def _scan_stores(self):
        # build the ID -> directory mapping, rejecting ambiguous IDs
        for store_dir in self.iter_store_dirs():
            store_id = self._get_store_id(store_dir)
            if store_id not in self._id_to_store_dir:
                self._id_to_store_dir[store_id] = store_dir
            else:
                if store_dir != self._id_to_store_dir[store_id]:
                    raise DuplicateStoreId(
                        'GF store ID %s is used in (at least) two '
                        'different stores. Locations are: %s and %s' %
                        (store_id, self._id_to_store_dir[store_id], store_dir))

    def get_store_dir(self, store_id):
        '''
        Lookup directory given a GF store ID.
        '''

        if store_id not in self._id_to_store_dir:
            self._scan_stores()

        if store_id not in self._id_to_store_dir:
            raise NoSuchStore(store_id, self.iter_store_dirs())

        return self._id_to_store_dir[store_id]

    def get_store_ids(self):
        '''
        Get list of available store IDs.
        '''

        self._scan_stores()
        return sorted(self._id_to_store_dir.keys())

    def effective_default_store_id(self):
        # resolve lazily: use the configured default, or — if exactly one
        # store is available — that single store
        if self._effective_default_store_id is None:
            if self.default_store_id is None:
                store_ids = self.get_store_ids()
                if len(store_ids) == 1:
                    self._effective_default_store_id = self.get_store_ids()[0]
                else:
                    raise NoDefaultStoreSet()
            else:
                self._effective_default_store_id = self.default_store_id

        return self._effective_default_store_id

    def get_store(self, store_id=None):
        '''
        Get a store from the engine.

        :param store_id: identifier of the store (optional)
        :returns: :py:class:`~pyrocko.gf.store.Store` object

        If no ``store_id`` is provided the store
        associated with the :py:attr:`default_store_id` is returned.
        Raises :py:exc:`NoDefaultStoreSet` if :py:attr:`default_store_id` is
        undefined.
        '''

        if store_id is None:
            store_id = self.effective_default_store_id()

        # open stores are kept cached for reuse
        if store_id not in self._open_stores:
            store_dir = self.get_store_dir(store_id)
            self._open_stores[store_id] = store.Store(store_dir)

        return self._open_stores[store_id]

    def get_store_config(self, store_id):
        '''
        Get the configuration object of the store with given ID.
        '''
        store = self.get_store(store_id)
        return store.config

    def get_store_extra(self, store_id, key):
        '''
        Get extra configuration *key* from the store with given ID.
        '''
        store = self.get_store(store_id)
        return store.get_extra(key)

    def close_cashed_stores(self):
        '''
        Close and forget all cached stores.

        NOTE(review): 'cashed' is a typo but part of the public API —
        the method name is kept for backward compatibility.
        '''
        store_ids = []
        for store_id, store_ in self._open_stores.items():
            store_.close()
            store_ids.append(store_id)

        for store_id in store_ids:
            self._open_stores.pop(store_id)

    def get_rule(self, source, target):
        '''
        Find the first applicable rule to compute the target's quantity
        from the components provided by the target's GF store.

        :raises BadRequest: if no rule matches.
        '''
        cprovided = self.get_store(target.store_id).get_provided_components()

        if isinstance(target, StaticTarget):
            quantity = target.quantity
            available_rules = static_rules
        elif isinstance(target, Target):
            quantity = target.effective_quantity()
            available_rules = channel_rules
        # NOTE(review): for any other target type, ``quantity`` and
        # ``available_rules`` would be unbound below (NameError) —
        # presumably only these two target kinds ever reach this point.

        try:
            for rule in available_rules[quantity]:
                cneeded = rule.required_components(target)
                if all(c in cprovided for c in cneeded):
                    return rule

        except KeyError:
            pass

        raise BadRequest(
            'No rule to calculate "%s" with GFs from store "%s" '
            'for source model "%s".' % (
                target.effective_quantity(),
                target.store_id,
                source.__class__.__name__))

    def _cached_discretize_basesource(self, source, store, cache, target):
        # discretizations are reused across targets for the same
        # (source, store) pair
        if (source, store) not in cache:
            cache[source, store] = source.discretize_basesource(store, target)

        return cache[source, store]

    def base_seismograms(self, source, targets, components, dsource_cache,
                         nthreads=0):
        '''
        Compute raw (pre-post-processing) seismograms for one source and a
        batch of targets sharing store, interpolation and sample rate.
        '''

        target = targets[0]

        interp = set([t.interpolation for t in targets])
        if len(interp) > 1:
            raise BadRequest('Targets have different interpolation schemes.')

        rates = set([t.sample_rate for t in targets])
        if len(rates) > 1:
            raise BadRequest('Targets have different sample rates.')

        store_ = self.get_store(target.store_id)
        receivers = [t.receiver(store_) for t in targets]

        if target.sample_rate is not None:
            deltat = 1. / target.sample_rate
            rate = target.sample_rate
        else:
            deltat = None
            rate = store_.config.sample_rate

        # requested time windows, converted to sample indices
        tmin = num.fromiter(
            (t.tmin for t in targets), dtype=float, count=len(targets))
        tmax = num.fromiter(
            (t.tmax for t in targets), dtype=float, count=len(targets))

        itmin = num.floor(tmin * rate).astype(num.int64)
        itmax = num.ceil(tmax * rate).astype(num.int64)
        nsamples = itmax - itmin + 1

        # targets without a time window (NaN tmin) get the full span
        mask = num.isnan(tmin)
        itmin[mask] = 0
        nsamples[mask] = -1

        base_source = self._cached_discretize_basesource(
            source, store_, dsource_cache, target)

        base_seismograms = store_.calc_seismograms(
            base_source, receivers, components,
            deltat=deltat,
            itmin=itmin, nsamples=nsamples,
            interpolation=target.interpolation,
            optimization=target.optimization,
            nthreads=nthreads)

        for i, base_seismogram in enumerate(base_seismograms):
            base_seismograms[i] = store.make_same_span(base_seismogram)

        return base_seismograms

    def base_seismogram(self, source, target, components, dsource_cache,
                        nthreads):
        '''
        Compute the raw seismogram for a single source-target pair.

        :returns: ``(base_seismogram, tcounters)`` where ``tcounters`` are
            timestamps delimiting the processing stages.
        '''

        tcounters = [xtime()]

        store_ = self.get_store(target.store_id)
        receiver = target.receiver(store_)

        # NOTE(review): ``target.tmin and target.tmax is not None`` treats
        # tmin == 0.0 (falsy) like an unset window — probably should be
        # ``target.tmin is not None and target.tmax is not None``; kept
        # as-is here, confirm intended behavior before changing.
        if target.tmin and target.tmax is not None:
            rate = store_.config.sample_rate
            itmin = int(num.floor(target.tmin * rate))
            itmax = int(num.ceil(target.tmax * rate))
            nsamples = itmax - itmin + 1
        else:
            itmin = None
            nsamples = None

        tcounters.append(xtime())
        base_source = self._cached_discretize_basesource(
            source, store_, dsource_cache, target)

        tcounters.append(xtime())

        if target.sample_rate is not None:
            deltat = 1. / target.sample_rate
        else:
            deltat = None

        base_seismogram = store_.seismogram(
            base_source, receiver, components,
            deltat=deltat,
            itmin=itmin, nsamples=nsamples,
            interpolation=target.interpolation,
            optimization=target.optimization,
            nthreads=nthreads)

        tcounters.append(xtime())

        base_seismogram = store.make_same_span(base_seismogram)

        tcounters.append(xtime())

        return base_seismogram, tcounters

    def base_statics(self, source, target, components, nthreads):
        '''
        Compute raw static displacements for a single source-target pair.

        :returns: ``(base_statics, tcounters)`` where ``tcounters`` are
            timestamps delimiting the processing stages.
        '''
        tcounters = [xtime()]
        store_ = self.get_store(target.store_id)

        if target.tsnapshot is not None:
            rate = store_.config.sample_rate
            itsnapshot = int(num.floor(target.tsnapshot * rate))
        else:
            # without a snapshot time, the store computes the final
            # (fully developed) static displacement
            itsnapshot = None
        tcounters.append(xtime())

        base_source = source.discretize_basesource(store_, target=target)

        tcounters.append(xtime())

        base_statics = store_.statics(
            base_source,
            target,
            itsnapshot,
            components,
            target.interpolation,
            nthreads)

        tcounters.append(xtime())

        return base_statics, tcounters

    def _post_process_dynamic(self, base_seismogram, source, target):
        # all components share deltat/itmin after make_same_span
        base_any = next(iter(base_seismogram.values()))
        deltat = base_any.deltat
        itmin = base_any.itmin

        rule = self.get_rule(source, target)
        data = rule.apply_(target, base_seismogram)

        factor = source.get_factor() * target.get_factor()
        if factor != 1.0:
            data = data * factor

        stf = source.effective_stf_post()

        times, amplitudes = stf.discretize_t(
            deltat, 0.0)

        # repeat end point to prevent boundary effects
        padded_data = num.empty(data.size + amplitudes.size, dtype=float)
        padded_data[:data.size] = data
        padded_data[data.size:] = data[-1]
        data = num.convolve(amplitudes, padded_data)

        tmin = itmin * deltat + times[0]

        tr = meta.SeismosizerTrace(
            codes=target.codes,
            # trim the convolution tail introduced by the padding
            data=data[:-amplitudes.size],
            deltat=deltat,
            tmin=tmin)

        return target.post_process(self, source, tr)

    def _post_process_statics(self, base_statics, source, starget):
        rule = self.get_rule(source, starget)
        data = rule.apply_(starget, base_statics)

        # scale all components in place by the source amplitude factor
        factor = source.get_factor()
        if factor != 1.0:
            for v in data.values():
                v *= factor

        return starget.post_process(self, source, base_statics)

    def process(self, *args, **kwargs):
        '''
        Process a request.

        ::

            process(**kwargs)
            process(request, **kwargs)
            process(sources, targets, **kwargs)

        The request can be given as a :py:class:`Request` object, or such an
        object is created using ``Request(**kwargs)`` for convenience.

        :returns: :py:class:`Response` object
        '''

        if len(args) not in (0, 1, 2):
            raise BadRequest('Invalid arguments.')

        if len(args) == 1:
            kwargs['request'] = args[0]

        elif len(args) == 2:
            kwargs.update(Request.args2kwargs(args))

        request = kwargs.pop('request', None)
        status_callback = kwargs.pop('status_callback', None)
        calc_timeseries = kwargs.pop('calc_timeseries', True)

        # 'nprocs' is the legacy spelling of 'nthreads'
        nprocs = kwargs.pop('nprocs', None)
        nthreads = kwargs.pop('nthreads', 1)
        if nprocs is not None:
            nthreads = nprocs

        if request is None:
            request = Request(**kwargs)

        # resource module is unavailable on some platforms (see import at
        # top of file); statistics are skipped there
        if resource:
            rs0 = resource.getrusage(resource.RUSAGE_SELF)
            rc0 = resource.getrusage(resource.RUSAGE_CHILDREN)
        tt0 = xtime()

        # make sure stores are open before fork()
        store_ids = set(target.store_id for target in request.targets)
        for store_id in store_ids:
            self.get_store(store_id)

        source_index = dict((x, i) for (i, x) in
                            enumerate(request.sources))
        target_index = dict((x, i) for (i, x) in
                            enumerate(request.targets))

        m = request.subrequest_map()

        # deterministic processing order over subrequest keys
        skeys = sorted(m.keys(), key=cmp_to_key(cmp_none_aware))
        results_list = []

        for i in range(len(request.sources)):
            results_list.append([None] * len(request.targets))

        tcounters_dyn_list = []
        tcounters_static_list = []
        nsub = len(skeys)
        isub = 0

        # Processing dynamic targets through
        # parimap(process_subrequest_dynamic)
        if calc_timeseries:
            _process_dynamic = process_dynamic_timeseries
        else:
            _process_dynamic = process_dynamic

        if request.has_dynamic:
            work_dynamic = [
                (i, nsub,
                 [source_index[source] for source in m[k][0]],
                 [target_index[target] for target in m[k][1]
                  if not isinstance(target, StaticTarget)])
                for (i, k) in enumerate(skeys)]

            for ii_results, tcounters_dyn in _process_dynamic(
                    work_dynamic, request.sources, request.targets, self,
                    nthreads):

                # store stage durations (differences of timestamps)
                tcounters_dyn_list.append(num.diff(tcounters_dyn))
                isource, itarget, result = ii_results
                results_list[isource][itarget] = result

                if status_callback:
                    status_callback(isub, nsub)

                isub += 1

        # Processing static targets through process_static
        if request.has_statics:
            work_static = [
                (i, nsub,
                 [source_index[source] for source in m[k][0]],
                 [target_index[target] for target in m[k][1]
                  if isinstance(target, StaticTarget)])
                for (i, k) in enumerate(skeys)]

            for ii_results, tcounters_static in process_static(
                    work_static, request.sources, request.targets, self,
                    nthreads=nthreads):

                tcounters_static_list.append(num.diff(tcounters_static))
                isource, itarget, result = ii_results
                results_list[isource][itarget] = result

                if status_callback:
                    status_callback(isub, nsub)

                isub += 1

        if status_callback:
            status_callback(nsub, nsub)

        tt1 = time.time()
        if resource:
            rs1 = resource.getrusage(resource.RUSAGE_SELF)
            rc1 = resource.getrusage(resource.RUSAGE_CHILDREN)

        s = ProcessingStats()

        if request.has_dynamic:
            # per-stage percentages of total dynamic processing time
            tcumu_dyn = num.sum(num.vstack(tcounters_dyn_list), axis=0)
            t_dyn = float(num.sum(tcumu_dyn))
            perc_dyn = map(float, tcumu_dyn / t_dyn * 100.)
            (s.t_perc_get_store_and_receiver,
             s.t_perc_discretize_source,
             s.t_perc_make_base_seismogram,
             s.t_perc_make_same_span,
             s.t_perc_post_process) = perc_dyn
        else:
            t_dyn = 0.

        if request.has_statics:
            tcumu_static = num.sum(num.vstack(tcounters_static_list), axis=0)
            t_static = num.sum(tcumu_static)
            perc_static = map(float, tcumu_static / t_static * 100.)
            (s.t_perc_static_get_store,
             s.t_perc_static_discretize_basesource,
             s.t_perc_static_sum_statics,
             s.t_perc_static_post_process) = perc_static

        s.t_wallclock = tt1 - tt0
        if resource:
            # CPU time and read-block deltas include child processes
            s.t_cpu = (
                (rs1.ru_utime + rs1.ru_stime + rc1.ru_utime + rc1.ru_stime) -
                (rs0.ru_utime + rs0.ru_stime + rc0.ru_utime + rc0.ru_stime))
            s.n_read_blocks = (
                (rs1.ru_inblock + rc1.ru_inblock) -
                (rs0.ru_inblock + rc0.ru_inblock))

        n_records_stacked = 0.
        for results in results_list:
            for result in results:
                if not isinstance(result, meta.Result):
                    continue
                # de-duplicate costs shared across a subrequest
                shr = float(result.n_shared_stacking)
                n_records_stacked += result.n_records_stacked / shr
                s.t_perc_optimize += result.t_optimize / shr
                s.t_perc_stack += result.t_stack / shr
        s.n_records_stacked = int(n_records_stacked)
        # NOTE(review): ``x /= t_dyn * 100`` divides by (t_dyn * 100),
        # i.e. x = x / (t_dyn * 100) — to express a percentage this would
        # be ``x = x / t_dyn * 100``; confirm intended semantics.
        if t_dyn != 0.:
            s.t_perc_optimize /= t_dyn * 100
            s.t_perc_stack /= t_dyn * 100

        return Response(
            request=request,
            results_list=results_list,
            stats=s)
class RemoteEngine(Engine):
    '''
    Client for remote synthetic seismogram calculator.
    '''

    site = String.T(default=ws.g_default_site, optional=True)
    url = String.T(default=ws.g_url, optional=True)

    def process(self, request=None, status_callback=None, **kwargs):
        '''
        Process a request on the remote service.

        NOTE(review): ``status_callback`` is accepted for interface
        compatibility with :py:meth:`LocalEngine.process` but is not used
        here.
        '''

        if request is None:
            request = Request(**kwargs)

        return ws.seismosizer(url=self.url, site=self.site, request=request)
# module-level singleton engine, created lazily by get_engine()
g_engine = None


def get_engine(store_superdirs=()):
    '''
    Get the shared :py:class:`LocalEngine` instance, creating it on first
    use from environment variables and the user's config file.

    :param store_superdirs: extra store super-directories to register with
        the engine (duplicates are ignored). Default changed from a
        mutable ``[]`` to an immutable ``()`` — behavior is identical, the
        argument is only iterated.
    :returns: shared :py:class:`LocalEngine` instance
    '''
    global g_engine
    if g_engine is None:
        g_engine = LocalEngine(use_env=True, use_config=True)

    for d in store_superdirs:
        if d not in g_engine.store_superdirs:
            g_engine.store_superdirs.append(d)

    return g_engine
class SourceGroup(Object):
    '''
    Base class for collections of sources.

    Attribute access is vectorized: ``group.lat`` gathers ``s.lat`` from
    each contained source into a float NumPy array.
    '''

    def __getattr__(self, k):
        return num.fromiter((getattr(s, k) for s in self),
                            dtype=float)

    def __iter__(self):
        raise NotImplementedError(
            'This method should be implemented in subclass.')

    def __len__(self):
        raise NotImplementedError(
            'This method should be implemented in subclass.')
class SourceList(SourceGroup):
    '''
    Plain list of sources.
    '''

    sources = List.T(Source.T())

    def append(self, s):
        # add source *s* to the list
        self.sources.append(s)

    def __iter__(self):
        return iter(self.sources)

    def __len__(self):
        return len(self.sources)
class SourceGrid(SourceGroup):
    '''
    Regular grid of sources, generated by varying parameters of a base
    source over configured ranges.
    '''

    base = Source.T()
    variables = Dict.T(String.T(), Range.T())
    order = List.T(String.T())

    def __len__(self):
        # total number of grid nodes: product of per-parameter counts
        n = 1
        for (k, v) in self.make_coords(self.base):
            n *= len(list(v))

        return n

    def __iter__(self):
        # yield one regularized clone of the base source per grid node
        for items in permudef(self.make_coords(self.base)):
            s = self.base.clone(**{k: v for (k, v) in items})
            s.regularize()
            yield s

    def ordered_params(self):
        '''
        Yield the varied parameter names, ``order`` entries first, then in
        base-source declaration order; reject unknown parameters.
        '''
        ks = list(self.variables.keys())
        for k in self.order + list(self.base.keys()):
            if k in ks:
                yield k
                ks.remove(k)
        if ks:
            raise Exception('Invalid parameter "%s" for source type "%s".' %
                            (ks[0], self.base.__class__.__name__))

    def make_coords(self, base):
        # materialize each parameter's range relative to the base value
        return [(param, self.variables[param].make(base=base[param]))
                for param in self.ordered_params()]
# All source model classes exposed by this module.
source_classes = [
    Source,
    SourceWithMagnitude,
    SourceWithDerivedMagnitude,
    ExplosionSource,
    RectangularExplosionSource,
    DCSource,
    CLVDSource,
    VLVDSource,
    MTSource,
    RectangularSource,
    PseudoDynamicRupture,
    DoubleDCSource,
    RingfaultSource,
    CombiSource,
    SFSource,
    PorePressurePointSource,
    PorePressureLineSource,
]

# All source-time-function classes exposed by this module.
stf_classes = [
    STF,
    BoxcarSTF,
    TriangularSTF,
    HalfSinusoidSTF,
    ResonatorSTF,
]

# Public API: explicit names plus all source and STF class names.
__all__ = '''
SeismosizerError
BadRequest
NoSuchStore
DerivedMagnitudeError
STFMode
'''.split() + [S.__name__ for S in source_classes + stf_classes] + '''
Request
ProcessingStats
Response
Engine
LocalEngine
RemoteEngine
source_classes
get_engine
Range
SourceGroup
SourceList
SourceGrid
map_anchor
'''.split()