1# http://pyrocko.org - GPLv3
2#
3# The Pyrocko Developers, 21st Century
4# ---|P------/S----------~Lg----------
5from __future__ import absolute_import, division, print_function
7from collections import defaultdict
8from functools import cmp_to_key
9import time
10import math
11import os
12import re
13import logging
14try:
15 import resource
16except ImportError:
17 resource = None
18from hashlib import sha1
20import numpy as num
21from scipy.interpolate import RegularGridInterpolator
23from pyrocko.guts import (Object, Float, String, StringChoice, List,
24 Timestamp, Int, SObject, ArgumentError, Dict,
25 ValidationError, Bool)
26from pyrocko.guts_array import Array
28from pyrocko import moment_tensor as pmt
29from pyrocko import trace, util, config, model, eikonal_ext
30from pyrocko.orthodrome import ne_to_latlon
31from pyrocko.model import Location
32from pyrocko.modelling import OkadaSource, make_okada_coefficient_matrix, \
33 okada_ext, invert_fault_dislocations_bem
35from . import meta, store, ws
36from .tractions import TractionField, DirectedTractions
37from .targets import Target, StaticTarget, SatelliteTarget
# Shortcut for joining filesystem paths.
pjoin = os.path.join

# Prefix used for guts type names defined in this module.
guts_prefix = 'pf'

# Degree/radian conversion factors and the kilometer unit in meters.
d2r = math.pi / 180.
r2d = 180. / math.pi
km = 1e3

logger = logging.getLogger('pyrocko.gf.seismosizer')
def cmp_none_aware(a, b):
    '''
    Three-way comparison which tolerates ``None`` values.

    ``None`` sorts before any other value.  Tuples are compared
    element-wise, recursively.  Returns -1, 0 or 1.
    '''
    if isinstance(a, tuple) and isinstance(b, tuple):
        for ea, eb in zip(a, b):
            res = cmp_none_aware(ea, eb)
            if res:
                return res

        return 0

    if a is None and b is None:
        return 0

    if a is None:
        return -1

    if b is None:
        return 1

    return bool(a > b) - bool(a < b)
def xtime():
    # Thin wrapper around time.time().  NOTE(review): presumably kept as a
    # single indirection point for timing — confirm before removing.
    return time.time()
class SeismosizerError(Exception):
    '''
    Base exception for errors raised by the seismosizer machinery.
    '''
    pass
class BadRequest(SeismosizerError):
    '''
    Raised for invalid requests to the seismosizer.
    '''
    pass
class DuplicateStoreId(Exception):
    '''
    Raised when two GF stores with the same id are encountered.
    '''
    pass
class NoDefaultStoreSet(Exception):
    '''
    Raised when a default GF store is required but none is configured.
    '''
    pass
class ConversionError(Exception):
    '''
    Raised when a conversion between source representations fails.
    '''
    pass
class NoSuchStore(BadRequest):
    '''
    Raised when a GF store cannot be located, optionally reporting the
    store id looked for and the folders searched.
    '''

    def __init__(self, store_id=None, dirs=None):
        BadRequest.__init__(self)
        self.store_id = store_id
        self.dirs = dirs

    def __str__(self):
        if self.store_id is None:
            message = 'GF store not found.'
        else:
            message = 'no GF store with id "%s" found.' % self.store_id

        if self.dirs is not None:
            folders = '\n '.join(sorted(self.dirs))
            message += ' Searched folders:\n %s' % folders

        return message
def ufloat(s):
    '''
    Convert a string to float, supporting the unit suffixes ``k`` (1e3)
    and ``M`` (1e6), e.g. ``'10k'`` -> ``10000.0``.

    :raises ValueError: if the string is a bare unit suffix or does not
        parse as a number.
    '''
    units = {
        'k': 1e3,
        'M': 1e6,
    }

    # Remember the unmodified input for error reporting; ``s`` is
    # truncated below, so using it in the message would always show ''.
    s_orig = s

    factor = 1.0
    if s and s[-1] in units:
        factor = units[s[-1]]
        s = s[:-1]

        if not s:
            raise ValueError('unit without a number: \'%s\'' % s_orig)

    return float(s) * factor
def ufloat_or_none(s):
    '''
    Like :py:func:`ufloat`, but map empty or ``None`` input to ``None``.
    '''
    return ufloat(s) if s else None
def int_or_none(s):
    '''
    Convert to ``int``, mapping empty or ``None`` input to ``None``.
    '''
    return int(s) if s else None
def nonzero(x, eps=1e-15):
    '''
    Check whether ``x`` deviates from zero by more than ``eps``.
    '''
    return eps < abs(x)
def permudef(ln, j=0):
    '''
    Generate all combinations from a list of ``(key, values)`` pairs.

    Yields ``ln`` itself with each ``(key, value)`` combination filled in,
    choosing one value per key (cartesian product).  The list is modified
    in place and each entry is restored after its values are exhausted, so
    a yielded result must be consumed (e.g. copied) before advancing the
    generator.
    '''
    if j < len(ln):
        k, v = ln[j]
        for y in v:
            ln[j] = k, y
            for s in permudef(ln, j + 1):
                yield s

        # Restore the original (key, values) entry before returning.
        ln[j] = k, v
        return
    else:
        yield ln
def arr(x):
    '''
    Coerce ``x`` into a numpy array of at least one dimension.
    '''
    a = num.asarray(x)
    if a.ndim == 0:
        # Promote scalars to length-1 arrays.
        a = a.reshape(1)
    return a
def discretize_rect_source(deltas, deltat, time, north, east, depth,
                           strike, dip, length, width,
                           anchor, velocity=None, stf=None,
                           nucleation_x=None, nucleation_y=None,
                           decimation_factor=1, pointsonly=False,
                           plane_coords=False,
                           aggressive_oversampling=False):
    '''
    Discretize a rectangular source into a regular grid of point sources.

    The grid spacing is chosen from the GF store's spatial sampling
    ``deltas`` and, in time mode, additionally from ``deltat * velocity``.

    :param anchor: anchor name, looked up in the module-level
        ``map_anchor`` table.
    :param velocity: rupture velocity; required unless ``pointsonly``.
    :param pointsonly: if ``True``, return only geometry, no times or
        amplitudes.
    :param plane_coords: if ``True``, return points in the unrotated
        fault-plane frame.

    Returns ``(points, dl, dw, nl, nw)`` in the points-only modes, else
    ``(points2, times2, amplitudes2, dl, dw, nl, nw)`` with one entry per
    (sub-source, STF sample) combination.
    '''
    if stf is None:
        stf = STF()

    if not velocity and not pointsonly:
        raise AttributeError('velocity is required in time mode')

    mindeltagf = float(num.min(deltas))
    if velocity:
        mindeltagf = min(mindeltagf, deltat * velocity)

    ln = length
    wd = width

    if aggressive_oversampling:
        nl = int((2. / decimation_factor) * num.ceil(ln / mindeltagf)) + 1
        nw = int((2. / decimation_factor) * num.ceil(wd / mindeltagf)) + 1
    else:
        nl = int((1. / decimation_factor) * num.ceil(ln / mindeltagf)) + 1
        nw = int((1. / decimation_factor) * num.ceil(wd / mindeltagf)) + 1

    n = int(nl * nw)

    dl = ln / nl
    dw = wd / nw

    xl = num.linspace(-0.5 * (ln - dl), 0.5 * (ln - dl), nl)
    xw = num.linspace(-0.5 * (wd - dw), 0.5 * (wd - dw), nw)

    # ``num.float`` was removed in NumPy >= 1.24; the builtin ``float``
    # is equivalent (float64).
    points = num.zeros((n, 3), dtype=float)
    points[:, 0] = num.tile(xl, nw)
    points[:, 1] = num.repeat(xw, nl)

    if nucleation_x is not None:
        dist_x = num.abs(nucleation_x - points[:, 0])
    else:
        dist_x = num.zeros(n)

    if nucleation_y is not None:
        dist_y = num.abs(nucleation_y - points[:, 1])
    else:
        dist_y = num.zeros(n)

    dist = num.sqrt(dist_x**2 + dist_y**2)

    # Rupture-front arrival times are only meaningful in time mode.  The
    # guard avoids a TypeError for pointsonly=True with velocity=None,
    # which the check above deliberately allows.
    times = dist / velocity if velocity else None

    anch_x, anch_y = map_anchor[anchor]

    points[:, 0] -= anch_x * 0.5 * length
    points[:, 1] -= anch_y * 0.5 * width

    if plane_coords:
        return points, dl, dw, nl, nw

    rotmat = num.asarray(
        pmt.euler_to_matrix(dip * d2r, strike * d2r, 0.0))
    points = num.dot(rotmat.T, points.T).T

    points[:, 0] += north
    points[:, 1] += east
    points[:, 2] += depth

    if pointsonly:
        return points, dl, dw, nl, nw

    xtau, amplitudes = stf.discretize_t(deltat, time)
    nt = xtau.size

    points2 = num.repeat(points, nt, axis=0)
    times2 = (times[:, num.newaxis] + xtau[num.newaxis, :]).ravel()
    amplitudes2 = num.tile(amplitudes, n)

    return points2, times2, amplitudes2, dl, dw, nl, nw
def check_rect_source_discretisation(points2, nl, nw, store):
    '''
    Check if the spatial sampling of a discretized rectangular source is
    fine enough for the given GF store's temporal sampling.

    Returns ``True`` when the discretisation is acceptable (or trivially
    small), ``False`` otherwise.
    '''
    # We assume a non-rotated fault plane
    N_CRITICAL = 8
    points = points2.T.reshape((3, nl, nw))
    if points.size <= N_CRITICAL:
        logger.warning('RectangularSource is defined by only %d sub-sources!'
                       % points.size)
        return True

    # Distance between neighbouring sub-sources (first two rows along the
    # length direction), per column.
    distances = num.sqrt(
        (points[0, 0, :] - points[0, 1, :])**2 +
        (points[1, 0, :] - points[1, 1, :])**2 +
        (points[2, 0, :] - points[2, 1, :])**2)

    depths = points[2, 0, :]
    vs_profile = store.config.get_vs(
        lat=0., lon=0.,
        points=num.repeat(depths[:, num.newaxis], 3, axis=1),
        interpolation='multilinear')

    # Sampling criterion: sub-source spacing must stay below half of the
    # shortest wavelength resolvable at the store's sampling interval.
    min_wavelength = vs_profile * (store.config.deltat * 2)
    if not num.all(min_wavelength > distances / 2):
        return False
    return True
def outline_rect_source(strike, dip, length, width, anchor):
    '''
    Get the corner coordinates of a rectangular source as a closed
    polygon (5 points, first == last), rotated by strike/dip and shifted
    according to the anchor.
    '''
    half_l = 0.5 * length
    half_w = 0.5 * width

    # Closed rectangle outline in the unrotated fault-plane frame.
    corners = num.array(
        [[-half_l, -half_w, 0.],
         [half_l, -half_w, 0.],
         [half_l, half_w, 0.],
         [-half_l, half_w, 0.],
         [-half_l, -half_w, 0.]])

    anch_x, anch_y = map_anchor[anchor]
    corners[:, 0] -= anch_x * half_l
    corners[:, 1] -= anch_y * half_w

    rotmat = num.asarray(
        pmt.euler_to_matrix(dip * d2r, strike * d2r, 0.0))

    return num.dot(rotmat.T, corners.T).T
def from_plane_coords(
        strike, dip, length, width, depth, x_plane_coords, y_plane_coords,
        lat=0., lon=0.,
        north_shift=0, east_shift=0,
        anchor='top', cs='xy'):
    '''
    Convert normalized fault-plane coordinates to absolute coordinates.

    ``x_plane_coords``/``y_plane_coords`` may be scalars or lists;
    presumably values in ``[-1., 1.]`` spanning the fault plane — confirm
    against callers.  Returns ``(x_abs, y_abs)`` lists; for
    ``cs='lonlat'`` the pair is swapped to ``(lon, lat)``.
    '''
    ln = length
    wd = width
    x_abs = []
    y_abs = []
    if not isinstance(x_plane_coords, list):
        x_plane_coords = [x_plane_coords]
        y_plane_coords = [y_plane_coords]

    for x_plane, y_plane in zip(x_plane_coords, y_plane_coords):
        points = num.array(
            [[-0.5 * ln * x_plane, -0.5 * wd * y_plane, 0.],
             [0.5 * ln * x_plane, -0.5 * wd * y_plane, 0.],
             [0.5 * ln * x_plane, 0.5 * wd * y_plane, 0.],
             [-0.5 * ln * x_plane, 0.5 * wd * y_plane, 0.],
             [-0.5 * ln * x_plane, -0.5 * wd * y_plane, 0.]])

        anch_x, anch_y = map_anchor[anchor]
        points[:, 0] -= anch_x * 0.5 * length
        points[:, 1] -= anch_y * 0.5 * width

        rotmat = num.asarray(
            pmt.euler_to_matrix(dip * d2r, strike * d2r, 0.0))

        points = num.dot(rotmat.T, points.T).T
        points[:, 0] += north_shift
        points[:, 1] += east_shift
        points[:, 2] += depth
        if cs in ('latlon', 'lonlat'):
            latlon = ne_to_latlon(lat, lon,
                                  points[:, 0], points[:, 1])
            latlon = num.array(latlon).T
            # NOTE(review): rows 1 and 2 of the 5-corner array are picked
            # here (and below for cs='xy'), not matching rows — confirm
            # this asymmetry is intended.
            x_abs.append(latlon[1:2, 1])
            y_abs.append(latlon[2:3, 0])
        if cs == 'xy':
            x_abs.append(points[1:2, 1])
            y_abs.append(points[2:3, 0])

    if cs == 'lonlat':
        return y_abs, x_abs
    else:
        return x_abs, y_abs
def points_on_rect_source(
        strike, dip, length, width, anchor,
        discretized_basesource=None, points_x=None, points_y=None):
    '''
    Compute rotated, anchor-shifted coordinates of points on a
    rectangular source, either on the patch corners of a discretized base
    source or at explicitly given normalized plane coordinates.
    '''
    if isinstance(points_x, (list, float)):
        points_x = num.array([points_x])
    if isinstance(points_y, (list, float)):
        points_y = num.array([points_y])

    if discretized_basesource:
        ds = discretized_basesource

        npl = ds.nl + 1
        npw = ds.nw + 1

        idx_l = num.array(range(npl))
        idx_w = num.array(range(npw))

        # Map patch-corner indices onto [-1., 1.] in both directions.
        norm_l = 2 * ((idx_l - num.min(idx_l)) / num.ptp(idx_l)) - 1
        norm_w = 2 * ((idx_w - num.min(idx_w)) / num.ptp(idx_w)) - 1

        points = num.zeros((npl * npw, 3))
        for il in range(npl):
            for iw in range(npw):
                points[il * npw + iw, :] = num.array([
                    norm_l[il] * length * 0.5,
                    norm_w[iw] * width * 0.5, 0.0])

    elif points_x.any() and points_y.any():
        points = num.zeros(shape=((len(points_x), 3)))
        for ip, (px, py) in enumerate(zip(points_x, points_y)):
            points[ip, :] = num.array(
                [px * 0.5 * length, py * 0.5 * width, 0.0])

    anch_x, anch_y = map_anchor[anchor]
    points[:, 0] -= anch_x * 0.5 * length
    points[:, 1] -= anch_y * 0.5 * width

    rotmat = num.asarray(
        pmt.euler_to_matrix(dip * d2r, strike * d2r, 0.0))

    return num.dot(rotmat.T, points.T).T
class InvalidGridDef(Exception):
    '''
    Raised when a grid or range specification string cannot be parsed.
    '''
    pass
class Range(SObject):
    '''
    Convenient range specification.

    Equivalent ways to specify the range [ 0., 1000., ... 10000. ]::

      Range('0 .. 10k : 1k')
      Range(start=0., stop=10e3, step=1e3)
      Range(0, 10e3, 1e3)
      Range('0 .. 10k @ 11')
      Range(start=0., stop=10*km, n=11)

      Range(0, 10e3, n=11)
      Range(values=[x*1e3 for x in range(11)])

    Depending on the use context, it can be possible to omit any part of the
    specification. E.g. in the context of extracting a subset of an already
    existing range, the existing range's specification values would be filled
    in where missing.

    The values are distributed with equal spacing, unless the ``spacing``
    argument is modified.  The values can be created offset or relative to an
    external base value with the ``relative`` argument if the use context
    supports this.

    The range specification can be expressed with a short string
    representation::

        'start .. stop @ num | spacing, relative'
        'start .. stop : step | spacing, relative'

    most parts of the expression can be omitted if not needed. Whitespace is
    allowed for readability but can also be omitted.
    '''

    start = Float.T(optional=True)
    stop = Float.T(optional=True)
    step = Float.T(optional=True)
    n = Int.T(optional=True)
    values = Array.T(optional=True, dtype=float, shape=(None,))

    spacing = StringChoice.T(
        choices=['lin', 'log', 'symlog'],
        default='lin',
        optional=True)

    relative = StringChoice.T(
        choices=['', 'add', 'mult'],
        default='',
        optional=True)

    # Matches 'start .. stop @ n | stuff' or 'start .. stop : step | stuff';
    # every part is optional.
    pattern = re.compile(r'^((?P<start>.*)\.\.(?P<stop>[^@|:]*))?'
                         r'(@(?P<n>[^|]+)|:(?P<step>[^|]+))?'
                         r'(\|(?P<stuff>.+))?$')

    def __init__(self, *args, **kwargs):
        # Accept Range('spec string'), Range(start, stop[, step]) or pure
        # keyword form; mixing positional and duplicate keyword arguments
        # is rejected.
        d = {}
        if len(args) == 1:
            d = self.parse(args[0])

        elif len(args) in (2, 3):
            d['start'], d['stop'] = [float(x) for x in args[:2]]
            if len(args) == 3:
                d['step'] = float(args[2])

        for k, v in kwargs.items():
            if k in d:
                raise ArgumentError('%s specified more than once' % k)

            d[k] = v

        SObject.__init__(self, **d)

    def __str__(self):
        # Inverse of parse(): reconstruct the short string representation.
        def sfloat(x):
            if x is not None:
                return '%g' % x
            else:
                return ''

        if self.values:
            return ','.join('%g' % x for x in self.values)

        if self.start is None and self.stop is None:
            s0 = ''
        else:
            s0 = '%s .. %s' % (sfloat(self.start), sfloat(self.stop))

        s1 = ''
        if self.step is not None:
            s1 = [' : %g', ':%g'][s0 == ''] % self.step
        elif self.n is not None:
            s1 = [' @ %i', '@%i'][s0 == ''] % self.n

        if self.spacing == 'lin' and self.relative == '':
            s2 = ''

        else:
            x = []
            if self.spacing != 'lin':
                x.append(self.spacing)

            if self.relative != '':
                x.append(self.relative)

            s2 = ' | %s' % ','.join(x)

        return s0 + s1 + s2

    @classmethod
    def parse(cls, s):
        '''
        Parse a range specification string into a dict of constructor
        arguments.  A plain comma-separated list of numbers becomes an
        explicit ``values`` array.

        :raises InvalidGridDef: if the string cannot be interpreted.
        '''
        s = re.sub(r'\s+', '', s)
        m = cls.pattern.match(s)
        if not m:
            try:
                vals = [ufloat(x) for x in s.split(',')]
            except Exception:
                raise InvalidGridDef(
                    '"%s" is not a valid range specification' % s)

            return dict(values=num.array(vals, dtype=float))

        d = m.groupdict()
        try:
            start = ufloat_or_none(d['start'])
            stop = ufloat_or_none(d['stop'])
            step = ufloat_or_none(d['step'])
            n = int_or_none(d['n'])

        except Exception:
            raise InvalidGridDef(
                '"%s" is not a valid range specification' % s)

        spacing = 'lin'
        relative = ''

        if d['stuff'] is not None:
            t = d['stuff'].split(',')
            for x in t:
                if x in cls.spacing.choices:
                    spacing = x
                elif x and x in cls.relative.choices:
                    relative = x
                else:
                    raise InvalidGridDef(
                        '"%s" is not a valid range specification' % s)

        return dict(start=start, stop=stop, step=step, n=n, spacing=spacing,
                    relative=relative)

    def make(self, mi=None, ma=None, inc=None, base=None, eps=1e-5):
        '''
        Create the list of values described by this range.

        Parts missing from the specification are filled in from the
        context: ``mi``/``ma`` supply fallback start/stop values, ``inc``
        a fallback step and ``base`` the reference for relative ranges.
        '''
        if self.values:
            return self.values

        start = self.start
        stop = self.stop
        step = self.step
        n = self.n

        # A negative step means the range is traversed from high to low.
        swap = step is not None and step < 0.
        if start is None:
            start = [mi, ma][swap]
        if stop is None:
            stop = [ma, mi][swap]
        if step is None and inc is not None:
            step = [inc, -inc][ma < mi]

        if start is None or stop is None:
            raise InvalidGridDef(
                'Cannot use range specification "%s" without start '
                'and stop in this context' % self)

        if step is None and n is None:
            step = stop - start

        if n is None:
            if (step < 0) != (stop - start < 0):
                raise InvalidGridDef(
                    'Range specification "%s" has inconsistent ordering '
                    '(step < 0 => stop > start)' % self)

            n = int(round((stop - start) / step)) + 1
            stop2 = start + (n - 1) * step
            if abs(stop - stop2) > eps:
                # Step does not evenly divide the interval within
                # tolerance; shorten the range rather than overshoot.
                n = int(math.floor((stop - start) / step)) + 1
                stop = start + (n - 1) * step
            else:
                stop = stop2

        if start == stop:
            n = 1

        if self.spacing == 'lin':
            vals = num.linspace(start, stop, n)

        elif self.spacing in ('log', 'symlog'):
            if start > 0. and stop > 0.:
                vals = num.exp(num.linspace(num.log(start),
                                            num.log(stop), n))
            elif start < 0. and stop < 0.:
                vals = -num.exp(num.linspace(num.log(-start),
                                             num.log(-stop), n))
            else:
                raise InvalidGridDef(
                    'Log ranges should not include or cross zero '
                    '(in range specification "%s").' % self)

            if self.spacing == 'symlog':
                # Mirror the values about zero for symmetric log spacing.
                nvals = - vals
                vals = num.concatenate((nvals[::-1], vals))

        if self.relative in ('add', 'mult') and base is None:
            raise InvalidGridDef(
                'Cannot use relative range specification in this context.')

        vals = self.make_relative(base, vals)

        return list(map(float, vals))

    def make_relative(self, base, vals):
        '''
        Offset (``add``) or scale (``mult``) the values by ``base``,
        in place; no-op for absolute ranges.
        '''
        if self.relative == 'add':
            vals += base

        if self.relative == 'mult':
            vals *= base

        return vals
class GridDefElement(Object):
    '''
    One element of a grid definition: a parameter name together with its
    value range, constructable from the shorthand ``'param = range-spec'``.
    '''

    param = meta.StringID.T()
    rs = Range.T()

    def __init__(self, shorthand=None, **kwargs):
        if shorthand is not None:
            t = shorthand.split('=')
            if len(t) != 2:
                raise InvalidGridDef(
                    'Invalid grid specification element: %s' % shorthand)

            sp, sr = t[0].strip(), t[1].strip()

            kwargs['param'] = sp
            kwargs['rs'] = Range(sr)

        Object.__init__(self, **kwargs)

    def shorthand(self):
        # Inverse of the shorthand constructor form.
        return self.param + ' = ' + str(self.rs)
class GridDef(Object):
    '''
    A grid definition: a collection of parameter ranges, constructable
    from a shorthand string of ``;``- or newline-separated
    ``'param = range-spec'`` elements.
    '''

    elements = List.T(GridDefElement.T())

    def __init__(self, shorthand=None, **kwargs):
        if shorthand is not None:
            t = shorthand.splitlines()
            tt = []
            for x in t:
                x = x.strip()
                if x:
                    tt.extend(x.split(';'))

            elements = []
            for se in tt:
                # Each chunk is a single 'param = range' element.
                # (Constructing GridDef here, as the code previously did,
                # recursed infinitely since the chunk never shrinks.)
                elements.append(GridDefElement(se))

            kwargs['elements'] = elements

        Object.__init__(self, **kwargs)

    def shorthand(self):
        return '; '.join(str(x) for x in self.elements)
class Cloneable(object):
    '''
    Mix-in providing dict-like access to guts properties and a
    :py:meth:`clone` method.
    '''

    def __iter__(self):
        # Iterate over the guts property names declared on the class.
        return iter(self.T.propnames)

    def __getitem__(self, k):
        if k not in self.keys():
            raise KeyError(k)

        return getattr(self, k)

    def __setitem__(self, k, v):
        # Reject unknown parameter names so typos fail loudly.
        if k not in self.keys():
            raise KeyError(k)

        return setattr(self, k, v)

    def clone(self, **kwargs):
        '''
        Make a copy of the object.

        A new object of the same class is created and initialized with the
        parameters of the object on which this method is called on. If
        ``kwargs`` are given, these are used to override any of the
        initialization parameters.
        '''

        d = dict(self)
        for k in d:
            v = d[k]
            if isinstance(v, Cloneable):
                # Deep-copy nested cloneable parameters.
                d[k] = v.clone()

        d.update(kwargs)
        return self.__class__(**d)

    @classmethod
    def keys(cls):
        '''
        Get list of the source model's parameter names.
        '''

        return cls.T.propnames
class STF(Object, Cloneable):

    '''
    Base class for source time functions.

    The base implementation represents a unit pulse at the reference
    time.  Subclasses are expected to declare a ``duration`` property.
    '''

    def __init__(self, effective_duration=None, **kwargs):
        # Allow construction from an 'effective duration' by converting it
        # to the subclass' native duration parameter.
        if effective_duration is not None:
            kwargs['duration'] = effective_duration / \
                self.factor_duration_to_effective()

        Object.__init__(self, **kwargs)

    @classmethod
    def factor_duration_to_effective(cls):
        # Ratio between effective and nominal duration; overridden by
        # subclasses with non-trivial shapes.
        return 1.0

    def centroid_time(self, tref):
        # Centroid of the STF relative to reference time ``tref``.
        return tref

    @property
    def effective_duration(self):
        # Relies on the subclass defining a ``duration`` property.
        return self.duration * self.factor_duration_to_effective()

    def discretize_t(self, deltat, tref):
        '''
        Discretize the STF onto the sampling grid ``deltat``.

        Returns ``(times, amplitudes)``; for the base class, a unit pulse
        split linearly between the two samples bracketing ``tref``.
        '''
        tl = math.floor(tref / deltat) * deltat
        th = math.ceil(tref / deltat) * deltat
        if tl == th:
            return num.array([tl], dtype=float), num.ones(1)
        else:
            return (
                num.array([tl, th], dtype=float),
                num.array([th - tref, tref - tl], dtype=float) / deltat)

    def base_key(self):
        # Part of the key used to decide if GF stacking can be shared.
        return (type(self).__name__,)
# Shared default STF instance: a unit pulse at the reference time.
g_unit_pulse = STF()
def sshift(times, amplitudes, tshift, deltat):
    '''
    Apply a sub-sample time shift to a sampled signal.

    The shift is distributed linearly between neighbouring samples, which
    extends the signal by one sample.  A shift that is an exact multiple
    of ``deltat`` returns the input unchanged.
    '''
    lo = math.floor(tshift / deltat) * deltat
    hi = math.ceil(tshift / deltat) * deltat
    if lo == hi:
        return times, amplitudes

    w_lo = (hi - tshift) / deltat
    w_hi = (tshift - lo) / deltat

    shifted = num.zeros(amplitudes.size + 1, dtype=float)
    shifted[:-1] += w_lo * amplitudes
    shifted[1:] += w_hi * amplitudes

    new_times = times[0] + lo + \
        num.arange(times.size + 1, dtype=float) * deltat

    return new_times, shifted
class BoxcarSTF(STF):

    '''
    Boxcar type source time function.

    .. figure :: /static/stf-BoxcarSTF.svg
        :width: 40%
        :align: center
        :alt: boxcar source time function
    '''

    duration = Float.T(
        default=0.0,
        help='duration of the boxcar')

    anchor = Float.T(
        default=0.0,
        help='anchor point with respect to source.time: ('
             '-1.0: left -> source duration [0, T] ~ hypocenter time, '
             ' 0.0: center -> source duration [-T/2, T/2] ~ centroid time, '
             '+1.0: right -> source duration [-T, 0] ~ rupture end time)')

    @classmethod
    def factor_duration_to_effective(cls):
        return 1.0

    def centroid_time(self, tref):
        # The boxcar's centroid sits at its middle, offset by the anchor.
        return tref - 0.5 * self.duration * self.anchor

    def discretize_t(self, deltat, tref):
        tmin_stf = tref - self.duration * (self.anchor + 1.) * 0.5
        tmax_stf = tref + self.duration * (1. - self.anchor) * 0.5
        tmin = round(tmin_stf / deltat) * deltat
        tmax = round(tmax_stf / deltat) * deltat
        nt = int(round((tmax - tmin) / deltat)) + 1
        times = num.linspace(tmin, tmax, nt)
        amplitudes = num.ones_like(times)
        if times.size > 1:
            # Integrate the piecewise-linear boxcar over each sample bin
            # to obtain anti-aliased discrete amplitudes.
            t_edges = num.linspace(
                tmin - 0.5 * deltat, tmax + 0.5 * deltat, nt + 1)
            t = tmin_stf + self.duration * num.array(
                [0.0, 0.0, 1.0, 1.0], dtype=float)
            f = num.array([0., 1., 1., 0.], dtype=float)
            amplitudes = util.plf_integrate_piecewise(t_edges, t, f)
            amplitudes /= num.sum(amplitudes)

        # Compensate the centroid shift introduced by snapping to the grid.
        tshift = (num.sum(amplitudes * times) - self.centroid_time(tref))

        return sshift(times, amplitudes, -tshift, deltat)

    def base_key(self):
        return (type(self).__name__, self.duration, self.anchor)
class TriangularSTF(STF):

    '''
    Triangular type source time function.

    .. figure :: /static/stf-TriangularSTF.svg
        :width: 40%
        :align: center
        :alt: triangular source time function
    '''

    duration = Float.T(
        default=0.0,
        help='baseline of the triangle')

    peak_ratio = Float.T(
        default=0.5,
        help='fraction of time compared to duration, '
             'when the maximum amplitude is reached')

    anchor = Float.T(
        default=0.0,
        help='anchor point with respect to source.time: ('
             '-1.0: left -> source duration [0, T] ~ hypocenter time, '
             ' 0.0: center -> source duration [-T/2, T/2] ~ centroid time, '
             '+1.0: right -> source duration [-T, 0] ~ rupture end time)')

    @classmethod
    def factor_duration_to_effective(cls, peak_ratio=None):
        # Conversion between nominal and effective duration, depending on
        # where the peak sits within the triangle.
        if peak_ratio is None:
            peak_ratio = cls.peak_ratio.default()

        return math.sqrt((peak_ratio**2 - peak_ratio + 1.0) * 2.0 / 3.0)

    def __init__(self, effective_duration=None, **kwargs):
        # Override the base constructor so the conversion factor can take
        # the peak_ratio argument into account.
        if effective_duration is not None:
            kwargs['duration'] = effective_duration / \
                self.factor_duration_to_effective(
                    kwargs.get('peak_ratio', None))

        STF.__init__(self, **kwargs)

    @property
    def centroid_ratio(self):
        # Position of the triangle's centroid as a fraction of duration.
        ra = self.peak_ratio
        rb = 1.0 - ra
        return self.peak_ratio + (rb**2 / 3. - ra**2 / 3.) / (ra + rb)

    def centroid_time(self, tref):
        ca = self.centroid_ratio
        cb = 1.0 - ca
        if self.anchor <= 0.:
            return tref - ca * self.duration * self.anchor
        else:
            return tref - cb * self.duration * self.anchor

    @property
    def effective_duration(self):
        return self.duration * self.factor_duration_to_effective(
            self.peak_ratio)

    def tminmax_stf(self, tref):
        # Start and end time of the continuous STF for reference ``tref``.
        ca = self.centroid_ratio
        cb = 1.0 - ca
        if self.anchor <= 0.:
            tmin_stf = tref - ca * self.duration * (self.anchor + 1.)
            tmax_stf = tmin_stf + self.duration
        else:
            tmax_stf = tref + cb * self.duration * (1. - self.anchor)
            tmin_stf = tmax_stf - self.duration

        return tmin_stf, tmax_stf

    def discretize_t(self, deltat, tref):
        tmin_stf, tmax_stf = self.tminmax_stf(tref)

        tmin = round(tmin_stf / deltat) * deltat
        tmax = round(tmax_stf / deltat) * deltat
        nt = int(round((tmax - tmin) / deltat)) + 1
        if nt > 1:
            # Integrate the triangle over each sample bin (anti-aliasing).
            t_edges = num.linspace(
                tmin - 0.5 * deltat, tmax + 0.5 * deltat, nt + 1)
            t = tmin_stf + self.duration * num.array(
                [0.0, self.peak_ratio, 1.0], dtype=float)
            f = num.array([0., 1., 0.], dtype=float)
            amplitudes = util.plf_integrate_piecewise(t_edges, t, f)
            amplitudes /= num.sum(amplitudes)
        else:
            amplitudes = num.ones(1)

        times = num.linspace(tmin, tmax, nt)
        return times, amplitudes

    def base_key(self):
        return (
            type(self).__name__, self.duration, self.peak_ratio, self.anchor)
class HalfSinusoidSTF(STF):

    '''
    Half sinusoid type source time function.

    .. figure :: /static/stf-HalfSinusoidSTF.svg
        :width: 40%
        :align: center
        :alt: half-sinusouid source time function
    '''

    duration = Float.T(
        default=0.0,
        help='duration of the half-sinusoid (baseline)')

    anchor = Float.T(
        default=0.0,
        help='anchor point with respect to source.time: ('
             '-1.0: left -> source duration [0, T] ~ hypocenter time, '
             ' 0.0: center -> source duration [-T/2, T/2] ~ centroid time, '
             '+1.0: right -> source duration [-T, 0] ~ rupture end time)')

    exponent = Int.T(
        default=1,
        help='set to 2 to use square of the half-period sinusoidal function.')

    def __init__(self, effective_duration=None, **kwargs):
        # Override the base constructor so the conversion factor can take
        # the exponent argument into account.
        if effective_duration is not None:
            kwargs['duration'] = effective_duration / \
                self.factor_duration_to_effective(
                    kwargs.get('exponent', 1))

        STF.__init__(self, **kwargs)

    @classmethod
    def factor_duration_to_effective(cls, exponent):
        # Closed-form conversion factors for the sine and sine-squared
        # shapes.
        if exponent == 1:
            return math.sqrt(3.0 * math.pi**2 - 24.0) / math.pi
        elif exponent == 2:
            return math.sqrt(math.pi**2 - 6) / math.pi
        else:
            raise ValueError('Exponent for HalfSinusoidSTF must be 1 or 2.')

    @property
    def effective_duration(self):
        return self.duration * self.factor_duration_to_effective(self.exponent)

    def centroid_time(self, tref):
        return tref - 0.5 * self.duration * self.anchor

    def discretize_t(self, deltat, tref):
        tmin_stf = tref - self.duration * (self.anchor + 1.) * 0.5
        tmax_stf = tref + self.duration * (1. - self.anchor) * 0.5
        tmin = round(tmin_stf / deltat) * deltat
        tmax = round(tmax_stf / deltat) * deltat
        nt = int(round((tmax - tmin) / deltat)) + 1
        if nt > 1:
            # Clip bin edges to the STF support, then difference the
            # analytic antiderivative over each bin (anti-aliasing).
            t_edges = num.maximum(tmin_stf, num.minimum(tmax_stf, num.linspace(
                tmin - 0.5 * deltat, tmax + 0.5 * deltat, nt + 1)))

            if self.exponent == 1:
                fint = -num.cos(
                    (t_edges - tmin_stf) * (math.pi / self.duration))

            elif self.exponent == 2:
                fint = (t_edges - tmin_stf) / self.duration \
                    - 1.0 / (2.0 * math.pi) * num.sin(
                        (t_edges - tmin_stf) * (2.0 * math.pi / self.duration))

            else:
                raise ValueError(
                    'Exponent for HalfSinusoidSTF must be 1 or 2.')

            amplitudes = fint[1:] - fint[:-1]
            amplitudes /= num.sum(amplitudes)
        else:
            amplitudes = num.ones(1)

        times = num.linspace(tmin, tmax, nt)
        return times, amplitudes

    def base_key(self):
        # The exponent changes the discretized shape, so it must be part
        # of the key; omitting it (as before) let sources differing only
        # in exponent incorrectly share their GF discretization.
        return (type(self).__name__, self.duration, self.anchor,
                self.exponent)
class SmoothRampSTF(STF):
    '''
    Smooth-ramp type source time function for near-field displacement.
    Based on moment function of double-couple point source proposed by Bruestle
    and Mueller (PEPI, 1983).

    .. [1] W. Bruestle, G. Mueller (1983), Moment and duration of shallow
        earthquakes from Love-wave modelling for regional distances, PEPI 32,
        312-324.

    .. figure :: /static/stf-SmoothRampSTF.svg
        :width: 40%
        :alt: smooth ramp source time function
    '''
    duration = Float.T(
        default=0.0,
        help='duration of the ramp (baseline)')

    rise_ratio = Float.T(
        default=0.5,
        help='fraction of time compared to duration, '
             'when the maximum amplitude is reached')

    anchor = Float.T(
        default=0.0,
        help='anchor point with respect to source.time: ('
             '-1.0: left -> source duration ``[0, T]`` ~ hypocenter time, '
             '0.0: center -> source duration ``[-T/2, T/2]`` ~ centroid time, '
             '+1.0: right -> source duration ``[-T, 0]`` ~ rupture end time)')

    def discretize_t(self, deltat, tref):
        tmin_stf = tref - self.duration * (self.anchor + 1.) * 0.5
        tmax_stf = tref + self.duration * (1. - self.anchor) * 0.5
        tmin = round(tmin_stf / deltat) * deltat
        tmax = round(tmax_stf / deltat) * deltat
        D = round((tmax - tmin) / deltat) * deltat
        nt = int(round(D / deltat)) + 1
        times = num.linspace(tmin, tmax, nt)
        if nt > 1:
            rise_time = self.rise_ratio * self.duration
            amplitudes = num.ones_like(times)
            tp = tmin + rise_time
            ii = num.where(times <= tp)
            t_inc = times[ii]
            # Smooth cosine-based onset up to the peak time (after
            # Bruestle & Mueller, 1983); flat afterwards.
            a = num.cos(num.pi * (t_inc - tmin_stf) / rise_time)
            b = num.cos(3 * num.pi * (t_inc - tmin_stf) / rise_time) - 1.0
            amplitudes[ii] = (9. / 16.) * (1 - a + (1. / 9.) * b)

            amplitudes /= num.sum(amplitudes)
        else:
            amplitudes = num.ones(1)

        return times, amplitudes

    def base_key(self):
        return (type(self).__name__,
                self.duration, self.rise_ratio, self.anchor)
class ResonatorSTF(STF):
    '''
    Simple resonator like source time function.

    .. math ::

        f(t) = 0 for t < 0
        f(t) = e^{-t/tau} * sin(2 * pi * f * t)


    .. figure :: /static/stf-SmoothRampSTF.svg
        :width: 40%
        :alt: smooth ramp source time function

    '''

    duration = Float.T(
        default=0.0,
        help='decay time')

    frequency = Float.T(
        default=1.0,
        help='resonance frequency')

    def discretize_t(self, deltat, tref):
        # Sample the damped sinusoid over three decay times; beyond that
        # the envelope has dropped to exp(-3) of its initial value.
        tmin_stf = tref
        tmax_stf = tref + self.duration * 3
        tmin = math.floor(tmin_stf / deltat) * deltat
        tmax = math.ceil(tmax_stf / deltat) * deltat
        times = util.arange2(tmin, tmax, deltat)
        amplitudes = num.exp(-(times - tref) / self.duration) \
            * num.sin(2.0 * num.pi * self.frequency * (times - tref))

        return times, amplitudes

    def base_key(self):
        return (type(self).__name__,
                self.duration, self.frequency)
class STFMode(StringChoice):
    # 'pre': apply the STF during source discretization (before GF
    # stacking); 'post': apply it to the stacked result in
    # post-processing (see Source.effective_stf_pre/_post).
    choices = ['pre', 'post']
class Source(Location, Cloneable):
    '''
    Base class for all source models.
    '''

    # Optional free-form label for the source.
    name = String.T(optional=True, default='')

    time = Timestamp.T(
        default=Timestamp.D('1970-01-01 00:00:00'),
        help='source origin time.')

    stf = STF.T(
        optional=True,
        help='source time function.')

    stf_mode = STFMode.T(
        default='post',
        help='whether to apply source time function in pre or '
             'post-processing.')
    def __init__(self, **kwargs):
        # Delegate to Location; Source adds no construction logic here.
        Location.__init__(self, **kwargs)
    def update(self, **kwargs):
        '''
        Change some of the source models parameters.

        Example::

          >>> from pyrocko import gf
          >>> s = gf.DCSource()
          >>> s.update(strike=66., dip=33.)
          >>> print(s)
          --- !pf.DCSource
          depth: 0.0
          time: 1970-01-01 00:00:00
          magnitude: 6.0
          strike: 66.0
          dip: 33.0
          rake: 0.0

        '''

        for (k, v) in kwargs.items():
            # Item assignment goes through Cloneable.__setitem__, which
            # raises KeyError for unknown parameter names.
            self[k] = v
    def grid(self, **variables):
        '''
        Create grid of source model variations.

        :param variables: parameter names mapped to :py:class:`Range`
            specifications
        :returns: :py:class:`SourceGrid` instance.

        Example::

            >>> from pyrocko import gf
            >>> base = DCSource()
            >>> R = gf.Range
            >>> for s in base.grid(strike=R('0 .. 360 : 90')):
            ...     print(s.strike)

        '''
        return SourceGrid(base=self, variables=variables)
    def base_key(self):
        '''
        Get key to decide about source discretization / GF stack sharing.

        When two source models differ only in amplitude and origin time, the
        discretization and the GF stacking can be done only once for a unit
        amplitude and a zero origin time and the amplitude and origin times of
        the seismograms can be applied during post-processing of the synthetic
        seismogram.

        For any derived parameterized source model, this method is called to
        decide if discretization and stacking of the source should be shared.
        When two source models return an equal vector of values discretization
        is shared.
        '''
        # Position, time, concrete type and the pre-stacking STF key
        # together determine whether stacking can be shared.
        return (self.depth, self.lat, self.north_shift,
                self.lon, self.east_shift, self.time, type(self).__name__) + \
            self.effective_stf_pre().base_key()
    def get_factor(self):
        '''
        Get the scaling factor to be applied during post-processing.

        Discretization of the base seismogram is usually done for a unit
        amplitude, because a common factor can be efficiently multiplied to
        final seismograms. This eliminates the need to repeat the stacking
        when creating seismograms for a series of source models only
        differing in amplitude.

        This method should return the scaling factor to apply in the
        post-processing (often this is simply the scalar moment of the source).
        '''
        return 1.0
    def effective_stf_pre(self):
        '''
        Return the STF applied before stacking of the Green's functions.

        This STF is used during discretization of the parameterized source
        models, i.e. to produce a temporal distribution of point sources.

        Handling of the STF before stacking of the GFs is less efficient but
        allows to use different source time functions for different parts of
        the source.
        '''

        if self.stf is not None and self.stf_mode == 'pre':
            return self.stf
        else:
            # Fall back to the shared unit pulse.
            return g_unit_pulse
1231 def effective_stf_post(self):
1232 '''
1233 Return the STF applied after stacking of the Green's fuctions.
1235 This STF is used in the post-processing of the synthetic seismograms.
1237 Handling of the STF after stacking of the GFs is usually more efficient
1238 but is only possible when a common STF is used for all subsources.
1239 '''
1241 if self.stf is not None and self.stf_mode == 'post':
1242 return self.stf
1243 else:
1244 return g_unit_pulse
1246 def _dparams_base(self):
1247 return dict(times=arr(self.time),
1248 lat=self.lat, lon=self.lon,
1249 north_shifts=arr(self.north_shift),
1250 east_shifts=arr(self.east_shift),
1251 depths=arr(self.depth))
1253 def _hash(self):
1254 sha = sha1()
1255 for k in self.base_key():
1256 sha.update(str(k).encode())
1257 return sha.hexdigest()
1259 def _dparams_base_repeated(self, times):
1260 if times is None:
1261 return self._dparams_base()
1263 nt = times.size
1264 north_shifts = num.repeat(self.north_shift, nt)
1265 east_shifts = num.repeat(self.east_shift, nt)
1266 depths = num.repeat(self.depth, nt)
1267 return dict(times=times,
1268 lat=self.lat, lon=self.lon,
1269 north_shifts=north_shifts,
1270 east_shifts=east_shifts,
1271 depths=depths)
1273 def pyrocko_event(self, store=None, target=None, **kwargs):
1274 duration = None
1275 if self.stf:
1276 duration = self.stf.effective_duration
1278 return model.Event(
1279 lat=self.lat,
1280 lon=self.lon,
1281 north_shift=self.north_shift,
1282 east_shift=self.east_shift,
1283 time=self.time,
1284 name=self.name,
1285 depth=self.depth,
1286 duration=duration,
1287 **kwargs)
1289 def outline(self, cs='xyz'):
1290 points = num.atleast_2d(num.zeros([1, 3]))
1292 points[:, 0] += self.north_shift
1293 points[:, 1] += self.east_shift
1294 points[:, 2] += self.depth
1295 if cs == 'xyz':
1296 return points
1297 elif cs == 'xy':
1298 return points[:, :2]
1299 elif cs in ('latlon', 'lonlat'):
1300 latlon = ne_to_latlon(
1301 self.lat, self.lon, points[:, 0], points[:, 1])
1303 latlon = num.array(latlon).T
1304 if cs == 'latlon':
1305 return latlon
1306 else:
1307 return latlon[:, ::-1]
1309 @classmethod
1310 def from_pyrocko_event(cls, ev, **kwargs):
1311 if ev.depth is None:
1312 raise ConversionError(
1313 'Cannot convert event object to source object: '
1314 'no depth information available')
1316 stf = None
1317 if ev.duration is not None:
1318 stf = HalfSinusoidSTF(effective_duration=ev.duration)
1320 d = dict(
1321 name=ev.name,
1322 time=ev.time,
1323 lat=ev.lat,
1324 lon=ev.lon,
1325 north_shift=ev.north_shift,
1326 east_shift=ev.east_shift,
1327 depth=ev.depth,
1328 stf=stf)
1329 d.update(kwargs)
1330 return cls(**d)
1332 def get_magnitude(self):
1333 raise NotImplementedError(
1334 '%s does not implement get_magnitude()'
1335 % self.__class__.__name__)
class SourceWithMagnitude(Source):
    '''
    Base class for sources containing a moment magnitude.
    '''

    magnitude = Float.T(
        default=6.0,
        help='Moment magnitude Mw as in [Hanks and Kanamori, 1979]')

    def __init__(self, **kwargs):
        # Accept scalar moment as an alternative to magnitude; an explicit
        # magnitude keyword takes precedence over a given moment.
        if 'moment' in kwargs:
            mom = kwargs.pop('moment')
            if 'magnitude' not in kwargs:
                kwargs['magnitude'] = float(pmt.moment_to_magnitude(mom))

        Source.__init__(self, **kwargs)

    @property
    def moment(self):
        '''
        Scalar moment [Nm], derived from ``magnitude``.
        '''
        return float(pmt.magnitude_to_moment(self.magnitude))

    @moment.setter
    def moment(self, value):
        self.magnitude = float(pmt.moment_to_magnitude(value))

    def pyrocko_event(self, store=None, target=None, **kwargs):
        return Source.pyrocko_event(
            self, store, target,
            magnitude=self.magnitude,
            **kwargs)

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        d = {}
        # Explicit None check: the previous truthiness test silently dropped
        # a valid magnitude of 0.0.
        if ev.magnitude is not None:
            d.update(magnitude=ev.magnitude)

        d.update(kwargs)
        return super(SourceWithMagnitude, cls).from_pyrocko_event(ev, **d)

    def get_magnitude(self):
        return self.magnitude
class DerivedMagnitudeError(ValidationError):
    '''
    Raised when the magnitude cannot be derived from other parameters or
    when conflicting parameterizations are given.
    '''
    pass
class SourceWithDerivedMagnitude(Source):
    '''
    Base class for sources whose magnitude is derived from other parameters.
    '''

    class __T(Source.T):
        # Guts type specialization: hook parameter-conflict checking into
        # object validation.

        def validate_extra(self, val):
            Source.T.validate_extra(self, val)
            val.check_conflicts()

    def check_conflicts(self):
        '''
        Check for parameter conflicts.

        To be overloaded in subclasses. Raises :py:exc:`DerivedMagnitudeError`
        on conflicts.
        '''
        pass

    def get_magnitude(self, store=None, target=None):
        # Subclasses override this; the base implementation signals that no
        # magnitude can be derived.
        raise DerivedMagnitudeError('No magnitude set.')

    def get_moment(self, store=None, target=None):
        # Scalar moment [Nm] converted from the derived magnitude.
        return float(pmt.magnitude_to_moment(
            self.get_magnitude(store, target)))

    def pyrocko_moment_tensor(self, store=None, target=None):
        raise NotImplementedError(
            '%s does not implement pyrocko_moment_tensor()'
            % self.__class__.__name__)

    def pyrocko_event(self, store=None, target=None, **kwargs):
        try:
            mt = self.pyrocko_moment_tensor(store, target)
            # NOTE(review): store/target are not forwarded to
            # get_magnitude() here, so magnitudes requiring an earth model
            # fall into the except branch — confirm this is intended.
            magnitude = self.get_magnitude()
        except (DerivedMagnitudeError, NotImplementedError):
            mt = None
            magnitude = None

        return Source.pyrocko_event(
            self, store, target,
            moment_tensor=mt,
            magnitude=magnitude,
            **kwargs)
class ExplosionSource(SourceWithDerivedMagnitude):
    '''
    An isotropic explosion point source.

    The source strength can be given either as a moment magnitude
    (``magnitude``) or as a volume change (``volume_change``); giving both
    raises :py:exc:`DerivedMagnitudeError`.
    '''

    magnitude = Float.T(
        optional=True,
        help='moment magnitude Mw as in [Hanks and Kanamori, 1979]')

    volume_change = Float.T(
        optional=True,
        help='volume change of the explosion/implosion or '
             'the contracting/extending magmatic source. [m^3]')

    discretized_source_class = meta.DiscretizedExplosionSource

    def __init__(self, **kwargs):
        # Accept scalar moment as an alternative to magnitude; an explicit
        # magnitude keyword takes precedence.
        if 'moment' in kwargs:
            mom = kwargs.pop('moment')
            if 'magnitude' not in kwargs:
                kwargs['magnitude'] = float(pmt.moment_to_magnitude(mom))

        SourceWithDerivedMagnitude.__init__(self, **kwargs)

    def base_key(self):
        return SourceWithDerivedMagnitude.base_key(self) + \
            (self.volume_change,)

    def check_conflicts(self):
        '''
        Raise :py:exc:`DerivedMagnitudeError` if both ``magnitude`` and
        ``volume_change`` are given.
        '''
        if self.magnitude is not None and self.volume_change is not None:
            raise DerivedMagnitudeError(
                'Magnitude and volume_change are both defined.')

    def get_magnitude(self, store=None, target=None):
        '''
        Get the moment magnitude, derived from ``volume_change`` if needed.

        Falls back to the magnitude of a unit moment (1 Nm) source when
        neither ``magnitude`` nor ``volume_change`` is set.
        '''
        self.check_conflicts()

        if self.magnitude is not None:
            return self.magnitude

        elif self.volume_change is not None:
            moment = self.volume_change * \
                self.get_moment_to_volume_change_ratio(store, target)

            return float(pmt.moment_to_magnitude(abs(moment)))
        else:
            return float(pmt.moment_to_magnitude(1.0))

    def get_volume_change(self, store=None, target=None):
        '''
        Get the volume change, derived from ``magnitude`` if needed.
        '''
        self.check_conflicts()

        if self.volume_change is not None:
            return self.volume_change

        elif self.magnitude is not None:
            moment = float(pmt.magnitude_to_moment(self.magnitude))
            return moment / self.get_moment_to_volume_change_ratio(
                store, target)

        else:
            # Unit moment source. Forward target so the interpolation choice
            # is consistent with the other branches (it was dropped before).
            return 1.0 / self.get_moment_to_volume_change_ratio(
                store, target)

    def get_moment_to_volume_change_ratio(self, store, target=None):
        '''
        Get the ratio between scalar moment and volume change.

        The ratio used is three times the shear modulus at the source
        position, extracted from the earth model of the given GF ``store``.

        :raises DerivedMagnitudeError: if no store is given or the source
            position is outside the store's bounds
        '''
        if store is None:
            raise DerivedMagnitudeError(
                'Need earth model to convert between volume change and '
                'magnitude.')

        points = num.array(
            [[self.north_shift, self.east_shift, self.depth]], dtype=float)

        interpolation = target.interpolation if target else 'multilinear'
        try:
            shear_moduli = store.config.get_shear_moduli(
                self.lat, self.lon,
                points=points,
                interpolation=interpolation)[0]
        except meta.OutOfBounds:
            raise DerivedMagnitudeError(
                'Could not get shear modulus at source position.')

        return float(3. * shear_moduli)

    def get_factor(self):
        return 1.0

    def discretize_basesource(self, store, target=None):
        '''
        Get a discretized explosion source for GF stacking.
        '''
        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)

        # sqrt(2/3) scaling matches the isotropic tensor convention used in
        # pyrocko_moment_tensor() below.
        amplitudes *= self.get_moment(store, target) * math.sqrt(2. / 3.)

        if self.volume_change is not None:
            if self.volume_change < 0.:
                # Implosion / contraction: flip the sign of the amplitudes.
                amplitudes *= -1

        return meta.DiscretizedExplosionSource(
            m0s=amplitudes,
            **self._dparams_base_repeated(times))

    def pyrocko_moment_tensor(self, store=None, target=None):
        a = self.get_moment(store, target) * math.sqrt(2. / 3.)
        return pmt.MomentTensor(m=pmt.symmat6(a, a, a, 0., 0., 0.))
class RectangularExplosionSource(ExplosionSource):
    '''
    Rectangular or line explosion source.
    '''

    discretized_source_class = meta.DiscretizedExplosionSource

    strike = Float.T(
        default=0.0,
        help='strike direction in [deg], measured clockwise from north')

    dip = Float.T(
        default=90.0,
        help='dip angle in [deg], measured downward from horizontal')

    length = Float.T(
        default=0.,
        help='length of rectangular source area [m]')

    width = Float.T(
        default=0.,
        help='width of rectangular source area [m]')

    anchor = StringChoice.T(
        choices=['top', 'top_left', 'top_right', 'center', 'bottom',
                 'bottom_left', 'bottom_right'],
        default='center',
        optional=True,
        help='Anchor point for positioning the plane, can be: top, center '
             'or bottom and also top_left, top_right, bottom_left or '
             'bottom_right.')

    nucleation_x = Float.T(
        optional=True,
        help='horizontal position of rupture nucleation in normalized fault '
             'plane coordinates (-1 = left edge, +1 = right edge)')

    nucleation_y = Float.T(
        optional=True,
        help='down-dip position of rupture nucleation in normalized fault '
             'plane coordinates (-1 = upper edge, +1 = lower edge)')

    velocity = Float.T(
        default=3500.,
        help='speed of explosion front [m/s]')

    aggressive_oversampling = Bool.T(
        default=False,
        help='Aggressive oversampling for basesource discretization. '
             'When using \'multilinear\' interpolation oversampling has'
             ' practically no effect.')

    def base_key(self):
        return Source.base_key(self) + (self.strike, self.dip, self.length,
                                        self.width, self.nucleation_x,
                                        self.nucleation_y, self.velocity,
                                        self.anchor)

    def discretize_basesource(self, store, target=None):
        '''
        Discretize the rectangle into a distribution of explosion point
        sources, scaled to the source's total moment.
        '''
        # Convert normalized nucleation coordinates to absolute in-plane
        # offsets.
        if self.nucleation_x is not None:
            nucx = self.nucleation_x * 0.5 * self.length
        else:
            nucx = None

        if self.nucleation_y is not None:
            nucy = self.nucleation_y * 0.5 * self.width
        else:
            nucy = None

        stf = self.effective_stf_pre()

        points, times, amplitudes, dl, dw, nl, nw = discretize_rect_source(
            store.config.deltas, store.config.deltat,
            self.time, self.north_shift, self.east_shift, self.depth,
            self.strike, self.dip, self.length, self.width, self.anchor,
            self.velocity, stf=stf, nucleation_x=nucx, nucleation_y=nucy,
            aggressive_oversampling=self.aggressive_oversampling)

        # Normalize so the distributed amplitudes sum to the total moment.
        amplitudes /= num.sum(amplitudes)
        amplitudes *= self.get_moment(store, target)

        return meta.DiscretizedExplosionSource(
            lat=self.lat,
            lon=self.lon,
            times=times,
            north_shifts=points[:, 0],
            east_shifts=points[:, 1],
            depths=points[:, 2],
            m0s=amplitudes)

    def outline(self, cs='xyz'):
        '''
        Get the corner coordinates of the source rectangle.

        :param cs: output coordinate system: ``'xyz'``, ``'xy'``,
            ``'latlon'`` or ``'lonlat'``
        '''
        points = outline_rect_source(self.strike, self.dip, self.length,
                                     self.width, self.anchor)

        points[:, 0] += self.north_shift
        points[:, 1] += self.east_shift
        points[:, 2] += self.depth
        if cs == 'xyz':
            return points
        elif cs == 'xy':
            return points[:, :2]
        elif cs in ('latlon', 'lonlat'):
            latlon = ne_to_latlon(
                self.lat, self.lon, points[:, 0], points[:, 1])

            latlon = num.array(latlon).T
            if cs == 'latlon':
                return latlon
            else:
                return latlon[:, ::-1]

    def get_nucleation_abs_coord(self, cs='xy'):
        '''
        Get absolute coordinates of the nucleation point, or ``(None, None)``
        if no nucleation point is set.
        '''
        if self.nucleation_x is None:
            return None, None

        coords = from_plane_coords(self.strike, self.dip, self.length,
                                   self.width, self.depth, self.nucleation_x,
                                   self.nucleation_y, lat=self.lat,
                                   lon=self.lon, north_shift=self.north_shift,
                                   east_shift=self.east_shift, cs=cs)
        return coords
class DCSource(SourceWithMagnitude):
    '''
    A double-couple point source.
    '''

    strike = Float.T(
        default=0.0,
        help='strike direction in [deg], measured clockwise from north')

    dip = Float.T(
        default=90.0,
        help='dip angle in [deg], measured downward from horizontal')

    rake = Float.T(
        default=0.0,
        help='rake angle in [deg], '
             'measured counter-clockwise from right-horizontal '
             'in on-plane view')

    discretized_source_class = meta.DiscretizedMTSource

    def base_key(self):
        # The orientation angles take part in the GF stack sharing decision.
        return Source.base_key(self) + (self.strike, self.dip, self.rake)

    def get_factor(self):
        # Amplitude factor applied in post-processing: the scalar moment.
        return float(pmt.magnitude_to_moment(self.magnitude))

    def discretize_basesource(self, store, target=None):
        # Discretize the STF and scale a unit double-couple tensor with the
        # resulting amplitude samples.
        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)

        mot = pmt.MomentTensor(
            strike=self.strike, dip=self.dip, rake=self.rake)

        return meta.DiscretizedMTSource(
            m6s=mot.m6()[num.newaxis, :] * amplitudes[:, num.newaxis],
            **self._dparams_base_repeated(times))

    def pyrocko_moment_tensor(self, store=None, target=None):
        mt_args = dict(
            strike=self.strike,
            dip=self.dip,
            rake=self.rake,
            scalar_moment=self.moment)
        return pmt.MomentTensor(**mt_args)

    def pyrocko_event(self, store=None, target=None, **kwargs):
        return SourceWithMagnitude.pyrocko_event(
            self, store, target,
            moment_tensor=self.pyrocko_moment_tensor(store, target),
            **kwargs)

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        d = {}
        mt = ev.moment_tensor
        if mt:
            (strike, dip, rake), _ = mt.both_strike_dip_rake()
            d = dict(
                strike=float(strike),
                dip=float(dip),
                rake=float(rake),
                magnitude=float(mt.moment_magnitude()))

        d.update(kwargs)
        return super(DCSource, cls).from_pyrocko_event(ev, **d)
class CLVDSource(SourceWithMagnitude):
    '''
    A pure CLVD point source.
    '''

    discretized_source_class = meta.DiscretizedMTSource

    azimuth = Float.T(
        default=0.0,
        help='azimuth direction of largest dipole, clockwise from north [deg]')

    dip = Float.T(
        default=90.,
        help='dip direction of largest dipole, downward from horizontal [deg]')

    def base_key(self):
        return Source.base_key(self) + (self.azimuth, self.dip)

    def get_factor(self):
        # Amplitude factor applied in post-processing: the scalar moment.
        return float(pmt.magnitude_to_moment(self.magnitude))

    @property
    def m6(self):
        '''
        Moment tensor components in (nn, ee, dd, ne, nd, ed) order, rotated
        to the configured azimuth and dip.
        '''
        a = math.sqrt(4. / 3.) * self.get_factor()
        m = pmt.symmat6(-0.5 * a, -0.5 * a, a, 0., 0., 0.)
        rotmat1 = pmt.euler_to_matrix(
            d2r * (self.dip - 90.),
            d2r * (self.azimuth - 90.),
            0.)
        m = rotmat1.T * m * rotmat1
        return pmt.to6(m)

    @property
    def m6_astuple(self):
        return tuple(self.m6.tolist())

    def discretize_basesource(self, store, target=None):
        # The factor is divided out here because it is re-applied as the
        # post-processing amplitude factor (get_factor).
        factor = self.get_factor()
        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)
        return meta.DiscretizedMTSource(
            m6s=self.m6[num.newaxis, :] * amplitudes[:, num.newaxis] / factor,
            **self._dparams_base_repeated(times))

    def pyrocko_moment_tensor(self, store=None, target=None):
        return pmt.MomentTensor(m=pmt.symmat6(*self.m6_astuple))

    def pyrocko_event(self, store=None, target=None, **kwargs):
        # Compute the moment tensor only once (it was computed twice before).
        mt = self.pyrocko_moment_tensor(store, target)
        return Source.pyrocko_event(
            self, store, target,
            moment_tensor=mt,
            magnitude=float(mt.moment_magnitude()),
            **kwargs)
class VLVDSource(SourceWithDerivedMagnitude):
    '''
    Volumetric linear vector dipole source.

    This source is a parameterization for a restricted moment tensor point
    source, useful to represent dyke or sill like inflation or deflation
    sources. The restriction is such that the moment tensor is rotational
    symmetric. It can be represented by a superposition of a linear vector
    dipole (here we use a CLVD for convenience) and an isotropic component.
    The restricted moment tensor has 4 degrees of freedom: 2 independent
    eigenvalues and 2 rotation angles orienting the symmetry axis.

    In this parameterization, the isotropic component is controlled by
    ``volume_change``. To define the moment tensor, it must be converted to
    the scalar moment of the MT's isotropic component. For the conversion,
    the shear modulus at the source's position must be known. This value is
    extracted from the earth model defined in the GF store in use.

    The CLVD part is controlled by its scalar moment :math:`M_0`:
    ``clvd_moment``. The sign of ``clvd_moment`` is used to switch between a
    positive or negative CLVD (the sign of the largest eigenvalue).
    '''

    discretized_source_class = meta.DiscretizedMTSource

    azimuth = Float.T(
        default=0.0,
        help='azimuth direction of symmetry axis, clockwise from north [deg].')

    dip = Float.T(
        default=90.,
        help='dip direction of symmetry axis, downward from horizontal [deg].')

    volume_change = Float.T(
        default=0.,
        help='volume change of the inflation/deflation [m^3].')

    clvd_moment = Float.T(
        default=0.,
        help='scalar moment :math:`M_0` of the CLVD component [Nm]. The sign '
             'controls the sign of the CLVD (the sign of its largest '
             'eigenvalue).')

    def get_moment_to_volume_change_ratio(self, store, target):
        '''
        Get the ratio between scalar moment and volume change.

        The ratio used is three times the shear modulus at the source
        position, extracted from the GF store's earth model.

        :raises DerivedMagnitudeError: if store or target is missing, or the
            source position is outside the store's bounds
        '''
        if store is None or target is None:
            raise DerivedMagnitudeError(
                'Need earth model to convert between volume change and '
                'magnitude.')

        points = num.array(
            [[self.north_shift, self.east_shift, self.depth]], dtype=float)

        try:
            shear_moduli = store.config.get_shear_moduli(
                self.lat, self.lon,
                points=points,
                interpolation=target.interpolation)[0]
        except meta.OutOfBounds:
            raise DerivedMagnitudeError(
                'Could not get shear modulus at source position.')

        return float(3. * shear_moduli)

    def base_key(self):
        return Source.base_key(self) + \
            (self.azimuth, self.dip, self.volume_change, self.clvd_moment)

    def get_magnitude(self, store=None, target=None):
        # Magnitude of the combined (CLVD + isotropic) moment tensor.
        mt = self.pyrocko_moment_tensor(store, target)
        return float(pmt.moment_to_magnitude(mt.moment))

    def get_m6(self, store, target):
        '''
        Get the combined moment tensor in (nn, ee, dd, ne, nd, ed) order.
        '''
        # CLVD part: eigenvalues (-a/2, -a/2, a), rotated so the largest
        # dipole points along the configured symmetry axis.
        a = math.sqrt(4. / 3.) * self.clvd_moment
        m_clvd = pmt.symmat6(-0.5 * a, -0.5 * a, a, 0., 0., 0.)

        rotmat1 = pmt.euler_to_matrix(
            d2r * (self.dip - 90.),
            d2r * (self.azimuth - 90.),
            0.)
        m_clvd = rotmat1.T * m_clvd * rotmat1

        # Isotropic part, converted from volume change via the 3*mu ratio.
        m_iso = self.volume_change * \
            self.get_moment_to_volume_change_ratio(store, target)

        m_iso = pmt.symmat6(m_iso, m_iso, m_iso, 0.,
                            0., 0.,) * math.sqrt(2. / 3)

        m = pmt.to6(m_clvd) + pmt.to6(m_iso)
        return m

    def get_moment(self, store=None, target=None):
        return float(pmt.magnitude_to_moment(
            self.get_magnitude(store, target)))

    def get_m6_astuple(self, store, target):
        m6 = self.get_m6(store, target)
        return tuple(m6.tolist())

    def discretize_basesource(self, store, target=None):
        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)

        m6 = self.get_m6(store, target)
        # NOTE(review): this multiplies the 6-component tensor by the STF
        # amplitude sample array — it appears to assume a single-sample STF
        # discretization; confirm behavior for multi-sample STFs.
        m6 *= amplitudes / self.get_factor()

        return meta.DiscretizedMTSource(
            m6s=m6[num.newaxis, :],
            **self._dparams_base_repeated(times))

    def pyrocko_moment_tensor(self, store=None, target=None):
        m6_astuple = self.get_m6_astuple(store, target)
        return pmt.MomentTensor(m=pmt.symmat6(*m6_astuple))
class MTSource(Source):
    '''
    A moment tensor point source.
    '''

    discretized_source_class = meta.DiscretizedMTSource

    mnn = Float.T(
        default=1.,
        help='north-north component of moment tensor in [Nm]')

    mee = Float.T(
        default=1.,
        help='east-east component of moment tensor in [Nm]')

    mdd = Float.T(
        default=1.,
        help='down-down component of moment tensor in [Nm]')

    mne = Float.T(
        default=0.,
        help='north-east component of moment tensor in [Nm]')

    mnd = Float.T(
        default=0.,
        help='north-down component of moment tensor in [Nm]')

    med = Float.T(
        default=0.,
        help='east-down component of moment tensor in [Nm]')

    def __init__(self, **kwargs):
        # Accept a 6-component vector 'm6' as a shorthand for the individual
        # moment tensor components.
        if 'm6' in kwargs:
            for (k, v) in zip('mnn mee mdd mne mnd med'.split(),
                              kwargs.pop('m6')):
                kwargs[k] = float(v)

        Source.__init__(self, **kwargs)

    @property
    def m6(self):
        '''
        Moment tensor components as array in (nn, ee, dd, ne, nd, ed) order.
        '''
        return num.array(self.m6_astuple)

    @property
    def m6_astuple(self):
        return (self.mnn, self.mee, self.mdd, self.mne, self.mnd, self.med)

    @m6.setter
    def m6(self, value):
        self.mnn, self.mee, self.mdd, self.mne, self.mnd, self.med = value

    def base_key(self):
        return Source.base_key(self) + self.m6_astuple

    def discretize_basesource(self, store, target=None):
        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)
        return meta.DiscretizedMTSource(
            m6s=self.m6[num.newaxis, :] * amplitudes[:, num.newaxis],
            **self._dparams_base_repeated(times))

    def get_magnitude(self, store=None, target=None):
        '''
        Get the moment magnitude derived from the tensor components.
        '''
        m6 = self.m6
        # Scalar moment as |M| / sqrt(2); the off-diagonal components appear
        # twice in the full tensor, hence the factor 2. Wrapped in float()
        # for consistency with the other get_magnitude() implementations.
        return float(pmt.moment_to_magnitude(
            math.sqrt(num.sum(m6[0:3]**2) + 2.0 * num.sum(m6[3:6]**2)) /
            math.sqrt(2.)))

    def pyrocko_moment_tensor(self, store=None, target=None):
        return pmt.MomentTensor(m=pmt.symmat6(*self.m6_astuple))

    def pyrocko_event(self, store=None, target=None, **kwargs):
        # Compute the moment tensor only once (it was computed twice before).
        mt = self.pyrocko_moment_tensor(store, target)
        return Source.pyrocko_event(
            self, store, target,
            moment_tensor=mt,
            magnitude=float(mt.moment_magnitude()),
            **kwargs)

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        d = {}
        mt = ev.moment_tensor
        if mt:
            d.update(m6=tuple(map(float, mt.m6())))
        else:
            if ev.magnitude is not None:
                # Fall back to an isotropic tensor matching the magnitude.
                mom = pmt.magnitude_to_moment(ev.magnitude)
                v = math.sqrt(2. / 3.) * mom
                d.update(m6=(v, v, v, 0., 0., 0.))

        d.update(kwargs)
        return super(MTSource, cls).from_pyrocko_event(ev, **d)
# Mapping of anchor names to normalized (x, y) coordinates on the fault
# plane: x in [-1., 1.] from left to right edge, y in [-1., 1.] from top
# to bottom edge.
map_anchor = {
    'center': (0.0, 0.0),
    'center_left': (-1.0, 0.0),
    'center_right': (1.0, 0.0),
    'top': (0.0, -1.0),
    'top_left': (-1.0, -1.0),
    'top_right': (1.0, -1.0),
    'bottom': (0.0, 1.0),
    'bottom_left': (-1.0, 1.0),
    'bottom_right': (1.0, 1.0)}
class RectangularSource(SourceWithDerivedMagnitude):
    '''
    Classical Haskell source model modified for bilateral rupture.

    The source strength can be given either as a moment magnitude
    (``magnitude``) or as ``slip``; giving both raises
    :py:exc:`DerivedMagnitudeError`.
    '''

    discretized_source_class = meta.DiscretizedMTSource

    magnitude = Float.T(
        optional=True,
        help='moment magnitude Mw as in [Hanks and Kanamori, 1979]')

    strike = Float.T(
        default=0.0,
        help='strike direction in [deg], measured clockwise from north')

    dip = Float.T(
        default=90.0,
        help='dip angle in [deg], measured downward from horizontal')

    rake = Float.T(
        default=0.0,
        help='rake angle in [deg], '
             'measured counter-clockwise from right-horizontal '
             'in on-plane view')

    length = Float.T(
        default=0.,
        help='length of rectangular source area [m]')

    width = Float.T(
        default=0.,
        help='width of rectangular source area [m]')

    anchor = StringChoice.T(
        choices=['top', 'top_left', 'top_right', 'center', 'bottom',
                 'bottom_left', 'bottom_right'],
        default='center',
        optional=True,
        help='Anchor point for positioning the plane, can be: ``top, center, '
             'bottom, top_left, top_right, bottom_left, bottom_right``.')

    nucleation_x = Float.T(
        optional=True,
        help='horizontal position of rupture nucleation in normalized fault '
             'plane coordinates (``-1.`` = left edge, ``+1.`` = right edge)')

    nucleation_y = Float.T(
        optional=True,
        help='down-dip position of rupture nucleation in normalized fault '
             'plane coordinates (``-1.`` = upper edge, ``+1.`` = lower edge)')

    velocity = Float.T(
        default=3500.,
        help='speed of rupture front [m/s]')

    slip = Float.T(
        optional=True,
        help='Slip on the rectangular source area [m]')

    opening_fraction = Float.T(
        default=0.,
        help='Determines fraction of slip related to opening. '
             '(``-1``: pure tensile closing, '
             '``0``: pure shear, '
             '``1``: pure tensile opening)')

    decimation_factor = Int.T(
        optional=True,
        default=1,
        help='Sub-source decimation factor, a larger decimation will'
             ' make the result inaccurate but shorten the necessary'
             ' computation time (use for testing purposes only).')

    aggressive_oversampling = Bool.T(
        default=False,
        help='Aggressive oversampling for basesource discretization. '
             'When using \'multilinear\' interpolation oversampling has'
             ' practically no effect.')

    def __init__(self, **kwargs):
        # Accept scalar moment as an alternative to magnitude; an explicit
        # magnitude keyword takes precedence.
        if 'moment' in kwargs:
            mom = kwargs.pop('moment')
            if 'magnitude' not in kwargs:
                kwargs['magnitude'] = float(pmt.moment_to_magnitude(mom))

        SourceWithDerivedMagnitude.__init__(self, **kwargs)

    def base_key(self):
        return SourceWithDerivedMagnitude.base_key(self) + (
            self.magnitude,
            self.slip,
            self.strike,
            self.dip,
            self.rake,
            self.length,
            self.width,
            self.nucleation_x,
            self.nucleation_y,
            self.velocity,
            self.decimation_factor,
            self.anchor)

    def check_conflicts(self):
        '''
        Raise :py:exc:`DerivedMagnitudeError` if both ``magnitude`` and
        ``slip`` are given.
        '''
        if self.magnitude is not None and self.slip is not None:
            raise DerivedMagnitudeError(
                'Magnitude and slip are both defined.')

    def get_magnitude(self, store=None, target=None):
        '''
        Get the moment magnitude, derived from ``slip`` if necessary.

        Deriving the magnitude from slip requires ``store`` and ``target``
        to look up elastic moduli at the discretized sub-source positions.
        Falls back to the magnitude of a unit moment (1 Nm) source if
        neither ``magnitude`` nor ``slip`` is set.
        '''
        self.check_conflicts()
        if self.magnitude is not None:
            return self.magnitude

        elif self.slip is not None:
            if None in (store, target):
                raise DerivedMagnitudeError(
                    'Magnitude for a rectangular source with slip defined '
                    'can only be derived when earth model and target '
                    'interpolation method are available.')

            amplitudes = self._discretize(store, target)[2]
            if amplitudes.ndim == 2:
                # CLVD component has no net moment, leave out
                return float(pmt.moment_to_magnitude(
                    num.sum(num.abs(amplitudes[0:2, :]).sum())))
            else:
                return float(pmt.moment_to_magnitude(num.sum(amplitudes)))

        else:
            return float(pmt.moment_to_magnitude(1.0))

    def get_factor(self):
        return 1.0

    def get_slip_tensile(self):
        '''
        Tensile (opening) part of the slip [m].
        '''
        return self.slip * self.opening_fraction

    def get_slip_shear(self):
        '''
        Shear part of the slip [m].
        '''
        # Fixed: get_slip_tensile is a method and must be called; the
        # previous code passed the bound method object itself to abs(),
        # which raises TypeError.
        return self.slip - abs(self.get_slip_tensile())

    def _discretize(self, store, target):
        '''
        Discretize the rectangle into sub-source positions, times and
        amplitudes.

        Returns ``(points, times, amplitudes, dl, dw, nl, nw)``. When
        ``slip`` is given, ``amplitudes`` may be 2D with one row per moment
        tensor contribution (shear; for non-zero opening also isotropic and
        CLVD rows); otherwise it holds plain moment amplitudes.
        '''
        # Convert normalized nucleation coordinates to absolute in-plane
        # offsets.
        if self.nucleation_x is not None:
            nucx = self.nucleation_x * 0.5 * self.length
        else:
            nucx = None

        if self.nucleation_y is not None:
            nucy = self.nucleation_y * 0.5 * self.width
        else:
            nucy = None

        stf = self.effective_stf_pre()

        points, times, amplitudes, dl, dw, nl, nw = discretize_rect_source(
            store.config.deltas, store.config.deltat,
            self.time, self.north_shift, self.east_shift, self.depth,
            self.strike, self.dip, self.length, self.width, self.anchor,
            self.velocity, stf=stf, nucleation_x=nucx, nucleation_y=nucy,
            decimation_factor=self.decimation_factor,
            aggressive_oversampling=self.aggressive_oversampling)

        if self.slip is not None:
            if target is not None:
                interpolation = target.interpolation
            else:
                interpolation = 'nearest_neighbor'
                # logger.warn is a deprecated alias of logger.warning.
                logger.warning(
                    'no target information available, will use '
                    '"nearest_neighbor" interpolation when extracting shear '
                    'modulus from earth model')

            shear_moduli = store.config.get_shear_moduli(
                self.lat, self.lon,
                points=points,
                interpolation=interpolation)

            tensile_slip = self.get_slip_tensile()
            shear_slip = self.slip - abs(tensile_slip)

            amplitudes_total = [shear_moduli * shear_slip]
            if tensile_slip != 0:
                bulk_moduli = store.config.get_bulk_moduli(
                    self.lat, self.lon,
                    points=points,
                    interpolation=interpolation)

                tensile_iso = bulk_moduli * tensile_slip
                tensile_clvd = (2. / 3.) * shear_moduli * tensile_slip

                amplitudes_total.extend([tensile_iso, tensile_clvd])

            amplitudes_total = num.vstack(amplitudes_total).squeeze() * \
                amplitudes * dl * dw

        else:
            # normalization to retain total moment
            amplitudes_norm = amplitudes / num.sum(amplitudes)
            moment = self.get_moment(store, target)

            amplitudes_total = [
                amplitudes_norm * moment * (1 - abs(self.opening_fraction))]
            if self.opening_fraction != 0.:
                amplitudes_total.append(
                    amplitudes_norm * self.opening_fraction * moment)

            amplitudes_total = num.vstack(amplitudes_total).squeeze()

        return points, times, num.atleast_1d(amplitudes_total), dl, dw, nl, nw

    def discretize_basesource(self, store, target=None):
        '''
        Get a discretized moment tensor source for GF stacking.
        '''
        points, times, amplitudes, dl, dw, nl, nw = self._discretize(
            store, target)

        mot = pmt.MomentTensor(
            strike=self.strike, dip=self.dip, rake=self.rake)
        m6s = num.repeat(mot.m6()[num.newaxis, :], times.size, axis=0)

        if amplitudes.ndim == 1:
            m6s[:, :] *= amplitudes[:, num.newaxis]
        elif amplitudes.ndim == 2:
            # shear MT components
            rotmat1 = pmt.euler_to_matrix(
                d2r * self.dip, d2r * self.strike, d2r * -self.rake)
            m6s[:, :] *= amplitudes[0, :][:, num.newaxis]

            if amplitudes.shape[0] == 2:
                # tensile MT components - moment/magnitude input
                tensile = pmt.symmat6(1., 1., 3., 0., 0., 0.)
                rot_tensile = pmt.to6(rotmat1.T * tensile * rotmat1)

                m6s_tensile = rot_tensile[
                    num.newaxis, :] * amplitudes[1, :][:, num.newaxis]
                m6s += m6s_tensile

            elif amplitudes.shape[0] == 3:
                # tensile MT components - slip input
                iso = pmt.symmat6(1., 1., 1., 0., 0., 0.)
                clvd = pmt.symmat6(-1., -1., 2., 0., 0., 0.)

                rot_iso = pmt.to6(rotmat1.T * iso * rotmat1)
                rot_clvd = pmt.to6(rotmat1.T * clvd * rotmat1)

                m6s_iso = rot_iso[
                    num.newaxis, :] * amplitudes[1, :][:, num.newaxis]
                m6s_clvd = rot_clvd[
                    num.newaxis, :] * amplitudes[2, :][:, num.newaxis]
                m6s += m6s_iso + m6s_clvd
            else:
                # Message typo fixed ('Unknwown' -> 'Unknown').
                raise ValueError('Unknown amplitudes shape!')
        else:
            raise ValueError(
                'Unexpected dimension of {}'.format(amplitudes.ndim))

        ds = meta.DiscretizedMTSource(
            lat=self.lat,
            lon=self.lon,
            times=times,
            north_shifts=points[:, 0],
            east_shifts=points[:, 1],
            depths=points[:, 2],
            m6s=m6s,
            dl=dl,
            dw=dw,
            nl=nl,
            nw=nw)

        return ds

    def outline(self, cs='xyz'):
        '''
        Get the corner coordinates of the fault rectangle.

        :param cs: output coordinate system: ``'xyz'``, ``'xy'``,
            ``'latlon'``, ``'lonlat'`` or ``'latlondepth'``
        '''
        points = outline_rect_source(self.strike, self.dip, self.length,
                                     self.width, self.anchor)

        points[:, 0] += self.north_shift
        points[:, 1] += self.east_shift
        points[:, 2] += self.depth
        if cs == 'xyz':
            return points
        elif cs == 'xy':
            return points[:, :2]
        elif cs in ('latlon', 'lonlat', 'latlondepth'):
            latlon = ne_to_latlon(
                self.lat, self.lon, points[:, 0], points[:, 1])

            latlon = num.array(latlon).T
            if cs == 'latlon':
                return latlon
            elif cs == 'lonlat':
                return latlon[:, ::-1]
            else:
                return num.concatenate(
                    (latlon, points[:, 2].reshape((len(points), 1))),
                    axis=1)

    def points_on_source(self, cs='xyz', **kwargs):
        '''
        Get discrete points located on the fault plane.

        :param cs: output coordinate system: ``'xyz'``, ``'xy'``,
            ``'latlon'``, ``'lonlat'`` or ``'latlondepth'``
        '''
        points = points_on_rect_source(
            self.strike, self.dip, self.length, self.width,
            self.anchor, **kwargs)

        points[:, 0] += self.north_shift
        points[:, 1] += self.east_shift
        points[:, 2] += self.depth
        if cs == 'xyz':
            return points
        elif cs == 'xy':
            return points[:, :2]
        elif cs in ('latlon', 'lonlat', 'latlondepth'):
            latlon = ne_to_latlon(
                self.lat, self.lon, points[:, 0], points[:, 1])

            latlon = num.array(latlon).T
            if cs == 'latlon':
                return latlon
            elif cs == 'lonlat':
                return latlon[:, ::-1]
            else:
                return num.concatenate(
                    (latlon, points[:, 2].reshape((len(points), 1))),
                    axis=1)

    def get_nucleation_abs_coord(self, cs='xy'):
        '''
        Get absolute coordinates of the nucleation point, or ``(None, None)``
        if no nucleation point is set.
        '''
        if self.nucleation_x is None:
            return None, None

        coords = from_plane_coords(self.strike, self.dip, self.length,
                                   self.width, self.depth, self.nucleation_x,
                                   self.nucleation_y, lat=self.lat,
                                   lon=self.lon, north_shift=self.north_shift,
                                   east_shift=self.east_shift, cs=cs)
        return coords

    def pyrocko_moment_tensor(self, store=None, target=None):
        return pmt.MomentTensor(
            strike=self.strike,
            dip=self.dip,
            rake=self.rake,
            scalar_moment=self.get_moment(store, target))

    def pyrocko_event(self, store=None, target=None, **kwargs):
        return SourceWithDerivedMagnitude.pyrocko_event(
            self, store, target,
            **kwargs)

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        d = {}
        mt = ev.moment_tensor
        if mt:
            (strike, dip, rake), _ = mt.both_strike_dip_rake()
            d.update(
                strike=float(strike),
                dip=float(dip),
                rake=float(rake),
                magnitude=float(mt.moment_magnitude()))

        d.update(kwargs)
        return super(RectangularSource, cls).from_pyrocko_event(ev, **d)
class PseudoDynamicRupture(SourceWithDerivedMagnitude):
    '''Merged Eikonal and Okada Source for quasi-dynamic rupture modeling.

    Details are described in :doc:`/topics/pseudo-dynamic-rupture`.
    Note: attribute `stf` is not used so far, but kept for future applications.
    '''

    discretized_source_class = meta.DiscretizedMTSource

    strike = Float.T(
        default=0.0,
        help='strike direction in [deg], measured clockwise from north')

    dip = Float.T(
        default=0.0,
        help='dip angle in [deg], measured downward from horizontal')

    length = Float.T(
        default=10. * km,
        help='length of rectangular source area in [m]')

    width = Float.T(
        default=5. * km,
        help='width of rectangular source area in [m]')

    anchor = StringChoice.T(
        choices=['top', 'top_left', 'top_right', 'center', 'bottom',
                 'bottom_left', 'bottom_right'],
        default='center',
        optional=True,
        help='anchor point for positioning the plane, can be: ``top, center, '
             'bottom, top_left, top_right, bottom_left, '
             'bottom_right, center_left, center_right``')

    # NOTE: the ``num.float``/``num.bool`` dtype aliases were deprecated in
    # NumPy 1.20 and removed in 1.24; the builtins are exact equivalents.
    nucleation_x__ = Array.T(
        default=num.array([0.]),
        dtype=float,
        serialize_as='list',
        help='horizontal position of rupture nucleation in normalized fault '
             'plane coordinates (``-1.`` = left edge, ``+1.`` = right edge)')

    nucleation_y__ = Array.T(
        default=num.array([0.]),
        dtype=float,
        serialize_as='list',
        help='down-dip position of rupture nucleation in normalized fault '
             'plane coordinates (``-1.`` = upper edge, ``+1.`` = lower edge)')

    nucleation_time__ = Array.T(
        optional=True,
        help='time in [s] after origin, when nucleation points defined by '
             '``nucleation_x`` and ``nucleation_y`` rupture.',
        dtype=float,
        serialize_as='list')

    gamma = Float.T(
        default=0.8,
        help='scaling factor between rupture velocity and S-wave velocity: '
             r':math:`v_r = \gamma * v_s`')

    nx = Int.T(
        default=2,
        help='number of discrete source patches in x direction (along strike)')

    ny = Int.T(
        default=2,
        help='number of discrete source patches in y direction (down dip)')

    slip = Float.T(
        optional=True,
        help='maximum slip of the rectangular source [m]. '
             'Setting the slip the tractions/stress field '
             'will be normalized to accommodate the desired maximum slip.')

    rake = Float.T(
        optional=True,
        help='rake angle in [deg], '
             'measured counter-clockwise from right-horizontal '
             'in on-plane view. Rake is translated into homogenous tractions '
             'in strike and up-dip direction. ``rake`` is mutually exclusive '
             'with tractions parameter.')

    patches = List.T(
        OkadaSource.T(),
        optional=True,
        help='list of all boundary elements/sub faults/fault patches')

    patch_mask__ = Array.T(
        dtype=bool,
        serialize_as='list',
        shape=(None,),
        optional=True,
        help='mask for all boundary elements/sub faults/fault patches. True '
             'leaves the patch in the calculation, False excludes the patch.')

    tractions = TractionField.T(
        optional=True,
        help='traction field the rupture plane is exposed to. See the'
             ':py:mod:`pyrocko.gf.tractions` module for more details. '
             'If ``tractions=None`` and ``rake`` is given'
             ' :py:class:`~pyrocko.gf.tractions.DirectedTractions` will'
             ' be used.')

    coef_mat = Array.T(
        optional=True,
        help='coefficient matrix linking traction and dislocation field',
        dtype=float,
        shape=(None, None))

    eikonal_decimation = Int.T(
        optional=True,
        default=1,
        help='sub-source eikonal factor, a smaller eikonal factor will'
             ' increase the accuracy of rupture front calculation but'
             ' increases also the computation time.')

    decimation_factor = Int.T(
        optional=True,
        default=1,
        help='sub-source decimation factor, a larger decimation will'
             ' make the result inaccurate but shorten the necessary'
             ' computation time (use for testing purposes only).')

    nthreads = Int.T(
        optional=True,
        default=1,
        help='number of threads for Okada forward modelling, '
             'matrix inversion and calculation of point subsources. '
             'Note: for small/medium matrices 1 thread is most efficient!')

    pure_shear = Bool.T(
        optional=True,
        default=False,
        help='calculate only shear tractions and omit tensile tractions.')

    smooth_rupture = Bool.T(
        default=True,
        help='smooth the tractions by weighting partially ruptured'
             ' fault patches.')

    aggressive_oversampling = Bool.T(
        default=False,
        help='aggressive oversampling for basesource discretization. '
             'When using \'multilinear\' interpolation oversampling has'
             ' practically no effect.')
    def __init__(self, **kwargs):
        # Delegate attribute setup to the guts-based base class, then set up
        # the per-instance interpolator cache and resolve the mutually
        # exclusive rake/tractions parameters.
        SourceWithDerivedMagnitude.__init__(self, **kwargs)
        self._interpolators = {}
        self.check_conflicts()
2512 @property
2513 def nucleation_x(self):
2514 return self.nucleation_x__
2516 @nucleation_x.setter
2517 def nucleation_x(self, nucleation_x):
2518 if isinstance(nucleation_x, list):
2519 nucleation_x = num.array(nucleation_x)
2521 elif not isinstance(
2522 nucleation_x, num.ndarray) and nucleation_x is not None:
2524 nucleation_x = num.array([nucleation_x])
2525 self.nucleation_x__ = nucleation_x
2527 @property
2528 def nucleation_y(self):
2529 return self.nucleation_y__
2531 @nucleation_y.setter
2532 def nucleation_y(self, nucleation_y):
2533 if isinstance(nucleation_y, list):
2534 nucleation_y = num.array(nucleation_y)
2536 elif not isinstance(nucleation_y, num.ndarray) \
2537 and nucleation_y is not None:
2538 nucleation_y = num.array([nucleation_y])
2540 self.nucleation_y__ = nucleation_y
2542 @property
2543 def nucleation(self):
2544 nucl_x, nucl_y = self.nucleation_x, self.nucleation_y
2546 if (nucl_x is None) or (nucl_y is None):
2547 return None
2549 assert nucl_x.shape[0] == nucl_y.shape[0]
2551 return num.concatenate(
2552 (nucl_x[:, num.newaxis], nucl_y[:, num.newaxis]), axis=1)
2554 @nucleation.setter
2555 def nucleation(self, nucleation):
2556 if isinstance(nucleation, list):
2557 nucleation = num.array(nucleation)
2559 assert nucleation.shape[1] == 2
2561 self.nucleation_x = nucleation[:, 0]
2562 self.nucleation_y = nucleation[:, 1]
2564 @property
2565 def nucleation_time(self):
2566 return self.nucleation_time__
2568 @nucleation_time.setter
2569 def nucleation_time(self, nucleation_time):
2570 if not isinstance(nucleation_time, num.ndarray) \
2571 and nucleation_time is not None:
2572 nucleation_time = num.array([nucleation_time])
2574 self.nucleation_time__ = nucleation_time
2576 @property
2577 def patch_mask(self):
2578 if self.patch_mask__ is not None:
2579 return self.patch_mask__
2580 else:
2581 return num.ones(self.nx * self.ny, dtype=bool)
2583 @patch_mask.setter
2584 def patch_mask(self, patch_mask):
2585 self.patch_mask__ = patch_mask
2587 def get_tractions(self):
2588 '''Return source traction vectors
2590 If :py:attr:`rake` is given, unit length directed traction vectors
2591 (:py:class:`pyrocko.gf.tractions.DirectedTractions`) are returned, else
2592 the given :py:attr:`tractions` are used.
2594 :returns: traction vectors per patch
2595 :rtype: :py:class:`numpy.ndarray` of shape ``(n_patches, 3)``
2596 '''
2598 if self.rake is not None:
2599 if num.isnan(self.rake):
2600 raise ValueError('Rake must be a real number, not NaN.')
2602 logger.warning(
2603 'tractions are derived based on the given source rake')
2604 tractions = DirectedTractions(rake=self.rake)
2605 else:
2606 tractions = self.tractions
2607 return tractions.get_tractions(self.nx, self.ny, self.patches)
2609 def base_key(self):
2610 return SourceWithDerivedMagnitude.base_key(self) + (
2611 self.slip,
2612 self.strike,
2613 self.dip,
2614 self.rake,
2615 self.length,
2616 self.width,
2617 float(self.nucleation_x.mean()),
2618 float(self.nucleation_y.mean()),
2619 self.decimation_factor,
2620 self.anchor,
2621 self.pure_shear,
2622 self.gamma,
2623 tuple(self.patch_mask))
    def check_conflicts(self):
        # ``tractions`` and ``rake`` are mutually exclusive ways to define the
        # stress field; if neither is given, fall back to rake=0.
        # NOTE(review): truthiness is used here, so rake=0.0 together with a
        # traction field does not trigger the conflict error — confirm whether
        # explicit ``is not None`` checks were intended.
        if self.tractions and self.rake:
            raise AttributeError(
                'tractions and rake are mutually exclusive')
        if self.tractions is None and self.rake is None:
            self.rake = 0.
2632 def get_magnitude(self, store=None, target=None):
2633 self.check_conflicts()
2634 if self.slip is not None or self.tractions is not None:
2635 if store is None:
2636 raise DerivedMagnitudeError(
2637 'magnitude for a rectangular source with slip or '
2638 'tractions defined can only be derived when earth model '
2639 'is set')
2641 moment_rate, calc_times = self.discretize_basesource(
2642 store, target=target).get_moment_rate(store.config.deltat)
2644 deltat = num.concatenate((
2645 (num.diff(calc_times)[0],),
2646 num.diff(calc_times)))
2648 return float(pmt.moment_to_magnitude(
2649 num.sum(moment_rate * deltat)))
2651 else:
2652 return float(pmt.moment_to_magnitude(1.0))
    def get_factor(self):
        # Scaling factor applied to the discretized source; always unity for
        # this source type (amplitudes are encoded in the moment tensors).
        return 1.0
2657 def outline(self, cs='xyz'):
2658 ''' Get source outline corner coordinates
2660 :param cs: output coordinate system. Choose between:
2661 ``xyz`` - north_shift, east_shift, depth in [m],
2662 ``xy`` - north_shift, east_shift in [m],
2663 ``latlon`` - Latitude, Longitude in [deg],
2664 ``lonlat`` - Longitude, Latitude in [deg] or
2665 ``latlondepth`` - Latitude, Longitude in [deg], depth in [m].
2666 :type cs: optional, str
2668 :returns: Corner points in desired coordinate system
2669 :rtype: :py:class:`numpy.ndarray` of shape ``(5, [2, 3])``
2670 '''
2671 points = outline_rect_source(self.strike, self.dip, self.length,
2672 self.width, self.anchor)
2674 points[:, 0] += self.north_shift
2675 points[:, 1] += self.east_shift
2676 points[:, 2] += self.depth
2677 if cs == 'xyz':
2678 return points
2679 elif cs == 'xy':
2680 return points[:, :2]
2681 elif cs in ('latlon', 'lonlat', 'latlondepth'):
2682 latlon = ne_to_latlon(
2683 self.lat, self.lon, points[:, 0], points[:, 1])
2685 latlon = num.array(latlon).T
2686 if cs == 'latlon':
2687 return latlon
2688 elif cs == 'lonlat':
2689 return latlon[:, ::-1]
2690 else:
2691 return num.concatenate(
2692 (latlon, points[:, 2].reshape((len(points), 1))),
2693 axis=1)
2695 def points_on_source(self, cs='xyz', **kwargs):
2696 ''' Convert relative x, y coordinates to geographical coordinates
2698 Given x and y coordinates (relative source coordinates between -1.
2699 and 1.) are converted to desired geographical coordinates. Coordinates
2700 need to be given as :py:class:`numpy.ndarray` arguments ``points_x``
2701 and ``points_y``
2703 :param cs: output coordinate system. Choose between:
2704 ``xyz`` - north_shift, east_shift, depth in [m],
2705 ``xy`` - north_shift, east_shift in [m],
2706 ``latlon`` - Latitude, Longitude in [deg],
2707 ``lonlat`` - Longitude, Latitude in [deg] or
2708 ``latlondepth`` - Latitude, Longitude in [deg], depth in [m].
2709 :type cs: optional, str
2711 :returns: point coordinates in desired coordinate system
2712 :rtype: :py:class:`numpy.ndarray` of shape ``(n_points, [2, 3])``
2713 '''
2714 points = points_on_rect_source(
2715 self.strike, self.dip, self.length, self.width,
2716 self.anchor, **kwargs)
2718 points[:, 0] += self.north_shift
2719 points[:, 1] += self.east_shift
2720 points[:, 2] += self.depth
2721 if cs == 'xyz':
2722 return points
2723 elif cs == 'xy':
2724 return points[:, :2]
2725 elif cs in ('latlon', 'lonlat', 'latlondepth'):
2726 latlon = ne_to_latlon(
2727 self.lat, self.lon, points[:, 0], points[:, 1])
2729 latlon = num.array(latlon).T
2730 if cs == 'latlon':
2731 return latlon
2732 elif cs == 'lonlat':
2733 return latlon[:, ::-1]
2734 else:
2735 return num.concatenate(
2736 (latlon, points[:, 2].reshape((len(points), 1))),
2737 axis=1)
2739 def pyrocko_moment_tensor(self, store=None, target=None):
2740 # TODO: Now this should be slip, then it depends on the store.
2741 # TODO: default to tractions is store is not given?
2742 tractions = self.get_tractions()
2743 tractions = tractions.mean(axis=0)
2744 rake = num.arctan2(tractions[1], tractions[0]) # arctan2(dip, slip)
2746 return pmt.MomentTensor(
2747 strike=self.strike,
2748 dip=self.dip,
2749 rake=rake,
2750 scalar_moment=self.get_moment(store, target))
2752 def pyrocko_event(self, store=None, target=None, **kwargs):
2753 return SourceWithDerivedMagnitude.pyrocko_event(
2754 self, store, target,
2755 **kwargs)
2757 @classmethod
2758 def from_pyrocko_event(cls, ev, **kwargs):
2759 d = {}
2760 mt = ev.moment_tensor
2761 if mt:
2762 (strike, dip, rake), _ = mt.both_strike_dip_rake()
2763 d.update(
2764 strike=float(strike),
2765 dip=float(dip),
2766 rake=float(rake))
2768 d.update(kwargs)
2769 return super(PseudoDynamicRupture, cls).from_pyrocko_event(ev, **d)
    def _discretize_points(self, store, *args, **kwargs):
        '''
        Discretize source plane with equal vertical and horizontal spacing

        Arguments and keyword arguments needed for :py:meth:`points_on_source`.

        :param store: Greens function database (needs to cover whole region of
            the source)
        :type store: :py:class:`pyrocko.gf.store.Store`

        :returns: Number of points in strike and dip direction, distance
            between adjacent points, coordinates (latlondepth) and coordinates
            (xy on fault) for discrete points
        :rtype: int, int, float, :py:class:`numpy.ndarray`,
            :py:class:`numpy.ndarray`
        '''
        anch_x, anch_y = map_anchor[self.anchor]

        # probe the velocity model along a down-dip line through the anchor,
        # roughly one sample per km of fault width
        npoints = int(self.width // km) + 1
        points = num.zeros((npoints, 3))
        points[:, 1] = num.linspace(-1., 1., npoints)
        points[:, 1] = (points[:, 1] - anch_y) * self.width/2

        # rotate plane-local coordinates into NED and shift to source depth
        rotmat = num.asarray(
            pmt.euler_to_matrix(self.dip*d2r, self.strike*d2r, 0.0))
        points = num.dot(rotmat.T, points.T).T
        points[:, 2] += self.depth

        # the slowest rupture velocity controls the required grid spacing;
        # vs is floored at 0.5 km/s to avoid degenerate spacings
        vs_min = store.config.get_vs(
            self.lat, self.lon, points,
            interpolation='nearest_neighbor')
        vr_min = max(vs_min.min(), .5*km) * self.gamma

        oversampling = 10.
        delta_l = self.length / (self.nx * oversampling)
        delta_w = self.width / (self.ny * oversampling)

        # grid spacing: finest of the temporal sampling requirement, the
        # patch oversampling and the GF store's native spacings, scaled by
        # the eikonal decimation factor
        delta = self.eikonal_decimation * num.min([
            store.config.deltat * vr_min / oversampling,
            delta_l, delta_w] + [
            deltas for deltas in store.config.deltas])

        # snap so the width divides into an integer number of steps
        delta = delta_w / num.ceil(delta_w / delta)

        nx = int(num.ceil(self.length / delta)) + 1
        ny = int(num.ceil(self.width / delta)) + 1

        # symmetric along-strike overhang keeps the grid centered despite
        # the rounding of nx
        rem_l = (nx-1)*delta - self.length
        lim_x = rem_l / self.length

        # x-major ordering: x repeated per column, y tiled per row
        points_xy = num.zeros((nx * ny, 2))
        points_xy[:, 0] = num.repeat(
            num.linspace(-1.-lim_x, 1.+lim_x, nx), ny)
        points_xy[:, 1] = num.tile(
            num.linspace(-1., 1., ny), nx)

        points = self.points_on_source(
            points_x=points_xy[:, 0],
            points_y=points_xy[:, 1],
            **kwargs)

        return nx, ny, delta, points, points_xy
2834 def _discretize_rupture_v(self, store, interpolation='nearest_neighbor',
2835 points=None):
2836 '''
2837 Get rupture velocity for discrete points on source plane
2839 :param store: Greens function database (needs to cover whole region of
2840 of the source)
2841 :type store: optional, :py:class:`pyrocko.gf.store.Store`
2842 :param interpolation: Interpolation method to use ("multilinear")
2843 :type interpolation: optional, str
2844 :param points: xy coordinates on fault (-1.:1.) of discrete points
2845 :type points: optional, :py:class:`numpy.ndarray`, ``(n_points, 2)``
2847 :returns: Rupture velocity assumed as :math:`v_s * \\gamma` for
2848 discrete points
2849 :rtype: :py:class:`numpy.ndarray`, ``(points.shape[0], 1)``
2850 '''
2852 if points is None:
2853 _, _, _, points, _ = self._discretize_points(store, cs='xyz')
2855 return store.config.get_vs(
2856 self.lat, self.lon,
2857 points=points,
2858 interpolation=interpolation) * self.gamma
2860 def discretize_time(
2861 self, store, interpolation='nearest_neighbor',
2862 vr=None, times=None, *args, **kwargs):
2863 '''
2864 Get rupture start time for discrete points on source plane
2866 :param store: Greens function database (needs to cover whole region of
2867 of the source)
2868 :type store: :py:class:`pyrocko.gf.store.Store`
2869 :param interpolation: Interpolation method to use (choose between
2870 ``nearest_neighbor`` and ``multilinear``)
2871 :type interpolation: optional, str
2872 :param vr: Array, containing rupture user defined rupture velocity
2873 values
2874 :type vr: optional, :py:class:`numpy.ndarray`
2875 :param times: Array, containing zeros, where rupture is starting, real
2876 positive numbers at later secondary nucleation points and -1, where
2877 time will be calculated. If not given, rupture starts at
2878 nucleation_x, nucleation_y. Times are given for discrete points
2879 with equal horizontal and vertical spacing
2880 :type times: optional, :py:class:`numpy.ndarray`
2882 :returns: Coordinates (latlondepth), Coordinates (xy), rupture velocity
2883 rupture propagation time of discrete points
2884 :rtype: :py:class:`numpy.ndarray`, ``(points.shape[0], 1)``,
2885 :py:class:`numpy.ndarray`, ``(points.shape[0], 1)``,
2886 :py:class:`numpy.ndarray`, ``(n_points_dip, n_points_strike)``,
2887 :py:class:`numpy.ndarray`, ``(n_points_dip, n_points_strike)``
2888 '''
2889 nx, ny, delta, points, points_xy = self._discretize_points(
2890 store, cs='xyz')
2892 if vr is None or vr.shape != tuple((nx, ny)):
2893 if vr:
2894 logger.warning(
2895 'Given rupture velocities are not in right shape: '
2896 '(%i, %i), but needed is (%i, %i).', *vr.shape + (nx, ny))
2897 vr = self._discretize_rupture_v(store, interpolation, points)\
2898 .reshape(nx, ny)
2900 if vr.shape != tuple((nx, ny)):
2901 logger.warning(
2902 'Given rupture velocities are not in right shape. Therefore'
2903 ' standard rupture velocity array is used.')
2905 def initialize_times():
2906 nucl_x, nucl_y = self.nucleation_x, self.nucleation_y
2908 if nucl_x.shape != nucl_y.shape:
2909 raise ValueError(
2910 'Nucleation coordinates have different shape.')
2912 dist_points = num.array([
2913 num.linalg.norm(points_xy - num.array([x, y]).ravel(), axis=1)
2914 for x, y in zip(nucl_x, nucl_y)])
2915 nucl_indices = num.argmin(dist_points, axis=1)
2917 if self.nucleation_time is None:
2918 nucl_times = num.zeros_like(nucl_indices)
2919 else:
2920 if self.nucleation_time.shape == nucl_x.shape:
2921 nucl_times = self.nucleation_time
2922 else:
2923 raise ValueError(
2924 'Nucleation coordinates and times have different '
2925 'shapes')
2927 t = num.full(nx * ny, -1.)
2928 t[nucl_indices] = nucl_times
2929 return t.reshape(nx, ny)
2931 if times is None:
2932 times = initialize_times()
2933 elif times.shape != tuple((nx, ny)):
2934 times = initialize_times()
2935 logger.warning(
2936 'Given times are not in right shape. Therefore standard time'
2937 ' array is used.')
2939 eikonal_ext.eikonal_solver_fmm_cartesian(
2940 speeds=vr, times=times, delta=delta)
2942 return points, points_xy, vr, times
    def get_vr_time_interpolators(
            self, store, interpolation='nearest_neighbor', force=False,
            *args, **kwargs):
        '''
        Calculate/return interpolators for rupture velocity and rupture time

        Arguments and keyword arguments needed for :py:meth:`discretize_time`.

        :param store: Greens function database (needs to cover whole region of
            of the source)
        :type store: :py:class:`pyrocko.gf.store.Store`
        :param interpolation: Kind of interpolation used. Choice between
            ``multilinear`` and ``nearest_neighbor``
        :type interpolation: optional, str
        :param force: Force recalculation of the interpolators (e.g. after
            change of nucleation point locations/times). Default is False
        :type force: optional, bool

        :returns: cached ``(nx, ny, times, vr, time_interpolator,
            vr_interpolator)`` tuple for the given interpolation method
        '''
        interp_map = {'multilinear': 'linear', 'nearest_neighbor': 'nearest'}
        if interpolation not in interp_map:
            raise TypeError(
                'Interpolation method %s not available' % interpolation)

        # results are cached per interpolation method
        if not self._interpolators.get(interpolation, False) or force:
            _, points_xy, vr, times = self.discretize_time(
                store, *args, **kwargs)

            if self.length <= 0.:
                raise ValueError(
                    'length must be larger then 0. not %g' % self.length)

            if self.width <= 0.:
                raise ValueError(
                    'width must be larger then 0. not %g' % self.width)

            nx, ny = times.shape
            anch_x, anch_y = map_anchor[self.anchor]

            # scale normalized (-1..1) plane coordinates to [m] relative to
            # the anchor point
            points_xy[:, 0] = (points_xy[:, 0] - anch_x) * self.length / 2.
            points_xy[:, 1] = (points_xy[:, 1] - anch_y) * self.width / 2.

            # points are ordered x-major: every ny-th entry gives the x axis,
            # the first ny entries give the y axis of the regular grid
            self._interpolators[interpolation] = (
                nx, ny, times, vr,
                RegularGridInterpolator(
                    (points_xy[::ny, 0], points_xy[:ny, 1]), times,
                    method=interp_map[interpolation]),
                RegularGridInterpolator(
                    (points_xy[::ny, 0], points_xy[:ny, 1]), vr,
                    method=interp_map[interpolation]))

        return self._interpolators[interpolation]
    def discretize_patches(
            self, store, interpolation='nearest_neighbor', force=False,
            grid_shape=(),
            *args, **kwargs):
        '''
        Get rupture start time and OkadaSource elements for points on rupture

        All source elements and their corresponding center points are
        calculated and stored in the :py:attr:`patches` attribute

        Arguments and keyword arguments needed for :py:meth:`discretize_time`.

        :param store: Greens function database (needs to cover whole region of
            of the source)
        :type store: :py:class:`pyrocko.gf.store.Store`
        :param interpolation: Kind of interpolation used. Choice between
            ``multilinear`` and ``nearest_neighbor``
        :type interpolation: optional, str
        :param force: Force recalculation of the vr and time interpolators (
            e.g. after change of nucleation point locations/times). Default is
            False
        :type force: optional, bool
        :param grid_shape: Desired sub fault patch grid size (nlength, nwidth).
            Either factor or grid_shape should be set.
        :type grid_shape: optional, tuple of int
        '''
        nx, ny, times, vr, time_interpolator, vr_interpolator = \
            self.get_vr_time_interpolators(
                store, *args,
                interpolation=interpolation, force=force, **kwargs)
        anch_x, anch_y = map_anchor[self.anchor]

        # half extents and anchor-relative edge coordinates of the plane
        al = self.length / 2.
        aw = self.width / 2.
        al1 = -(al + anch_x * al)
        al2 = al - anch_x * al
        aw1 = -aw + anch_y * aw
        aw2 = aw + anch_y * aw
        assert num.abs([al1, al2]).sum() == self.length
        assert num.abs([aw1, aw2]).sum() == self.width

        def get_lame(*a, **kw):
            # shear modulus and Poisson ratio derived from the earth model
            # (lamb is the Lame parameter lambda = vp^2 * rho - 2 * mu)
            shear_mod = store.config.get_shear_moduli(*a, **kw)
            lamb = store.config.get_vp(*a, **kw)**2 \
                * store.config.get_rho(*a, **kw) - 2. * shear_mod
            return shear_mod, lamb / (2. * (lamb + shear_mod))

        shear_mod, poisson = get_lame(
            self.lat, self.lon,
            num.array([[self.north_shift, self.east_shift, self.depth]]),
            interpolation=interpolation)

        okada_src = OkadaSource(
            lat=self.lat, lon=self.lon,
            strike=self.strike, dip=self.dip,
            north_shift=self.north_shift, east_shift=self.east_shift,
            depth=self.depth,
            al1=al1, al2=al2, aw1=aw1, aw2=aw2,
            poisson=poisson.mean(),
            shearmod=shear_mod.mean(),
            opening=kwargs.get('opening', 0.))

        if not (self.nx and self.ny):
            if grid_shape:
                self.nx, self.ny = grid_shape
            else:
                # fall back to the eikonal grid resolution
                self.nx = nx
                self.ny = ny

        source_disc, source_points = okada_src.discretize(self.nx, self.ny)

        # re-evaluate elastic parameters at the individual patch centers
        shear_mod, poisson = get_lame(
            self.lat, self.lon,
            num.array([src.source_patch()[:3] for src in source_disc]),
            interpolation=interpolation)

        if (self.nx, self.ny) != (nx, ny):
            # patch grid differs from eikonal grid: interpolate times and vr
            times_interp = time_interpolator(source_points[:, :2])
            vr_interp = vr_interpolator(source_points[:, :2])
        else:
            times_interp = times.T.ravel()
            vr_interp = vr.T.ravel()

        for isrc, src in enumerate(source_disc):
            src.vr = vr_interp[isrc]
            src.time = times_interp[isrc] + self.time

        self.patches = source_disc
    def discretize_basesource(self, store, target=None):
        '''
        Prepare source for synthetic waveform calculation

        :param store: Greens function database (needs to cover whole region of
            of the source)
        :type store: :py:class:`pyrocko.gf.store.Store`
        :param target: Target information
        :type target: optional, :py:class:`pyrocko.gf.targets.Target`

        :returns: Source discretized by a set of moment tensors and times
        :rtype: :py:class:`pyrocko.gf.meta.DiscretizedMTSource`
        '''
        if not target:
            interpolation = 'nearest_neighbor'
        else:
            interpolation = target.interpolation

        # make sure patches and the coefficient matrix are available
        if not self.patches:
            self.discretize_patches(store, interpolation)

        if self.coef_mat is None:
            self.calc_coef_mat()

        delta_slip, slip_times = self.get_delta_slip(store)
        npatches = self.nx * self.ny
        ntimes = slip_times.size

        anch_x, anch_y = map_anchor[self.anchor]

        # patch extents along strike (pln) and down dip (pwd)
        pln = self.length / self.nx
        pwd = self.width / self.ny

        patch_coords = num.array([
            (p.ix, p.iy)
            for p in self.patches]).reshape(self.nx, self.ny, 2)

        # a zero-slip boundary condition would distort the interpolation at
        # the fault edges, so the grid is padded by replicating the nearest
        # interior values instead
        slip_grid = num.zeros((self.nx + 2, self.ny + 2, ntimes, 3))
        slip_grid[1:-1, 1:-1, :, :] = \
            delta_slip.reshape(self.nx, self.ny, ntimes, 3)

        # corners ...
        slip_grid[0, 0, :, :] = slip_grid[1, 1, :, :]
        slip_grid[0, -1, :, :] = slip_grid[1, -2, :, :]
        slip_grid[-1, 0, :, :] = slip_grid[-2, 1, :, :]
        slip_grid[-1, -1, :, :] = slip_grid[-2, -2, :, :]

        # ... and edges
        slip_grid[1:-1, 0, :, :] = slip_grid[1:-1, 1, :, :]
        slip_grid[1:-1, -1, :, :] = slip_grid[1:-1, -2, :, :]
        slip_grid[0, 1:-1, :, :] = slip_grid[1, 1:-1, :, :]
        slip_grid[-1, 1:-1, :, :] = slip_grid[-2, 1:-1, :, :]

        def make_grid(patch_parameter):
            # embed a per-patch parameter into a grid padded by edge
            # replication (same scheme as for slip_grid above)
            grid = num.zeros((self.nx + 2, self.ny + 2))
            grid[1:-1, 1:-1] = patch_parameter.reshape(self.nx, self.ny)

            grid[0, 0] = grid[1, 1]
            grid[0, -1] = grid[1, -2]
            grid[-1, 0] = grid[-2, 1]
            grid[-1, -1] = grid[-2, -2]

            grid[1:-1, 0] = grid[1:-1, 1]
            grid[1:-1, -1] = grid[1:-1, -2]
            grid[0, 1:-1] = grid[1, 1:-1]
            grid[-1, 1:-1] = grid[-2, 1:-1]

            return grid

        lamb = self.get_patch_attribute('lamb')
        mu = self.get_patch_attribute('shearmod')

        lamb_grid = make_grid(lamb)
        mu_grid = make_grid(mu)

        # grid axes including the padded boundary cells
        coords_x = num.zeros(self.nx + 2)
        coords_x[1:-1] = patch_coords[:, 0, 0]
        coords_x[0] = coords_x[1] - pln / 2
        coords_x[-1] = coords_x[-2] + pln / 2

        coords_y = num.zeros(self.ny + 2)
        coords_y[1:-1] = patch_coords[0, :, 1]
        coords_y[0] = coords_y[1] - pwd / 2
        coords_y[-1] = coords_y[-2] + pwd / 2

        slip_interp = RegularGridInterpolator(
            (coords_x, coords_y, slip_times),
            slip_grid, method='nearest')

        lamb_interp = RegularGridInterpolator(
            (coords_x, coords_y),
            lamb_grid, method='nearest')

        mu_interp = RegularGridInterpolator(
            (coords_x, coords_y),
            mu_grid, method='nearest')

        # discretize basesources
        mindeltagf = min(tuple(
            (self.length / self.nx, self.width / self.ny) +
            tuple(store.config.deltas)))

        # number of point sources per patch and direction, reduced by the
        # decimation factor
        nl = int((1. / self.decimation_factor) *
                 num.ceil(pln / mindeltagf)) + 1
        nw = int((1. / self.decimation_factor) *
                 num.ceil(pwd / mindeltagf)) + 1
        nsrc_patch = int(nl * nw)
        dl = pln / nl
        dw = pwd / nw

        patch_area = dl * dw

        xl = num.linspace(-0.5 * (pln - dl), 0.5 * (pln - dl), nl)
        xw = num.linspace(-0.5 * (pwd - dw), 0.5 * (pwd - dw), nw)

        # per-patch local point source coordinates, replicated per patch
        base_coords = num.zeros((nsrc_patch, 3), dtype=num.float)
        base_coords[:, 0] = num.tile(xl, nw)
        base_coords[:, 1] = num.repeat(xw, nl)
        base_coords = num.tile(base_coords, (npatches, 1))

        center_coords = num.zeros((npatches, 3))
        center_coords[:, 0] = num.repeat(
            num.arange(self.nx) * pln + pln / 2, self.ny) - self.length / 2
        center_coords[:, 1] = num.tile(
            num.arange(self.ny) * pwd + pwd / 2, self.nx) - self.width / 2

        base_coords -= center_coords.repeat(nsrc_patch, axis=0)
        nbaselocs = base_coords.shape[0]

        # expand with the time axis for space-time interpolation of the slip
        base_interp = base_coords.repeat(ntimes, axis=0)

        base_times = num.tile(slip_times, nbaselocs)
        base_interp[:, 0] -= anch_x * self.length / 2
        base_interp[:, 1] -= anch_y * self.width / 2
        base_interp[:, 2] = base_times

        _, _, _, _, time_interpolator, _ = self.get_vr_time_interpolators(
            store, interpolation=interpolation)

        time_eikonal_max = time_interpolator.values.max()

        nbasesrcs = base_interp.shape[0]
        delta_slip = slip_interp(base_interp).reshape(nbaselocs, ntimes, 3)
        lamb = lamb_interp(base_interp[:, :2]).ravel()
        mu = mu_interp(base_interp[:, :2]).ravel()

        if False:
            # debugging aid: visualize the cumulative slip distribution
            try:
                import matplotlib.pyplot as plt
                coords = base_coords.copy()
                norm = num.sum(num.linalg.norm(delta_slip, axis=2), axis=1)
                plt.scatter(coords[:, 0], coords[:, 1], c=norm)
                plt.show()
            except AttributeError:
                pass

        # rotate plane-local coordinates into NED and shift to the absolute
        # source position
        base_interp[:, 2] = 0.
        rotmat = num.asarray(
            pmt.euler_to_matrix(self.dip * d2r, self.strike * d2r, 0.0))
        base_interp = num.dot(rotmat.T, base_interp.T).T
        base_interp[:, 0] += self.north_shift
        base_interp[:, 1] += self.east_shift
        base_interp[:, 2] += self.depth

        slip_strike = delta_slip[:, :, 0].ravel()
        slip_dip = delta_slip[:, :, 1].ravel()
        slip_norm = delta_slip[:, :, 2].ravel()

        slip_shear = num.linalg.norm([slip_strike, slip_dip], axis=0)
        slip_rake = r2d * num.arctan2(slip_dip, slip_strike)

        # convert per-source slip increments into moment tensor components
        m6s = okada_ext.patch2m6(
            strikes=num.full(nbasesrcs, self.strike, dtype=num.float),
            dips=num.full(nbasesrcs, self.dip, dtype=num.float),
            rakes=slip_rake,
            disl_shear=slip_shear,
            disl_norm=slip_norm,
            lamb=lamb,
            mu=mu,
            nthreads=self.nthreads)

        m6s *= patch_area

        dl = -self.patches[0].al1 + self.patches[0].al2
        dw = -self.patches[0].aw1 + self.patches[0].aw2

        # clamp times beyond the final rupture front arrival
        base_times[base_times > time_eikonal_max] = time_eikonal_max

        ds = meta.DiscretizedMTSource(
            lat=self.lat,
            lon=self.lon,
            times=base_times + self.time,
            north_shifts=base_interp[:, 0],
            east_shifts=base_interp[:, 1],
            depths=base_interp[:, 2],
            m6s=m6s,
            dl=dl,
            dw=dw,
            nl=self.nx,
            nw=self.ny)

        return ds
3287 def calc_coef_mat(self):
3288 '''
3289 Calculate linear coefficient relating tractions and dislocations on the
3290 patches
3291 '''
3292 if not self.patches:
3293 raise ValueError(
3294 'Patches are needed. Please calculate them first.')
3296 self.coef_mat = make_okada_coefficient_matrix(
3297 self.patches, nthreads=self.nthreads, pure_shear=self.pure_shear)
3299 def get_patch_attribute(self, attr):
3300 '''
3301 Get array of patch attributes
3303 :param attr: Attribute of fault patches which shall be listed for
3304 all patches in an array (possible attributes: check
3305 :py:class`pyrocko.modelling.okada.OkadaSource`)
3306 :type attr: str
3308 :returns: Array of attribute values per fault patch
3309 :rtype: :py:class`numpy.ndarray`
3311 '''
3312 if not self.patches:
3313 raise ValueError(
3314 'Patches are needed. Please calculate them first.')
3315 return num.array([getattr(p, attr) for p in self.patches])
3317 def get_slip(
3318 self,
3319 time=None,
3320 scale_slip=True,
3321 interpolation='nearest_neighbor',
3322 **kwargs):
3323 '''
3324 Get slip per subfault patch for given time after rupture start
3326 :param time: time after origin [s], for which slip is computed. If not
3327 given, final static slip is returned
3328 :type time: optional, float > 0.
3329 :param scale_slip: If ``True`` and :py:attr:`slip` given, all slip
3330 values are scaled to fit the given maximum slip
3331 :type scale_slip: optional, bool
3332 :param interpolation: Kind of interpolation used. Choice between
3333 ``multilinear`` and ``nearest_neighbor``
3334 :type interpolation: optional, str
3336 :returns: inverted dislocations (:math:`u_{strike}, u_{dip} ,
3337 u_{tensile}`) for each source patch. order:
3339 .. math::
3341 &[\\\\
3342 &[u_{strike, patch1}, u_{dip, patch1}, u_{tensile, patch1}],\\\\
3343 &[u_{strike, patch2}, ...]]\\\\
3345 :rtype: :py:class:`numpy.ndarray`, ``(n_sources, 3)``
3346 '''
3348 if self.patches is None:
3349 raise ValueError(
3350 'Please discretize the source first (discretize_patches())')
3351 npatches = len(self.patches)
3352 tractions = self.get_tractions()
3353 time_patch_max = self.get_patch_attribute('time').max() - self.time
3355 time_patch = time
3356 if time is None:
3357 time_patch = time_patch_max
3359 if self.coef_mat is None:
3360 self.calc_coef_mat()
3362 if tractions.shape != (npatches, 3):
3363 raise AttributeError(
3364 'The traction vector is of invalid shape.'
3365 ' Required shape is (npatches, 3)')
3367 patch_mask = num.ones(npatches, dtype=num.bool)
3368 if self.patch_mask is not None:
3369 patch_mask = self.patch_mask
3371 times = self.get_patch_attribute('time') - self.time
3372 times[~patch_mask] = time_patch + 1. # exlcude unmasked patches
3373 relevant_sources = num.nonzero(times <= time_patch)[0]
3374 disloc_est = num.zeros_like(tractions)
3376 if self.smooth_rupture:
3377 patch_activation = num.zeros(npatches)
3379 nx, ny, times, vr, time_interpolator, vr_interpolator = \
3380 self.get_vr_time_interpolators(
3381 store, interpolation=interpolation)
3383 # Getting the native Eikonal grid, bit hackish
3384 points_x = num.round(time_interpolator.grid[0], decimals=2)
3385 points_y = num.round(time_interpolator.grid[1], decimals=2)
3386 times_eikonal = time_interpolator.values
3388 time_max = time
3389 if time is None:
3390 time_max = times_eikonal.max()
3392 for ip, p in enumerate(self.patches):
3393 ul = num.round((p.ix + p.al1, p.iy + p.aw1), decimals=2)
3394 lr = num.round((p.ix + p.al2, p.iy + p.aw2), decimals=2)
3396 idx_length = num.logical_and(
3397 points_x >= ul[0], points_x <= lr[0])
3398 idx_width = num.logical_and(
3399 points_y >= ul[1], points_y <= lr[1])
3401 times_patch = times_eikonal[num.ix_(idx_length, idx_width)]
3402 if times_patch.size == 0:
3403 raise AttributeError('could not use smooth_rupture')
3405 patch_activation[ip] = \
3406 (times_patch <= time_max).sum() / times_patch.size
3408 if time_patch == 0 and time_patch != time_patch_max:
3409 patch_activation[ip] = 0.
3411 patch_activation[~patch_mask] = 0. # exlcude unmasked patches
3413 relevant_sources = num.nonzero(patch_activation > 0.)[0]
3415 if relevant_sources.size == 0:
3416 return disloc_est
3418 indices_disl = num.repeat(relevant_sources * 3, 3)
3419 indices_disl[1::3] += 1
3420 indices_disl[2::3] += 2
3422 disloc_est[relevant_sources] = invert_fault_dislocations_bem(
3423 stress_field=tractions[relevant_sources, :].ravel(),
3424 coef_mat=self.coef_mat[indices_disl, :][:, indices_disl],
3425 pure_shear=self.pure_shear, nthreads=self.nthreads,
3426 epsilon=None,
3427 **kwargs)
3429 if self.smooth_rupture:
3430 disloc_est *= patch_activation[:, num.newaxis]
3432 if scale_slip and self.slip is not None:
3433 disloc_tmax = num.zeros(npatches)
3435 indices_disl = num.repeat(num.nonzero(patch_mask)[0] * 3, 3)
3436 indices_disl[1::3] += 1
3437 indices_disl[2::3] += 2
3439 disloc_tmax[patch_mask] = num.linalg.norm(
3440 invert_fault_dislocations_bem(
3441 stress_field=tractions[patch_mask, :].ravel(),
3442 coef_mat=self.coef_mat[indices_disl, :][:, indices_disl],
3443 pure_shear=self.pure_shear, nthreads=self.nthreads,
3444 epsilon=None,
3445 **kwargs), axis=1)
3447 disloc_tmax_max = disloc_tmax.max()
3448 if disloc_tmax_max == 0.:
3449 logger.warning(
3450 'slip scaling not performed. Maximum slip is 0.')
3452 disloc_est *= self.slip / disloc_tmax_max
3454 return disloc_est
3456 def get_delta_slip(
3457 self,
3458 store=None,
3459 deltat=None,
3460 delta=True,
3461 interpolation='nearest_neighbor',
3462 **kwargs):
3463 '''
3464 Get slip change inverted from patches depending on certain deltat
3466 The time intervall, within which the slip changes are computed is
3467 determined by the sampling rate of the Greens function database or
3468 ``deltat``. Arguments and keyword arguments needed for
3469 :py:meth:`get_slip`.
3471 :param store: Greens function database (needs to cover whole region of
3472 of the source). Its deltat [s] is used as time increment for slip
3473 difference calculation. Either ``deltat`` or ``store`` should be
3474 given.
3475 :type store: optional, :py:class:`pyrocko.gf.store.Store`
3476 :param deltat: time increment for slip difference calculation [s].
3477 Either ``deltat`` or ``store`` should be given.
3478 :type deltat: optional, float
3479 :param delta: If ``True``, slip differences between two time steps are
3480 given. If ``False``, cumulative slip for all time steps
3481 :type delta: optional, bool
3482 :param interpolation: Kind of interpolation used. Choice between
3483 ``multilinear`` and ``nearest_neighbor``
3484 :type interpolation: optional, str
3486 :returns: displacement changes(:math:`\\Delta u_{strike},
3487 \\Delta u_{dip} , \\Delta u_{tensile}`) for each source patch and
3488 time; corner times, for which delta slip is computed. The order of
3489 displacement changes array is:
3491 .. math::
3493 &[[\\\\
3494 &[\\Delta u_{strike, patch1, t1},
3495 \\Delta u_{dip, patch1, t1},
3496 \\Delta u_{tensile, patch1, t1}],\\\\
3497 &[\\Delta u_{strike, patch1, t2},
3498 \\Delta u_{dip, patch1, t2},
3499 \\Delta u_{tensile, patch1, t2}]\\\\
3500 &], [\\\\
3501 &[\\Delta u_{strike, patch2, t1}, ...],\\\\
3502 &[\\Delta u_{strike, patch2, t2}, ...]]]\\\\
3504 :rtype: :py:class:`numpy.ndarray`, ``(n_sources, n_times, 3)``
3505 :py:class:`numpy.ndarray`, ``(n_times, 1)``
3506 '''
3507 if store and deltat:
3508 raise AttributeError(
3509 'Argument collision. '
3510 'Please define only the store or the deltat argument.')
3512 if store:
3513 deltat = store.config.deltat
3515 if not deltat:
3516 raise AttributeError('Please give a GF store or set deltat.')
3518 npatches = len(self.patches)
3520 _, _, _, _, time_interpolator, _ = self.get_vr_time_interpolators(
3521 store, interpolation=interpolation)
3522 tmax = time_interpolator.values.max()
3524 calc_times = num.arange(0., tmax + deltat, deltat)
3525 calc_times[calc_times > tmax] = tmax
3527 disloc_est = num.zeros((npatches, calc_times.size, 3))
3529 for itime, t in enumerate(calc_times):
3530 disloc_est[:, itime, :] = self.get_slip(
3531 time=t, scale_slip=False, **kwargs)
3533 if self.slip:
3534 disloc_tmax = num.linalg.norm(
3535 self.get_slip(scale_slip=False, time=tmax),
3536 axis=1)
3538 disloc_tmax_max = disloc_tmax.max()
3539 if disloc_tmax_max == 0.:
3540 logger.warning(
3541 'Slip scaling not performed. Maximum slip is 0.')
3542 else:
3543 disloc_est *= self.slip / disloc_tmax_max
3545 if not delta:
3546 return disloc_est, calc_times
3548 # if we have only one timestep there is no gradient
3549 if calc_times.size > 1:
3550 disloc_init = disloc_est[:, 0, :]
3551 disloc_est = num.diff(disloc_est, axis=1)
3552 disloc_est = num.concatenate((
3553 disloc_init[:, num.newaxis, :], disloc_est), axis=1)
3555 calc_times = calc_times
3557 return disloc_est, calc_times
3559 def get_slip_rate(self, *args, **kwargs):
3560 '''
3561 Get slip rate inverted from patches
3563 The time intervall, within which the slip rates are computed is
3564 determined by the sampling rate of the Greens function database or
3565 ``deltat``. Arguments and keyword arguments needed for
3566 :py:meth:`get_delta_slip`.
3568 :returns: slip rates(:math:`\\Delta u_{strike}/\\Delta t`,
3569 :math:`\\Delta u_{dip}/\\Delta t, \\Delta u_{tensile}/\\Delta t`)
3570 for each source patch and time; corner times, for which slip rate
3571 is computed. The order of sliprate array is:
3573 .. math::
3575 &[[\\\\
3576 &[\\Delta u_{strike, patch1, t1}/\\Delta t,
3577 \\Delta u_{dip, patch1, t1}/\\Delta t,
3578 \\Delta u_{tensile, patch1, t1}/\\Delta t],\\\\
3579 &[\\Delta u_{strike, patch1, t2}/\\Delta t,
3580 \\Delta u_{dip, patch1, t2}/\\Delta t,
3581 \\Delta u_{tensile, patch1, t2}/\\Delta t]], [\\\\
3582 &[\\Delta u_{strike, patch2, t1}/\\Delta t, ...],\\\\
3583 &[\\Delta u_{strike, patch2, t2}/\\Delta t, ...]]]\\\\
3585 :rtype: :py:class:`numpy.ndarray`, ``(n_sources, n_times, 3)``
3586 :py:class:`numpy.ndarray`, ``(n_times, 1)``
3587 '''
3588 ddisloc_est, calc_times = self.get_delta_slip(
3589 *args, delta=True, **kwargs)
3591 dt = num.concatenate(
3592 [(num.diff(calc_times)[0], ), num.diff(calc_times)])
3593 slip_rate = num.linalg.norm(ddisloc_est, axis=2) / dt
3595 return slip_rate, calc_times
3597 def get_moment_rate_patches(self, *args, **kwargs):
3598 '''
3599 Get scalar seismic moment rate for each patch individually
3601 Arguments and keyword arguments needed for :py:meth:`get_slip_rate`.
3603 :returns: seismic moment rate for each
3604 source patch and time; corner times, for which patch moment rate is
3605 computed based on slip rate. The order of the moment rate array is:
3607 .. math::
3609 &[\\\\
3610 &[(\\Delta M / \\Delta t)_{patch1, t1},
3611 (\\Delta M / \\Delta t)_{patch1, t2}, ...],\\\\
3612 &[(\\Delta M / \\Delta t)_{patch2, t1},
3613 (\\Delta M / \\Delta t)_{patch, t2}, ...],\\\\
3614 &[...]]\\\\
3616 :rtype: :py:class:`numpy.ndarray`, ``(n_sources, n_times)``
3617 :py:class:`numpy.ndarray`, ``(n_times, 1)``
3618 '''
3619 slip_rate, calc_times = self.get_slip_rate(*args, **kwargs)
3621 shear_mod = self.get_patch_attribute('shearmod')
3622 p_length = self.get_patch_attribute('length')
3623 p_width = self.get_patch_attribute('width')
3625 dA = p_length * p_width
3627 mom_rate = shear_mod[:, num.newaxis] * slip_rate * dA[:, num.newaxis]
3629 return mom_rate, calc_times
3631 def get_moment_rate(self, store, target=None, deltat=None):
3632 '''
3633 Get seismic source moment rate for the total source (STF)
3635 :param store: Greens function database (needs to cover whole region of
3636 of the source). Its ``deltat`` [s] is used as time increment for
3637 slip difference calculation. Either ``deltat`` or ``store`` should
3638 be given.
3639 :type store: :py:class:`pyrocko.gf.store.Store`
3640 :param target: Target information, needed for interpolation method
3641 :type target: optional, :py:class:`pyrocko.gf.target.Target`
3642 :param deltat: time increment for slip difference calculation [s].
3643 If not given ``store.deltat`` is used.
3644 :type deltat: optional, float
3646 :return: seismic moment rate [Nm/s] for each time; corner times, for
3647 which moment rate is computed. The order of the moment rate array
3648 is:
3650 .. math::
3652 &[\\\\
3653 &(\\Delta M / \\Delta t)_{t1},\\\\
3654 &(\\Delta M / \\Delta t)_{t2},\\\\
3655 &...]\\\\
3657 :rtype: :py:class:`numpy.ndarray`, ``(n_times, 1)``
3658 :py:class:`numpy.ndarray`, ``(n_times, 1)``
3659 '''
3660 if not deltat:
3661 deltat = store.config.deltat
3662 return self.discretize_basesource(
3663 store, target=target).get_moment_rate(deltat)
3665 def get_moment(self, *args, **kwargs):
3666 '''
3667 Get seismic cumulative moment
3669 Arguments and keyword arguments needed for :py:meth:`get_magnitude`.
3671 :returns: cumulative seismic moment in [Nm]
3672 :rtype: float
3673 '''
3674 return float(pmt.magnitude_to_moment(self.get_magnitude(
3675 *args, **kwargs)))
3677 def rescale_slip(self, magnitude=None, moment=None, **kwargs):
3678 '''
3679 Rescale source slip based on given target magnitude or seismic moment.
3681 Rescale the maximum source slip to fit the source moment magnitude or
3682 seismic moment to the given target values. Either ``magnitude`` or
3683 ``moment`` need to be given. Arguments and keyword arguments needed for
3684 :py:meth:`get_moment`.
3686 :param magnitude: target moment magnitude :math:`M_\\mathrm{w}` as in
3687 [Hanks and Kanamori, 1979]
3688 :type magnitude: optional, float
3689 :param moment: target seismic moment :math:`M_0` [Nm]
3690 :type moment: optional, float`
3691 '''
3692 if self.slip is None:
3693 self.slip = 1.
3694 logger.warning('No slip found for rescaling. '
3695 'An initial slip of 1 m is assumed.')
3697 if magnitude is None and moment is None:
3698 raise ValueError(
3699 'Either target magnitude or moment need to be given.')
3701 moment_init = self.get_moment(**kwargs)
3703 if magnitude is not None:
3704 moment = pmt.magnitude_to_moment(magnitude)
3706 self.slip *= moment / moment_init
class DoubleDCSource(SourceWithMagnitude):
    '''
    Two double-couple point sources separated in space and time.

    Moment share between the sub-sources is controlled by the
    parameter mix.
    The position of the subsources is dependent on the moment
    distribution between the two sources. Depth, east and north
    shift are given for the centroid between the two double-couples.
    The subsources will positioned according to their moment shares
    around this centroid position.
    This is done according to their delta parameters, which are
    therefore in relation to that centroid.
    Note that depth of the subsources therefore can be
    depth+/-delta_depth. For shallow earthquakes therefore
    the depth has to be chosen deeper to avoid sampling
    above surface.
    '''

    strike1 = Float.T(
        default=0.0,
        help='strike direction in [deg], measured clockwise from north')

    dip1 = Float.T(
        default=90.0,
        help='dip angle in [deg], measured downward from horizontal')

    azimuth = Float.T(
        default=0.0,
        help='azimuth to second double-couple [deg], '
             'measured at first, clockwise from north')

    rake1 = Float.T(
        default=0.0,
        help='rake angle in [deg], '
             'measured counter-clockwise from right-horizontal '
             'in on-plane view')

    strike2 = Float.T(
        default=0.0,
        help='strike direction in [deg], measured clockwise from north')

    dip2 = Float.T(
        default=90.0,
        help='dip angle in [deg], measured downward from horizontal')

    rake2 = Float.T(
        default=0.0,
        help='rake angle in [deg], '
             'measured counter-clockwise from right-horizontal '
             'in on-plane view')

    delta_time = Float.T(
        default=0.0,
        help='separation of double-couples in time (t2-t1) [s]')

    delta_depth = Float.T(
        default=0.0,
        help='difference in depth (z2-z1) [m]')

    distance = Float.T(
        default=0.0,
        help='distance between the two double-couples [m]')

    mix = Float.T(
        default=0.5,
        help='how to distribute the moment to the two doublecouples '
             'mix=0 -> m1=1 and m2=0; mix=1 -> m1=0, m2=1')

    stf1 = STF.T(
        optional=True,
        help='Source time function of subsource 1 '
             '(if given, overrides STF from attribute :py:gattr:`Source.stf`)')

    stf2 = STF.T(
        optional=True,
        help='Source time function of subsource 2 '
             '(if given, overrides STF from attribute :py:gattr:`Source.stf`)')

    discretized_source_class = meta.DiscretizedMTSource

    def base_key(self):
        return (
            self.time, self.depth, self.lat, self.north_shift,
            self.lon, self.east_shift, type(self).__name__) + \
            self.effective_stf1_pre().base_key() + \
            self.effective_stf2_pre().base_key() + (
            self.strike1, self.dip1, self.rake1,
            self.strike2, self.dip2, self.rake2,
            self.delta_time, self.delta_depth,
            self.azimuth, self.distance, self.mix)

    def get_factor(self):
        return self.moment

    def effective_stf1_pre(self):
        return self.stf1 or self.stf or g_unit_pulse

    def effective_stf2_pre(self):
        return self.stf2 or self.stf or g_unit_pulse

    def effective_stf_post(self):
        return g_unit_pulse

    def split(self):
        '''
        Split into the two constituent :py:class:`DCSource` objects.

        Sub-source 1 carries moment share ``1 - mix``, sub-source 2 carries
        ``mix``. Offsets are weighted with the *other* sub-source's share so
        that the configured position and time remain the moment centroid.
        '''
        a1 = 1.0 - self.mix
        a2 = self.mix
        delta_north = math.cos(self.azimuth * d2r) * self.distance
        delta_east = math.sin(self.azimuth * d2r) * self.distance

        dc1 = DCSource(
            lat=self.lat,
            lon=self.lon,
            time=self.time - self.delta_time * a2,
            north_shift=self.north_shift - delta_north * a2,
            east_shift=self.east_shift - delta_east * a2,
            depth=self.depth - self.delta_depth * a2,
            moment=self.moment * a1,
            strike=self.strike1,
            dip=self.dip1,
            rake=self.rake1,
            stf=self.stf1 or self.stf)

        dc2 = DCSource(
            lat=self.lat,
            lon=self.lon,
            time=self.time + self.delta_time * a1,
            north_shift=self.north_shift + delta_north * a1,
            east_shift=self.east_shift + delta_east * a1,
            depth=self.depth + self.delta_depth * a1,
            moment=self.moment * a2,
            strike=self.strike2,
            dip=self.dip2,
            rake=self.rake2,
            stf=self.stf2 or self.stf)

        return [dc1, dc2]

    def discretize_basesource(self, store, target=None):
        a1 = 1.0 - self.mix
        a2 = self.mix
        mot1 = pmt.MomentTensor(strike=self.strike1, dip=self.dip1,
                                rake=self.rake1, scalar_moment=a1)
        mot2 = pmt.MomentTensor(strike=self.strike2, dip=self.dip2,
                                rake=self.rake2, scalar_moment=a2)

        delta_north = math.cos(self.azimuth * d2r) * self.distance
        delta_east = math.sin(self.azimuth * d2r) * self.distance

        # Offsets are weighted with the opposite sub-source's moment share
        # (-a2 for source 1, +a1 for source 2) so that time and position stay
        # the moment centroid, consistent with split().
        times1, amplitudes1 = self.effective_stf1_pre().discretize_t(
            store.config.deltat, self.time - self.delta_time * a2)

        times2, amplitudes2 = self.effective_stf2_pre().discretize_t(
            store.config.deltat, self.time + self.delta_time * a1)

        nt1 = times1.size
        nt2 = times2.size

        ds = meta.DiscretizedMTSource(
            lat=self.lat,
            lon=self.lon,
            times=num.concatenate((times1, times2)),
            north_shifts=num.concatenate((
                num.repeat(self.north_shift - delta_north * a2, nt1),
                num.repeat(self.north_shift + delta_north * a1, nt2))),
            east_shifts=num.concatenate((
                num.repeat(self.east_shift - delta_east * a2, nt1),
                num.repeat(self.east_shift + delta_east * a1, nt2))),
            depths=num.concatenate((
                num.repeat(self.depth - self.delta_depth * a2, nt1),
                num.repeat(self.depth + self.delta_depth * a1, nt2))),
            m6s=num.vstack((
                mot1.m6()[num.newaxis, :] * amplitudes1[:, num.newaxis],
                mot2.m6()[num.newaxis, :] * amplitudes2[:, num.newaxis])))

        return ds

    def pyrocko_moment_tensor(self, store=None, target=None):
        a1 = 1.0 - self.mix
        a2 = self.mix
        mot1 = pmt.MomentTensor(strike=self.strike1, dip=self.dip1,
                                rake=self.rake1,
                                scalar_moment=a1 * self.moment)
        mot2 = pmt.MomentTensor(strike=self.strike2, dip=self.dip2,
                                rake=self.rake2,
                                scalar_moment=a2 * self.moment)
        return pmt.MomentTensor(m=mot1.m() + mot2.m())

    def pyrocko_event(self, store=None, target=None, **kwargs):
        return SourceWithMagnitude.pyrocko_event(
            self, store, target,
            moment_tensor=self.pyrocko_moment_tensor(store, target),
            **kwargs)

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        d = {}
        mt = ev.moment_tensor
        if mt:
            (strike, dip, rake), _ = mt.both_strike_dip_rake()
            d.update(
                strike1=float(strike),
                dip1=float(dip),
                rake1=float(rake),
                strike2=float(strike),
                dip2=float(dip),
                rake2=float(rake),
                mix=0.0,
                magnitude=float(mt.moment_magnitude()))

        d.update(kwargs)
        source = super(DoubleDCSource, cls).from_pyrocko_event(ev, **d)
        source.stf1 = source.stf
        source.stf2 = HalfSinusoidSTF(effective_duration=0.)
        source.stf = None
        return source
class RingfaultSource(SourceWithMagnitude):
    '''
    A ring fault with vertical doublecouples.

    The ring is discretized into ``npointsources`` vertical double-couple
    point sources evenly distributed on a circle, rotated by strike and dip.
    '''

    diameter = Float.T(
        default=1.0,
        help='diameter of the ring in [m]')

    sign = Float.T(
        default=1.0,
        help='inside of the ring moves up (+1) or down (-1)')

    strike = Float.T(
        default=0.0,
        help='strike direction of the ring plane, clockwise from north,'
             ' in [deg]')

    dip = Float.T(
        default=0.0,
        help='dip angle of the ring plane from horizontal in [deg]')

    npointsources = Int.T(
        default=360,
        help='number of point sources to use')

    discretized_source_class = meta.DiscretizedMTSource

    def base_key(self):
        # Geometry parameters determining the discretization; sign and
        # moment only enter through get_factor.
        return Source.base_key(self) + (
            self.strike, self.dip, self.diameter, self.npointsources)

    def get_factor(self):
        return self.sign * self.moment

    def discretize_basesource(self, store=None, target=None):
        # NOTE(review): despite the ``store=None`` default, a store is
        # required below (store.config.deltat) -- confirm callers always
        # pass one.
        n = self.npointsources
        phi = num.linspace(0, 2.0 * num.pi, n, endpoint=False)

        # Points on a circle of the given diameter in the ring plane.
        points = num.zeros((n, 3))
        points[:, 0] = num.cos(phi) * 0.5 * self.diameter
        points[:, 1] = num.sin(phi) * 0.5 * self.diameter

        # Rotate the ring plane by dip and strike.
        rotmat = num.array(pmt.euler_to_matrix(
            self.dip * d2r, self.strike * d2r, 0.0))
        points = num.dot(rotmat.T, points.T).T  # NOTE: transposed rotation,
        # flagged as questionable by the original author ("!!! ?")

        points[:, 0] += self.north_shift
        points[:, 1] += self.east_shift
        points[:, 2] += self.depth

        # Unit vertical double-couple; total scalar moment 1 shared by the
        # n point sources.
        m = num.array(pmt.MomentTensor(strike=90., dip=90., rake=-90.,
                                       scalar_moment=1.0 / n).m())

        # Per-point rotation about the vertical axis by the ring angle phi.
        rotmats = num.transpose(
            [[num.cos(phi), num.sin(phi), num.zeros(n)],
             [-num.sin(phi), num.cos(phi), num.zeros(n)],
             [num.zeros(n), num.zeros(n), num.ones(n)]], (2, 0, 1))

        # Rotate each moment tensor first around the ring, then into the
        # (strike, dip) orientation of the ring plane.
        ms = num.zeros((n, 3, 3))
        for i in range(n):
            mtemp = num.dot(rotmats[i].T, num.dot(m, rotmats[i]))
            ms[i, :, :] = num.dot(rotmat.T, num.dot(mtemp, rotmat))

        # Pack symmetric tensors into 6-component form
        # (mnn, mee, mdd, mne, mnd, med).
        m6s = num.vstack((ms[:, 0, 0], ms[:, 1, 1], ms[:, 2, 2],
                          ms[:, 0, 1], ms[:, 0, 2], ms[:, 1, 2])).T

        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)

        nt = times.size

        # Replicate the STF samples for every ring point.
        return meta.DiscretizedMTSource(
            times=num.tile(times, n),
            lat=self.lat,
            lon=self.lon,
            north_shifts=num.repeat(points[:, 0], nt),
            east_shifts=num.repeat(points[:, 1], nt),
            depths=num.repeat(points[:, 2], nt),
            m6s=num.repeat(m6s, nt, axis=0) * num.tile(
                amplitudes, n)[:, num.newaxis])
class CombiSource(Source):
    '''
    Composite source model.

    Combines several sub-sources into one source; its reference point is
    the mean time, position and depth of the sub-sources.
    '''

    discretized_source_class = meta.DiscretizedMTSource

    subsources = List.T(Source.T())

    def __init__(self, subsources=[], **kwargs):
        '''
        :param subsources: sources to combine
        :raises BadRequest: if no sub-source is given
        '''
        if not subsources:
            raise BadRequest(
                'Need at least one sub-source to create a CombiSource object.')

        lats = num.array(
            [subsource.lat for subsource in subsources], dtype=float)
        lons = num.array(
            [subsource.lon for subsource in subsources], dtype=float)

        lat, lon = lats[0], lons[0]

        # Relocate all sub-sources to a common reference origin unless they
        # already share one. Note the parentheses: BOTH latitudes and
        # longitudes must match to skip relocation.
        if not (num.all(lats == lat) and num.all(lons == lon)):
            subsources = [s.clone() for s in subsources]
            for subsource in subsources[1:]:
                subsource.set_origin(lat, lon)

        depth = float(num.mean([p.depth for p in subsources]))
        time = float(num.mean([p.time for p in subsources]))
        north_shift = float(num.mean([p.north_shift for p in subsources]))
        east_shift = float(num.mean([p.east_shift for p in subsources]))
        kwargs.update(
            time=time,
            lat=float(lat),
            lon=float(lon),
            north_shift=north_shift,
            east_shift=east_shift,
            depth=depth)

        Source.__init__(self, subsources=subsources, **kwargs)

    def get_factor(self):
        return 1.0

    def discretize_basesource(self, store, target=None):
        '''
        Discretize all sub-sources and combine them, applying each
        sub-source's own scaling factor to its moment tensors.
        '''
        dsources = []
        for sf in self.subsources:
            ds = sf.discretize_basesource(store, target)
            ds.m6s *= sf.get_factor()
            dsources.append(ds)

        return meta.DiscretizedMTSource.combine(dsources)
class SFSource(Source):
    '''
    A single force point source.

    Supported GF schemes: `'elastic5'`.
    '''

    discretized_source_class = meta.DiscretizedSFSource

    fn = Float.T(
        default=0.,
        help='northward component of single force [N]')

    fe = Float.T(
        default=0.,
        help='eastward component of single force [N]')

    fd = Float.T(
        default=0.,
        help='downward component of single force [N]')

    def base_key(self):
        return Source.base_key(self) + (self.fn, self.fe, self.fd)

    def get_factor(self):
        return 1.0

    def discretize_basesource(self, store, target=None):
        '''
        Discretize to point forces, weighting the force vector with the
        STF amplitudes.
        '''
        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)
        forces = amplitudes[:, num.newaxis] * num.array(
            [[self.fn, self.fe, self.fd]], dtype=float)

        return meta.DiscretizedSFSource(forces=forces,
                                        **self._dparams_base_repeated(times))

    def pyrocko_event(self, store=None, target=None, **kwargs):
        return Source.pyrocko_event(
            self, store, target,
            **kwargs)

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        # No event attributes map onto single-force parameters; just forward
        # the keyword arguments.
        return super(SFSource, cls).from_pyrocko_event(ev, **kwargs)
class PorePressurePointSource(Source):
    '''
    Excess pore pressure point source.

    For poro-elastic initial value problem where an excess pore pressure is
    brought into a small source volume.
    '''

    discretized_source_class = meta.DiscretizedPorePressureSource

    pp = Float.T(
        default=1.0,
        help='initial excess pore pressure in [Pa]')

    def base_key(self):
        # Pore pressure enters only as a linear factor (see get_factor), so
        # it is not part of the GF lookup key.
        return Source.base_key(self)

    def get_factor(self):
        return self.pp

    def discretize_basesource(self, store, target=None):
        # Unit pore pressure at a single point; scaling by self.pp happens
        # via get_factor. NOTE(review): ``arr`` is presumably a
        # scalar-to-array helper defined elsewhere in this module -- confirm.
        return meta.DiscretizedPorePressureSource(pp=arr(1.0),
                                                  **self._dparams_base())
class PorePressureLineSource(Source):
    '''
    Excess pore pressure line source.

    The line source is centered at (north_shift, east_shift, depth).
    '''

    discretized_source_class = meta.DiscretizedPorePressureSource

    pp = Float.T(
        default=1.0,
        help='initial excess pore pressure in [Pa]')

    length = Float.T(
        default=0.0,
        help='length of the line source [m]')

    azimuth = Float.T(
        default=0.0,
        help='azimuth direction, clockwise from north [deg]')

    dip = Float.T(
        default=90.,
        help='dip direction, downward from horizontal [deg]')

    def base_key(self):
        return Source.base_key(self) + (self.azimuth, self.dip, self.length)

    def get_factor(self):
        return self.pp

    def discretize_basesource(self, store, target=None):
        '''
        Discretize the line into evenly spaced point sources sharing the
        total unit pore pressure.
        '''
        # At least two points per smallest GF store grid spacing.
        npoints = 2 * int(
            math.ceil(self.length / num.min(store.config.deltas))) + 1

        offsets = num.linspace(
            -0.5 * self.length, 0.5 * self.length, npoints)

        sin_az = math.sin(self.azimuth * d2r)
        cos_az = math.cos(self.azimuth * d2r)
        sin_dip = math.sin(self.dip * d2r)
        cos_dip = math.cos(self.dip * d2r)

        points = num.zeros((npoints, 3))
        points[:, 0] = self.north_shift + offsets * cos_az * cos_dip
        points[:, 1] = self.east_shift + offsets * sin_az * cos_dip
        points[:, 2] = self.depth + offsets * sin_dip

        return meta.DiscretizedPorePressureSource(
            times=util.num_full(npoints, self.time),
            lat=self.lat,
            lon=self.lon,
            north_shifts=points[:, 0],
            east_shifts=points[:, 1],
            depths=points[:, 2],
            pp=num.ones(npoints) / npoints)
class Request(Object):
    '''
    Synthetic seismogram computation request.

    ::

        Request(**kwargs)
        Request(sources, targets, **kwargs)
    '''

    sources = List.T(
        Source.T(),
        help='list of sources for which to produce synthetics.')

    targets = List.T(
        Target.T(),
        help='list of targets for which to produce synthetics.')

    @classmethod
    def args2kwargs(cls, args):
        '''
        Map positional constructor arguments to keyword arguments.
        '''
        # NOTE(review): three positional arguments are accepted but silently
        # ignored below -- presumably a leftover; confirm before tightening.
        if len(args) not in (0, 2, 3):
            raise BadRequest('Invalid arguments.')

        if len(args) == 2:
            return dict(sources=args[0], targets=args[1])
        else:
            return {}

    def __init__(self, *args, **kwargs):
        kwargs.update(self.args2kwargs(args))
        sources = kwargs.pop('sources', [])
        targets = kwargs.pop('targets', [])

        # Allow passing single objects instead of lists.
        if isinstance(sources, Source):
            sources = [sources]

        if isinstance(targets, (Target, StaticTarget)):
            targets = [targets]

        Object.__init__(self, sources=sources, targets=targets, **kwargs)

    @property
    def targets_dynamic(self):
        # Waveform targets only.
        return [t for t in self.targets if isinstance(t, Target)]

    @property
    def targets_static(self):
        # Static displacement targets only.
        return [t for t in self.targets if isinstance(t, StaticTarget)]

    @property
    def has_dynamic(self):
        return bool(self.targets_dynamic)

    @property
    def has_statics(self):
        return bool(self.targets_static)

    def subsources_map(self):
        '''
        Group sources by their GF base key.
        '''
        m = defaultdict(list)
        for source in self.sources:
            m[source.base_key()].append(source)

        return m

    def subtargets_map(self):
        '''
        Group targets by their base key.
        '''
        m = defaultdict(list)
        for target in self.targets:
            m[target.base_key()].append(target)

        return m

    def subrequest_map(self):
        '''
        Build the cross product of source groups and target groups.
        '''
        ms = self.subsources_map()
        mt = self.subtargets_map()
        m = {}
        for (ks, ls) in ms.items():
            for (kt, lt) in mt.items():
                m[ks, kt] = (ls, lt)

        return m
class ProcessingStats(Object):
    '''
    Timing and counting statistics of one processing run.
    '''

    # Relative time shares (``t_perc_*``) of the dynamic processing stages.
    t_perc_get_store_and_receiver = Float.T(default=0.)
    t_perc_discretize_source = Float.T(default=0.)
    t_perc_make_base_seismogram = Float.T(default=0.)
    t_perc_make_same_span = Float.T(default=0.)
    t_perc_post_process = Float.T(default=0.)
    t_perc_optimize = Float.T(default=0.)
    t_perc_stack = Float.T(default=0.)
    # Relative time shares of the static (displacement) processing stages.
    t_perc_static_get_store = Float.T(default=0.)
    t_perc_static_discretize_basesource = Float.T(default=0.)
    t_perc_static_sum_statics = Float.T(default=0.)
    t_perc_static_post_process = Float.T(default=0.)
    # Absolute times in [s].
    t_wallclock = Float.T(default=0.)
    t_cpu = Float.T(default=0.)
    # Counters of processed entities.
    n_read_blocks = Int.T(default=0)
    n_results = Int.T(default=0)
    n_subrequests = Int.T(default=0)
    n_stores = Int.T(default=0)
    n_records_stacked = Int.T(default=0)
class Response(Object):
    '''
    Response object to a synthetic seismogram computation request.
    '''

    request = Request.T()
    results_list = List.T(List.T(meta.SeismosizerResult.T()))
    stats = ProcessingStats.T()

    def pyrocko_traces(self):
        '''
        Return a list of requested
        :class:`~pyrocko.trace.Trace` instances.
        '''
        return [
            result.trace.pyrocko_trace()
            for results in self.results_list
            for result in results
            if isinstance(result, meta.Result)]

    def kite_scenes(self):
        '''
        Return a list of requested
        :class:`~kite.scenes` instances.
        '''
        return [
            result.get_scene()
            for results in self.results_list
            for result in results
            if isinstance(result, meta.KiteSceneResult)]

    def static_results(self):
        '''
        Return a list of requested
        :class:`~pyrocko.gf.meta.StaticResult` instances.
        '''
        return [
            result
            for results in self.results_list
            for result in results
            if isinstance(result, meta.StaticResult)]

    def iter_results(self, get='pyrocko_traces'):
        '''
        Generator function to iterate over results of request.

        Yields associated :py:class:`Source`,
        :class:`~pyrocko.gf.targets.Target`,
        :class:`~pyrocko.trace.Trace` instances in each iteration.
        '''
        # NOTE(review): unknown values of ``get`` silently yield nothing --
        # presumably intentional; confirm before raising instead.
        for isource, source in enumerate(self.request.sources):
            for itarget, target in enumerate(self.request.targets):
                result = self.results_list[isource][itarget]
                if get == 'pyrocko_traces':
                    yield source, target, result.trace.pyrocko_trace()
                elif get == 'results':
                    yield source, target, result

    def snuffle(self, **kwargs):
        '''
        Open *snuffler* with requested traces.
        '''
        trace.snuffle(self.pyrocko_traces(), **kwargs)
class Engine(Object):
    '''
    Base class for synthetic seismogram calculators.
    '''

    def get_store_ids(self):
        '''
        Get list of available GF store IDs
        '''

        # Base implementation knows no stores; subclasses override this.
        return []
class Rule(object):
    '''
    Base class for rules which combine GF store components into a
    requested output quantity (see ``channel_rules`` / ``static_rules``).
    '''
    pass
class VectorRule(Rule):
    '''
    Rule combining north/east/down GF components into a vector quantity
    projected onto the target's measurement direction.
    '''

    def __init__(self, quantity, differentiate=0, integrate=0):
        self.components = ['%s.%s' % (quantity, c) for c in 'ned']
        self.differentiate = differentiate
        self.integrate = integrate

    def _factors_and_components(self, target):
        # Pair each component name with its projection factor derived
        # from the target's azimuth/dip.
        north, east, down = self.components
        sa, ca, sd, cd = target.get_sin_cos_factors()
        return ((ca * cd, north), (sa * cd, east), (sd, down))

    def required_components(self, target):
        return tuple(
            comp for factor, comp in self._factors_and_components(target)
            if nonzero(factor))

    def apply_(self, target, base_seismogram):
        data = 0.0
        for factor, comp in self._factors_and_components(target):
            if nonzero(factor):
                data = data + base_seismogram[comp].data * factor
                deltat = base_seismogram[comp].deltat

        if self.differentiate:
            data = util.diff_fd(self.differentiate, 4, deltat, data)

        if self.integrate:
            raise NotImplementedError('Integration is not implemented yet.')

        return data
class HorizontalVectorRule(Rule):
    '''
    Rule combining north/east GF components into a horizontal vector
    quantity projected onto the target's azimuth.
    '''

    def __init__(self, quantity, differentiate=0, integrate=0):
        self.components = ['%s.%s' % (quantity, c) for c in 'ne']
        self.differentiate = differentiate
        self.integrate = integrate

    def _factors_and_components(self, target):
        north, east = self.components
        sa, ca, _, _ = target.get_sin_cos_factors()
        return ((ca, north), (sa, east))

    def required_components(self, target):
        return tuple(
            comp for factor, comp in self._factors_and_components(target)
            if nonzero(factor))

    def apply_(self, target, base_seismogram):
        data = 0.0
        for factor, comp in self._factors_and_components(target):
            if nonzero(factor):
                data = data + base_seismogram[comp].data * factor

        if self.differentiate:
            # NOTE(review): reads the east component's deltat even when
            # only the north component contributed — matches the original
            # behavior.
            deltat = base_seismogram[self.components[1]].deltat
            data = util.diff_fd(self.differentiate, 4, deltat, data)

        if self.integrate:
            raise NotImplementedError('Integration is not implemented yet.')

        return data
class ScalarRule(Rule):
    '''
    Rule passing through a scalar quantity stored as a single GF
    component, with optional numerical differentiation.
    '''

    def __init__(self, quantity, differentiate=0):
        self.c = quantity
        # Bug fix: ``differentiate`` was previously discarded, so
        # ``apply_`` raised AttributeError when accessing
        # ``self.differentiate``.
        self.differentiate = differentiate

    def required_components(self, target):
        return (self.c, )

    def apply_(self, target, base_seismogram):
        # Copy so callers cannot mutate the cached base seismogram.
        data = base_seismogram[self.c].data.copy()
        deltat = base_seismogram[self.c].deltat
        if self.differentiate:
            data = util.diff_fd(self.differentiate, 4, deltat, data)

        return data
class StaticDisplacement(Rule):
    '''
    Rule for static displacements; adds a line-of-sight projection for
    satellite targets.
    '''

    def required_components(self, target):
        return tuple('displacement.%s' % c for c in 'ned')

    def apply_(self, target, base_statics):
        if isinstance(target, SatelliteTarget):
            # Project NED displacements onto the satellite's
            # line-of-sight unit vectors.
            los_fac = target.get_los_factors()
            base_statics['displacement.los'] = (
                los_fac[:, 0] * -base_statics['displacement.d']
                + los_fac[:, 1] * base_statics['displacement.e']
                + los_fac[:, 2] * base_statics['displacement.n'])

        return base_statics
# Candidate rules per dynamic quantity, in order of preference. The
# first rule whose required components are all provided by the GF store
# is chosen (see LocalEngine.get_rule); e.g. velocity falls back to
# differentiating displacement if no velocity components are stored.
channel_rules = {
    'displacement': [VectorRule('displacement')],
    'rotation': [VectorRule('rotation')],
    'velocity': [
        VectorRule('velocity'),
        VectorRule('displacement', differentiate=1)],
    'acceleration': [
        VectorRule('acceleration'),
        VectorRule('velocity', differentiate=1),
        VectorRule('displacement', differentiate=2)],
    'pore_pressure': [ScalarRule('pore_pressure')],
    'vertical_tilt': [HorizontalVectorRule('vertical_tilt')],
    'darcy_velocity': [VectorRule('darcy_velocity')],
}

# Candidate rules per static (snapshot) quantity.
static_rules = {
    'displacement': [StaticDisplacement()]
}
class OutOfBoundsContext(Object):
    '''
    Context information attached to out-of-bounds errors raised during
    seismogram processing, identifying the offending source/target pair.
    '''

    source = Source.T()
    target = Target.T()
    distance = Float.T()
    components = List.T(String.T())
def process_dynamic_timeseries(work, psources, ptargets, engine, nthreads=0):
    '''
    Process dynamic targets batch-wise via ``engine.base_seismograms``.

    Generator yielding ``((isource, itarget, result), tcounters)`` for
    every source/target pair covered by ``work``.
    '''
    dsource_cache = {}
    # Placeholder counters: per-stage timings are not measured in this
    # code path; num.diff over these yields equal weights.
    tcounters = list(range(6))

    store_ids = set()
    sources = set()
    targets = set()

    # Remember each target's position in ptargets, so results can be
    # attributed after targets were regrouped by store/rate/interpolation.
    for itarget, target in enumerate(ptargets):
        target._id = itarget

    for w in work:
        _, _, isources, itargets = w

        sources.update([psources[isource] for isource in isources])
        targets.update([ptargets[itarget] for itarget in itargets])

    store_ids = set([t.store_id for t in targets])

    for isource, source in enumerate(psources):

        components = set()
        for itarget, target in enumerate(targets):
            rule = engine.get_rule(source, target)
            components.update(rule.required_components(target))

        for store_id in store_ids:
            store_targets = [t for t in targets if t.store_id == store_id]

            sample_rates = set([t.sample_rate for t in store_targets])
            interpolations = set([t.interpolation for t in store_targets])

            base_seismograms = []
            store_targets_out = []

            # Group targets by (sample rate, interpolation); the order of
            # store_targets_out stays aligned with base_seismograms.
            for samp_rate in sample_rates:
                for interp in interpolations:
                    engine_targets = [
                        t for t in store_targets if t.sample_rate == samp_rate
                        and t.interpolation == interp]

                    if not engine_targets:
                        continue

                    store_targets_out += engine_targets

                    base_seismograms += engine.base_seismograms(
                        source,
                        engine_targets,
                        components,
                        dsource_cache,
                        nthreads)

            for iseis, seismogram in enumerate(base_seismograms):
                for tr in seismogram.values():
                    if tr.err != store.SeismosizerErrorEnum.SUCCESS:
                        # Bug fix: index into ``store_targets_out`` which is
                        # aligned with ``base_seismograms``. The unfiltered
                        # ``store_targets`` list is not, when targets differ
                        # in sample rate or interpolation.
                        e = SeismosizerError(
                            'Seismosizer failed with return code %i\n%s' % (
                                tr.err, str(
                                    OutOfBoundsContext(
                                        source=source,
                                        target=store_targets_out[iseis],
                                        distance=source.distance_to(
                                            store_targets_out[iseis]),
                                        components=components))))
                        raise e

            for seismogram, target in zip(base_seismograms, store_targets_out):

                try:
                    result = engine._post_process_dynamic(
                        seismogram, source, target)
                except SeismosizerError as e:
                    result = e

                yield (isource, target._id, result), tcounters
def process_dynamic(work, psources, ptargets, engine, nthreads=0):
    '''
    Process dynamic targets one source/target pair at a time via
    ``engine.base_seismogram``.

    Generator yielding ``((isource, itarget, result), tcounters)``;
    ``tcounters`` are wallclock timestamps delimiting the processing
    stages (store access, discretization, stacking, post-processing).
    '''
    dsource_cache = {}

    for w in work:
        _, _, isources, itargets = w

        sources = [psources[isource] for isource in isources]
        targets = [ptargets[itarget] for itarget in itargets]

        # Collect all GF components needed by any target of this
        # subrequest; rules are looked up against the first source.
        components = set()
        for target in targets:
            rule = engine.get_rule(sources[0], target)
            components.update(rule.required_components(target))

        for isource, source in zip(isources, sources):
            for itarget, target in zip(itargets, targets):

                try:
                    base_seismogram, tcounters = engine.base_seismogram(
                        source, target, components, dsource_cache, nthreads)
                except meta.OutOfBounds as e:
                    # Attach context for diagnostics before re-raising.
                    e.context = OutOfBoundsContext(
                        source=sources[0],
                        target=targets[0],
                        distance=sources[0].distance_to(targets[0]),
                        components=components)
                    raise

                # Aggregate stacking statistics over all components.
                n_records_stacked = 0
                t_optimize = 0.0
                t_stack = 0.0

                for _, tr in base_seismogram.items():
                    n_records_stacked += tr.n_records_stacked
                    t_optimize += tr.t_optimize
                    t_stack += tr.t_stack

                try:
                    result = engine._post_process_dynamic(
                        base_seismogram, source, target)
                    result.n_records_stacked = n_records_stacked
                    # Stacking work is shared by all pairs of this
                    # subrequest; used later to de-duplicate statistics.
                    result.n_shared_stacking = len(sources) *\
                        len(targets)
                    result.t_optimize = t_optimize
                    result.t_stack = t_stack
                except SeismosizerError as e:
                    # Report the failure as the result for this pair.
                    result = e

                tcounters.append(xtime())
                yield (isource, itarget, result), tcounters
def process_static(work, psources, ptargets, engine, nthreads=0):
    '''
    Process static targets one source/target pair at a time via
    ``engine.base_statics``.

    Generator yielding ``((isource, itarget, result), tcounters)``;
    ``tcounters`` are wallclock timestamps delimiting the processing
    stages.
    '''
    for w in work:
        _, _, isources, itargets = w

        sources = [psources[isource] for isource in isources]
        targets = [ptargets[itarget] for itarget in itargets]

        for isource, source in zip(isources, sources):
            for itarget, target in zip(itargets, targets):
                components = engine.get_rule(source, target)\
                    .required_components(target)

                try:
                    base_statics, tcounters = engine.base_statics(
                        source, target, components, nthreads)
                except meta.OutOfBounds as e:
                    # Attach context for diagnostics before re-raising.
                    e.context = OutOfBoundsContext(
                        source=sources[0],
                        target=targets[0],
                        distance=float('nan'),
                        components=components)
                    raise
                result = engine._post_process_statics(
                    base_statics, source, target)
                tcounters.append(xtime())

                yield (isource, itarget, result), tcounters
class LocalEngine(Engine):
    '''
    Offline synthetic seismogram calculator.

    :param use_env: if ``True``, fill :py:attr:`store_superdirs` and
        :py:attr:`store_dirs` with paths set in environment variables
        GF_STORE_SUPERDIRS and GF_STORE_DIRS.
    :param use_config: if ``True``, fill :py:attr:`store_superdirs` and
        :py:attr:`store_dirs` with paths set in the user's config file.

        The config file can be found at :file:`~/.pyrocko/config.pf`

        .. code-block :: python

            gf_store_dirs: ['/home/pyrocko/gf_stores/ak135/']
            gf_store_superdirs: ['/home/pyrocko/gf_stores/']
    '''

    store_superdirs = List.T(
        String.T(),
        help='directories which are searched for Green\'s function stores')

    store_dirs = List.T(
        String.T(),
        help='additional individual Green\'s function store directories')

    default_store_id = String.T(
        optional=True,
        help='default store ID to be used when a request does not provide '
             'one')

    def __init__(self, **kwargs):
        use_env = kwargs.pop('use_env', False)
        use_config = kwargs.pop('use_config', False)
        Engine.__init__(self, **kwargs)
        if use_env:
            env_store_superdirs = os.environ.get('GF_STORE_SUPERDIRS', '')
            env_store_dirs = os.environ.get('GF_STORE_DIRS', '')
            if env_store_superdirs:
                self.store_superdirs.extend(env_store_superdirs.split(':'))

            if env_store_dirs:
                self.store_dirs.extend(env_store_dirs.split(':'))

        if use_config:
            c = config.config()
            self.store_superdirs.extend(c.gf_store_superdirs)
            self.store_dirs.extend(c.gf_store_dirs)

        self._check_store_dirs_type()
        self._id_to_store_dir = {}
        self._open_stores = {}
        self._effective_default_store_id = None

    def _check_store_dirs_type(self):
        # Guard against a common misconfiguration: a plain string instead
        # of a list of strings.
        for sdir in ['store_dirs', 'store_superdirs']:
            if not isinstance(self.__getattribute__(sdir), list):
                raise TypeError("{} of {} is not of type list".format(
                    sdir, self.__class__.__name__))

    def _get_store_id(self, store_dir):
        # Open the store just long enough to read its configured ID.
        store_ = store.Store(store_dir)
        store_id = store_.config.id
        store_.close()
        return store_id

    def _looks_like_store_dir(self, store_dir):
        return os.path.isdir(store_dir) and \
            all(os.path.isfile(pjoin(store_dir, x)) for x in
                ('index', 'traces', 'config'))

    def iter_store_dirs(self):
        '''
        Collect all candidate GF store directories from
        :py:attr:`store_superdirs` and :py:attr:`store_dirs`.
        '''
        store_dirs = set()
        for d in self.store_superdirs:
            if not os.path.exists(d):
                logger.warning('store_superdir not available: %s' % d)
                continue

            for entry in os.listdir(d):
                store_dir = os.path.realpath(pjoin(d, entry))
                if self._looks_like_store_dir(store_dir):
                    store_dirs.add(store_dir)

        for store_dir in self.store_dirs:
            store_dirs.add(os.path.realpath(store_dir))

        return store_dirs

    def _scan_stores(self):
        # Build the store-ID -> directory mapping, detecting ID clashes.
        for store_dir in self.iter_store_dirs():
            store_id = self._get_store_id(store_dir)
            if store_id not in self._id_to_store_dir:
                self._id_to_store_dir[store_id] = store_dir
            else:
                if store_dir != self._id_to_store_dir[store_id]:
                    raise DuplicateStoreId(
                        'GF store ID %s is used in (at least) two '
                        'different stores. Locations are: %s and %s' %
                        (store_id, self._id_to_store_dir[store_id], store_dir))

    def get_store_dir(self, store_id):
        '''
        Lookup directory given a GF store ID.
        '''

        if store_id not in self._id_to_store_dir:
            self._scan_stores()

        if store_id not in self._id_to_store_dir:
            raise NoSuchStore(store_id, self.iter_store_dirs())

        return self._id_to_store_dir[store_id]

    def get_store_ids(self):
        '''
        Get list of available store IDs.
        '''

        self._scan_stores()
        return sorted(self._id_to_store_dir.keys())

    def effective_default_store_id(self):
        # Fall back to the only available store if exactly one exists and
        # no explicit default was configured.
        if self._effective_default_store_id is None:
            if self.default_store_id is None:
                store_ids = self.get_store_ids()
                if len(store_ids) == 1:
                    self._effective_default_store_id = store_ids[0]
                else:
                    raise NoDefaultStoreSet()
            else:
                self._effective_default_store_id = self.default_store_id

        return self._effective_default_store_id

    def get_store(self, store_id=None):
        '''
        Get a store from the engine.

        :param store_id: identifier of the store (optional)
        :returns: :py:class:`~pyrocko.gf.store.Store` object

        If no ``store_id`` is provided the store
        associated with the :py:attr:`default_store_id` is returned.
        Raises :py:exc:`NoDefaultStoreSet` if :py:attr:`default_store_id` is
        undefined.
        '''

        if store_id is None:
            store_id = self.effective_default_store_id()

        if store_id not in self._open_stores:
            store_dir = self.get_store_dir(store_id)
            self._open_stores[store_id] = store.Store(store_dir)

        return self._open_stores[store_id]

    def get_store_config(self, store_id):
        # Renamed local to avoid shadowing the ``store`` module.
        store_ = self.get_store(store_id)
        return store_.config

    def get_store_extra(self, store_id, key):
        store_ = self.get_store(store_id)
        return store_.get_extra(key)

    def close_cashed_stores(self):
        '''
        Close and remove ids from cached stores.
        '''
        store_ids = []
        for store_id, store_ in self._open_stores.items():
            store_.close()
            store_ids.append(store_id)

        for store_id in store_ids:
            self._open_stores.pop(store_id)

    def get_rule(self, source, target):
        '''
        Select the first rule which can compute the target's quantity
        from the components provided by the target's GF store.
        '''
        cprovided = self.get_store(target.store_id).get_provided_components()

        if isinstance(target, StaticTarget):
            quantity = target.quantity
            available_rules = static_rules
        elif isinstance(target, Target):
            quantity = target.effective_quantity()
            available_rules = channel_rules
        else:
            # Fix: previously an unsupported target type fell through with
            # ``quantity`` unbound, raising a confusing NameError below.
            raise BadRequest(
                'Unsupported target type: %s' % target.__class__.__name__)

        try:
            for rule in available_rules[quantity]:
                cneeded = rule.required_components(target)
                if all(c in cprovided for c in cneeded):
                    return rule

        except KeyError:
            pass

        # Fix: report ``quantity`` directly; calling effective_quantity()
        # here crashed for StaticTargets which do not implement it.
        raise BadRequest(
            'No rule to calculate "%s" with GFs from store "%s" '
            'for source model "%s".' % (
                quantity,
                target.store_id,
                source.__class__.__name__))

    def _cached_discretize_basesource(self, source, store, cache, target):
        # Cache per (source, store) — discretization is expensive and
        # reused across targets.
        if (source, store) not in cache:
            cache[source, store] = source.discretize_basesource(store, target)

        return cache[source, store]

    def base_seismograms(self, source, targets, components, dsource_cache,
                         nthreads=0):
        '''
        Compute base seismograms for one source and a homogeneous batch
        of targets (same store, sample rate and interpolation).
        '''

        target = targets[0]

        interp = set([t.interpolation for t in targets])
        if len(interp) > 1:
            raise BadRequest('Targets have different interpolation schemes.')

        rates = set([t.sample_rate for t in targets])
        if len(rates) > 1:
            raise BadRequest('Targets have different sample rates.')

        store_ = self.get_store(target.store_id)
        receivers = [t.receiver(store_) for t in targets]

        if target.sample_rate is not None:
            deltat = 1. / target.sample_rate
            rate = target.sample_rate
        else:
            deltat = None
            rate = store_.config.sample_rate

        tmin = num.fromiter(
            (t.tmin for t in targets), dtype=float, count=len(targets))
        tmax = num.fromiter(
            (t.tmax for t in targets), dtype=float, count=len(targets))

        itmin = num.floor(tmin * rate).astype(num.int64)
        itmax = num.ceil(tmax * rate).astype(num.int64)
        nsamples = itmax - itmin + 1

        # Targets without a time window (NaN tmin) get the full span
        # (nsamples == -1 is the "all" sentinel).
        mask = num.isnan(tmin)
        itmin[mask] = 0
        nsamples[mask] = -1

        base_source = self._cached_discretize_basesource(
            source, store_, dsource_cache, target)

        base_seismograms = store_.calc_seismograms(
            base_source, receivers, components,
            deltat=deltat,
            itmin=itmin, nsamples=nsamples,
            interpolation=target.interpolation,
            optimization=target.optimization,
            nthreads=nthreads)

        for i, base_seismogram in enumerate(base_seismograms):
            base_seismograms[i] = store.make_same_span(base_seismogram)

        return base_seismograms

    def base_seismogram(self, source, target, components, dsource_cache,
                        nthreads):
        '''
        Compute the base seismogram for a single source/target pair,
        collecting timing counters between processing stages.
        '''

        tcounters = [xtime()]

        store_ = self.get_store(target.store_id)
        receiver = target.receiver(store_)

        # Fix: compare both bounds against None explicitly; a tmin of
        # exactly 0.0 is valid but was previously treated as unset due to
        # the truthiness test.
        if target.tmin is not None and target.tmax is not None:
            rate = store_.config.sample_rate
            itmin = int(num.floor(target.tmin * rate))
            itmax = int(num.ceil(target.tmax * rate))
            nsamples = itmax - itmin + 1
        else:
            itmin = None
            nsamples = None

        tcounters.append(xtime())
        base_source = self._cached_discretize_basesource(
            source, store_, dsource_cache, target)

        tcounters.append(xtime())

        if target.sample_rate is not None:
            deltat = 1. / target.sample_rate
        else:
            deltat = None

        base_seismogram = store_.seismogram(
            base_source, receiver, components,
            deltat=deltat,
            itmin=itmin, nsamples=nsamples,
            interpolation=target.interpolation,
            optimization=target.optimization,
            nthreads=nthreads)

        tcounters.append(xtime())

        base_seismogram = store.make_same_span(base_seismogram)

        tcounters.append(xtime())

        return base_seismogram, tcounters

    def base_statics(self, source, target, components, nthreads):
        '''
        Compute raw static displacements for a single source/target pair,
        collecting timing counters between processing stages.
        '''
        tcounters = [xtime()]
        store_ = self.get_store(target.store_id)

        if target.tsnapshot is not None:
            rate = store_.config.sample_rate
            itsnapshot = int(num.floor(target.tsnapshot * rate))
        else:
            itsnapshot = None
        tcounters.append(xtime())

        base_source = source.discretize_basesource(store_, target=target)

        tcounters.append(xtime())

        base_statics = store_.statics(
            base_source,
            target,
            itsnapshot,
            components,
            target.interpolation,
            nthreads)

        tcounters.append(xtime())

        return base_statics, tcounters

    def _post_process_dynamic(self, base_seismogram, source, target):
        '''
        Combine base seismogram components according to the matching
        rule, scale, convolve with the source's post-STF and hand the
        trace to the target for final post-processing.
        '''
        base_any = next(iter(base_seismogram.values()))
        deltat = base_any.deltat
        itmin = base_any.itmin

        rule = self.get_rule(source, target)
        data = rule.apply_(target, base_seismogram)

        factor = source.get_factor() * target.get_factor()
        if factor != 1.0:
            data = data * factor

        stf = source.effective_stf_post()

        times, amplitudes = stf.discretize_t(
            deltat, 0.0)

        # repeat end point to prevent boundary effects
        padded_data = num.empty(data.size + amplitudes.size, dtype=float)
        padded_data[:data.size] = data
        padded_data[data.size:] = data[-1]
        data = num.convolve(amplitudes, padded_data)

        tmin = itmin * deltat + times[0]

        tr = meta.SeismosizerTrace(
            codes=target.codes,
            data=data[:-amplitudes.size],
            deltat=deltat,
            tmin=tmin)

        return target.post_process(self, source, tr)

    def _post_process_statics(self, base_statics, source, starget):
        '''
        Combine raw statics according to the matching rule, scale them
        in place and hand them to the target for final post-processing.
        '''
        rule = self.get_rule(source, starget)
        # NOTE: ``data`` aliases ``base_statics`` (the rule mutates and
        # returns it), so the in-place scaling below affects both.
        data = rule.apply_(starget, base_statics)

        factor = source.get_factor()
        if factor != 1.0:
            for v in data.values():
                v *= factor

        return starget.post_process(self, source, base_statics)

    def process(self, *args, **kwargs):
        '''
        Process a request.

        ::

            process(**kwargs)
            process(request, **kwargs)
            process(sources, targets, **kwargs)

        The request can be given as a :py:class:`Request` object, or such an
        object is created using ``Request(**kwargs)`` for convenience.

        :returns: :py:class:`Response` object
        '''

        if len(args) not in (0, 1, 2):
            raise BadRequest('Invalid arguments.')

        if len(args) == 1:
            kwargs['request'] = args[0]

        elif len(args) == 2:
            kwargs.update(Request.args2kwargs(args))

        request = kwargs.pop('request', None)
        status_callback = kwargs.pop('status_callback', None)
        calc_timeseries = kwargs.pop('calc_timeseries', True)

        # ``nprocs`` is accepted as a legacy alias for ``nthreads``.
        nprocs = kwargs.pop('nprocs', None)
        nthreads = kwargs.pop('nthreads', 1)
        if nprocs is not None:
            nthreads = nprocs

        if request is None:
            request = Request(**kwargs)

        if resource:
            rs0 = resource.getrusage(resource.RUSAGE_SELF)
            rc0 = resource.getrusage(resource.RUSAGE_CHILDREN)

        # Fix: take the wallclock start time unconditionally. It was
        # previously only set when the ``resource`` module was available,
        # causing a NameError below on platforms without it.
        tt0 = xtime()

        # make sure stores are open before fork()
        store_ids = set(target.store_id for target in request.targets)
        for store_id in store_ids:
            self.get_store(store_id)

        source_index = dict((x, i) for (i, x) in
                            enumerate(request.sources))
        target_index = dict((x, i) for (i, x) in
                            enumerate(request.targets))

        m = request.subrequest_map()

        skeys = sorted(m.keys(), key=cmp_to_key(cmp_none_aware))
        results_list = []

        for i in range(len(request.sources)):
            results_list.append([None] * len(request.targets))

        tcounters_dyn_list = []
        tcounters_static_list = []
        nsub = len(skeys)
        isub = 0

        # Processing dynamic targets through
        # parimap(process_subrequest_dynamic)

        if calc_timeseries:
            _process_dynamic = process_dynamic_timeseries
        else:
            _process_dynamic = process_dynamic

        if request.has_dynamic:
            work_dynamic = [
                (i, nsub,
                 [source_index[source] for source in m[k][0]],
                 [target_index[target] for target in m[k][1]
                  if not isinstance(target, StaticTarget)])
                for (i, k) in enumerate(skeys)]

            for ii_results, tcounters_dyn in _process_dynamic(
                    work_dynamic, request.sources, request.targets, self,
                    nthreads):

                tcounters_dyn_list.append(num.diff(tcounters_dyn))
                isource, itarget, result = ii_results
                results_list[isource][itarget] = result

                if status_callback:
                    status_callback(isub, nsub)

                isub += 1

        # Processing static targets through process_static
        if request.has_statics:
            work_static = [
                (i, nsub,
                 [source_index[source] for source in m[k][0]],
                 [target_index[target] for target in m[k][1]
                  if isinstance(target, StaticTarget)])
                for (i, k) in enumerate(skeys)]

            for ii_results, tcounters_static in process_static(
                    work_static, request.sources, request.targets, self,
                    nthreads=nthreads):

                tcounters_static_list.append(num.diff(tcounters_static))
                isource, itarget, result = ii_results
                results_list[isource][itarget] = result

                if status_callback:
                    status_callback(isub, nsub)

                isub += 1

        if status_callback:
            status_callback(nsub, nsub)

        tt1 = xtime()
        if resource:
            rs1 = resource.getrusage(resource.RUSAGE_SELF)
            rc1 = resource.getrusage(resource.RUSAGE_CHILDREN)

        s = ProcessingStats()

        if request.has_dynamic:
            tcumu_dyn = num.sum(num.vstack(tcounters_dyn_list), axis=0)
            t_dyn = float(num.sum(tcumu_dyn))
            perc_dyn = map(float, tcumu_dyn / t_dyn * 100.)
            (s.t_perc_get_store_and_receiver,
             s.t_perc_discretize_source,
             s.t_perc_make_base_seismogram,
             s.t_perc_make_same_span,
             s.t_perc_post_process) = perc_dyn
        else:
            t_dyn = 0.

        if request.has_statics:
            tcumu_static = num.sum(num.vstack(tcounters_static_list), axis=0)
            t_static = num.sum(tcumu_static)
            perc_static = map(float, tcumu_static / t_static * 100.)
            (s.t_perc_static_get_store,
             s.t_perc_static_discretize_basesource,
             s.t_perc_static_sum_statics,
             s.t_perc_static_post_process) = perc_static

        s.t_wallclock = tt1 - tt0
        if resource:
            s.t_cpu = (
                (rs1.ru_utime + rs1.ru_stime + rc1.ru_utime + rc1.ru_stime) -
                (rs0.ru_utime + rs0.ru_stime + rc0.ru_utime + rc0.ru_stime))
            s.n_read_blocks = (
                (rs1.ru_inblock + rc1.ru_inblock) -
                (rs0.ru_inblock + rc0.ru_inblock))

        n_records_stacked = 0.
        for results in results_list:
            for result in results:
                if not isinstance(result, meta.Result):
                    continue
                shr = float(result.n_shared_stacking)
                n_records_stacked += result.n_records_stacked / shr
                s.t_perc_optimize += result.t_optimize / shr
                s.t_perc_stack += result.t_stack / shr
        s.n_records_stacked = int(n_records_stacked)
        if t_dyn != 0.:
            # Fix: convert the accumulated absolute times to percentages
            # of the total dynamic processing time. The old expression
            # divided by ``t_dyn * 100``, producing values 10000x too
            # small to be percentages.
            s.t_perc_optimize *= 100. / t_dyn
            s.t_perc_stack *= 100. / t_dyn

        return Response(
            request=request,
            results_list=results_list,
            stats=s)
class RemoteEngine(Engine):
    '''
    Client for remote synthetic seismogram calculator.
    '''

    # Web-service site name and URL; defaults come from pyrocko.gf.ws.
    site = String.T(default=ws.g_default_site, optional=True)
    url = String.T(default=ws.g_url, optional=True)

    def process(self, request=None, status_callback=None, **kwargs):
        '''
        Process a request by forwarding it to the remote service.

        NOTE(review): ``status_callback`` is accepted for interface
        compatibility with :py:meth:`LocalEngine.process` but is not used
        by this implementation.
        '''

        if request is None:
            request = Request(**kwargs)

        return ws.seismosizer(url=self.url, site=self.site, request=request)
# Lazily-created module-wide LocalEngine singleton.
g_engine = None


def get_engine(store_superdirs=None):
    '''
    Get the shared global engine, creating it on first use.

    :param store_superdirs: optional additional super-directories to
        search for GF stores; appended to the engine if not yet present.
    :returns: :py:class:`LocalEngine` singleton
    '''
    global g_engine
    if g_engine is None:
        g_engine = LocalEngine(use_env=True, use_config=True)

    # Fix: use a ``None`` sentinel instead of a mutable default argument
    # (``store_superdirs=[]``); behavior is unchanged for all callers.
    for d in store_superdirs or []:
        if d not in g_engine.store_superdirs:
            g_engine.store_superdirs.append(d)

    return g_engine
class SourceGroup(Object):
    '''
    Base class for collections of sources.

    Attribute access is vectorized: accessing a source parameter on the
    group returns a float array of that parameter over all members.
    '''

    def __getattr__(self, k):
        values = (getattr(s, k) for s in self)
        return num.fromiter(values, dtype=float)

    def __iter__(self):
        raise NotImplementedError(
            'This method should be implemented in subclass.')

    def __len__(self):
        raise NotImplementedError(
            'This method should be implemented in subclass.')
class SourceList(SourceGroup):
    '''
    Plain list of sources.
    '''

    sources = List.T(Source.T())

    def append(self, s):
        '''
        Add a source to the collection.
        '''
        self.sources.append(s)

    def __iter__(self):
        return iter(self.sources)

    def __len__(self):
        return len(self.sources)
class SourceGrid(SourceGroup):
    '''
    Regular grid of source models, gridding over selected parameters of
    a base source.
    '''

    base = Source.T()
    variables = Dict.T(String.T(), Range.T())
    order = List.T(String.T())

    def __len__(self):
        n = 1
        for _, values in self.make_coords(self.base):
            n *= len(list(values))

        return n

    def __iter__(self):
        for items in permudef(self.make_coords(self.base)):
            source = self.base.clone(**dict(items))
            source.regularize()
            yield source

    def ordered_params(self):
        '''
        Yield gridded parameter names, honoring ``order`` first.
        '''
        remaining = list(self.variables.keys())
        for k in self.order + list(self.base.keys()):
            if k in remaining:
                yield k
                remaining.remove(k)

        if remaining:
            raise Exception('Invalid parameter "%s" for source type "%s".' %
                            (remaining[0], self.base.__class__.__name__))

    def make_coords(self, base):
        return [(param, self.variables[param].make(base=base[param]))
                for param in self.ordered_params()]
# Source model classes exported in the public API.
source_classes = [
    Source,
    SourceWithMagnitude,
    SourceWithDerivedMagnitude,
    ExplosionSource,
    RectangularExplosionSource,
    DCSource,
    CLVDSource,
    VLVDSource,
    MTSource,
    RectangularSource,
    PseudoDynamicRupture,
    DoubleDCSource,
    RingfaultSource,
    CombiSource,
    SFSource,
    PorePressurePointSource,
    PorePressureLineSource,
]

# Source-time-function classes exported in the public API.
stf_classes = [
    STF,
    BoxcarSTF,
    TriangularSTF,
    HalfSinusoidSTF,
    ResonatorSTF,
]

# Public API: exceptions, all source and STF classes, request/response
# machinery and the engine implementations. Some listed names (e.g.
# DerivedMagnitudeError, STFMode, Range, map_anchor) are defined earlier
# in this module.
__all__ = '''
SeismosizerError
BadRequest
NoSuchStore
DerivedMagnitudeError
STFMode
'''.split() + [S.__name__ for S in source_classes + stf_classes] + '''
Request
ProcessingStats
Response
Engine
LocalEngine
RemoteEngine
source_classes
get_engine
Range
SourceGroup
SourceList
SourceGrid
map_anchor
'''.split()