# http://pyrocko.org - GPLv3
#
# The Pyrocko Developers, 21st Century
# ---|P------/S----------~Lg----------
Timestamp, Int, SObject, ArgumentError, Dict, ValidationError, Bool)
return -1
return 1
def __str__(self):
    '''
    Human-readable description of the missing GF store.

    Mentions the requested store id when one was given and lists the
    searched directories when they are known.
    '''
    if self.store_id is None:
        message = 'GF store not found.'
    else:
        message = 'no GF store with id "%s" found.' % self.store_id

    if self.dirs is not None:
        message += ' Searched folders:\n %s' % '\n '.join(sorted(self.dirs))

    return message
'k': 1e3, 'M': 1e6, }
raise ValueError('unit without a number: \'%s\'' % s)
else:
else:
else:
strike, dip, length, width, anchor, velocity=None, stf=None, nucleation_x=None, nucleation_y=None, decimation_factor=1, pointsonly=False, plane_coords=False, aggressive_oversampling=False):
stf = STF()
raise AttributeError('velocity is required in time mode')
nl = int((2. / decimation_factor) * num.ceil(ln / mindeltagf)) + 1 nw = int((2. / decimation_factor) * num.ceil(wd / mindeltagf)) + 1 else:
else:
dist_y = num.abs(nucleation_y - points[:, 1]) else:
return points, dl, dw, nl, nw
pmt.euler_to_matrix(dip * d2r, strike * d2r, 0.0))
return points, dl, dw, nl, nw
# We assume a non-rotated fault plane N_CRITICAL = 8 points = points2.T.reshape((3, nl, nw)) if points.size <= N_CRITICAL: logger.warning('RectangularSource is defined by only %d sub-sources!' % points.size) return True
distances = num.sqrt( (points[0, 0, :] - points[0, 1, :])**2 + (points[1, 0, :] - points[1, 1, :])**2 + (points[2, 0, :] - points[2, 1, :])**2)
depths = points[2, 0, :] vs_profile = store.config.get_vs( lat=0., lon=0., points=num.repeat(depths[:, num.newaxis], 3, axis=1), interpolation='multilinear')
min_wavelength = vs_profile * (store.config.deltat * 2) if not num.all(min_wavelength > distances / 2): return False return True
[[-0.5 * ln, -0.5 * wd, 0.], [0.5 * ln, -0.5 * wd, 0.], [0.5 * ln, 0.5 * wd, 0.], [-0.5 * ln, 0.5 * wd, 0.], [-0.5 * ln, -0.5 * wd, 0.]])
pmt.euler_to_matrix(dip * d2r, strike * d2r, 0.0))
strike, dip, length, width, depth, x_plane_coords, y_plane_coords, lat=0., lon=0., north_shift=0, east_shift=0, anchor='top', cs='xy'):
ln = length wd = width x_abs = [] y_abs = [] if not isinstance(x_plane_coords, list): x_plane_coords = [x_plane_coords] y_plane_coords = [y_plane_coords]
for x_plane, y_plane in zip(x_plane_coords, y_plane_coords): points = num.array( [[-0.5 * ln * x_plane, -0.5 * wd * y_plane, 0.], [0.5 * ln * x_plane, -0.5 * wd * y_plane, 0.], [0.5 * ln * x_plane, 0.5 * wd * y_plane, 0.], [-0.5 * ln * x_plane, 0.5 * wd * y_plane, 0.], [-0.5 * ln * x_plane, -0.5 * wd * y_plane, 0.]])
anch_x, anch_y = map_anchor[anchor] points[:, 0] -= anch_x * 0.5 * length points[:, 1] -= anch_y * 0.5 * width
rotmat = num.asarray( pmt.euler_to_matrix(dip * d2r, strike * d2r, 0.0))
points = num.dot(rotmat.T, points.T).T points[:, 0] += north_shift points[:, 1] += east_shift points[:, 2] += depth if cs in ('latlon', 'lonlat'): latlon = ne_to_latlon(lat, lon, points[:, 0], points[:, 1]) latlon = num.array(latlon).T x_abs.append(latlon[1:2, 1]) y_abs.append(latlon[2:3, 0]) if cs == 'xy': x_abs.append(points[1:2, 1]) y_abs.append(points[2:3, 0])
if cs == 'lonlat': return y_abs, x_abs else: return x_abs, y_abs
strike, dip, length, width, anchor, discretized_basesource=None, points_x=None, points_y=None):
points_x = num.array([points_x]) points_y = num.array([points_y])
ds = discretized_basesource
nl_patches = ds.nl + 1 nw_patches = ds.nw + 1
npoints = nl_patches * nw_patches points = num.zeros((npoints, 3)) ln_patches = num.array([il for il in range(nl_patches)]) wd_patches = num.array([iw for iw in range(nw_patches)])
points_ln =\ 2 * ((ln_patches - num.min(ln_patches)) / num.ptp(ln_patches)) - 1 points_wd =\ 2 * ((wd_patches - num.min(wd_patches)) / num.ptp(wd_patches)) - 1
for il in range(nl_patches): for iw in range(nw_patches): points[il * nw_patches + iw, :] = num.array([ points_ln[il] * ln * 0.5, points_wd[iw] * wd * 0.5, 0.0])
[x * 0.5 * ln, y * 0.5 * wd, 0.0])
pmt.euler_to_matrix(dip * d2r, strike * d2r, 0.0))
''' Convenient range specification.
Equivalent ways to specify the range [ 0., 1000., ... 10000. ]::
Range('0 .. 10k : 1k') Range(start=0., stop=10e3, step=1e3) Range(0, 10e3, 1e3) Range('0 .. 10k @ 11') Range(start=0., stop=10*km, n=11)
Range(0, 10e3, n=11) Range(values=[x*1e3 for x in range(11)])
Depending on the use context, it can be possible to omit any part of the specification. E.g. in the context of extracting a subset of an already existing range, the existing range's specification values would be filled in where missing.
The values are distributed with equal spacing, unless the ``spacing`` argument is modified. The values can be created offset or relative to an external base value with the ``relative`` argument if the use context supports this.
The range specification can be expressed with a short string representation::
'start .. stop @ num | spacing, relative' 'start .. stop : step | spacing, relative'
most parts of the expression can be omitted if not needed. Whitespace is allowed for readability but can also be omitted. '''
choices=['lin', 'log', 'symlog'], default='lin', optional=True)
choices=['', 'add', 'mult'], default='', optional=True)
r'(@(?P<n>[^|]+)|:(?P<step>[^|]+))?' r'(\|(?P<stuff>.+))?$')
raise ArgumentError('%s specified more than once' % k)
def __str__(self):
    '''
    Short string representation of the range specification.

    Uses the ``start .. stop : step | spacing,relative`` notation, or a
    plain comma-separated list when explicit values are set.
    '''

    def fmt(value):
        # Unset endpoints render as an empty placeholder.
        return '%g' % value if value is not None else ''

    # An explicit value list short-circuits the structured notation.
    if self.values:
        return ','.join('%g' % v for v in self.values)

    if self.start is None and self.stop is None:
        span = ''
    else:
        span = '%s .. %s' % (fmt(self.start), fmt(self.stop))

    # Without a span, the step/count part is written without padding.
    compact = span == ''
    stepping = ''
    if self.step is not None:
        stepping = (':%g' if compact else ' : %g') % self.step
    elif self.n is not None:
        stepping = ('@%i' if compact else ' @ %i') % self.n

    extras = []
    if self.spacing != 'lin':
        extras.append(self.spacing)
    if self.relative != '':
        extras.append(self.relative)

    suffix = ' | %s' % ','.join(extras) if extras else ''

    return span + stepping + suffix
def parse(cls, s): try: vals = [ufloat(x) for x in s.split(',')] except Exception: raise InvalidGridDef( '"%s" is not a valid range specification' % s)
return dict(values=num.array(vals, dtype=num.float))
except Exception: raise InvalidGridDef( '"%s" is not a valid range specification' % s)
t = d['stuff'].split(',') for x in t: if x in cls.spacing.choices: spacing = x elif x and x in cls.relative.choices: relative = x else: raise InvalidGridDef( '"%s" is not a valid range specification' % s)
relative=relative)
return self.values
start = [mi, ma][swap] stop = [ma, mi][swap] step = [inc, -inc][ma < mi]
raise InvalidGridDef( 'Cannot use range specification "%s" without start ' 'and stop in this context' % self)
step = stop - start
raise InvalidGridDef( 'Range specification "%s" has inconsistent ordering ' '(step < 0 => stop > start)' % self)
n = int(math.floor((stop - start) / step)) + 1 stop = start + (n - 1) * step else:
n = 1
elif self.spacing in ('log', 'symlog'): if start > 0. and stop > 0.: vals = num.exp(num.linspace(num.log(start), num.log(stop), n)) elif start < 0. and stop < 0.: vals = -num.exp(num.linspace(num.log(-start), num.log(-stop), n)) else: raise InvalidGridDef( 'log ranges should not include or cross zero ' '(in range specification "%s")' % self)
if self.spacing == 'symlog': nvals = - vals vals = num.concatenate((nvals[::-1], vals))
raise InvalidGridDef( 'cannot use relative range specification in this context')
vals += base
if shorthand is not None: t = shorthand.split('=') if len(t) != 2: raise InvalidGridDef( 'invalid grid specification element: %s' % shorthand)
sp, sr = t[0].strip(), t[1].strip()
kwargs['param'] = sp kwargs['rs'] = Range(sr)
Object.__init__(self, **kwargs)
return self.param + ' = ' + str(self.rs)
if shorthand is not None: t = shorthand.splitlines() tt = [] for x in t: x = x.strip() if x: tt.extend(x.split(';'))
elements = [] for se in tt: elements.append(GridDef(se))
kwargs['elements'] = elements
Object.__init__(self, **kwargs)
return '; '.join(str(x) for x in self.elements)
return iter(self.T.propnames)
raise KeyError(k)
raise KeyError(k)
''' Make a copy of the object.
A new object of the same class is created and initialized with the parameters of the object on which this method is called on. If ``kwargs`` are given, these are used to override any of the initialization parameters. '''
d[k] = v.clone()
def keys(cls): ''' Get list of the source model's parameter names. '''
''' Base class for source time functions. '''
self.factor_duration_to_effective()
def factor_duration_to_effective(cls):
    '''
    Factor to convert the nominal duration to the effective duration.

    Base implementation: nominal and effective duration are identical.
    '''
    return 1.0
return tref
def effective_duration(self):
else: num.array([tl, th], dtype=num.float), num.array([th - tref, tref - tl], dtype=num.float) / deltat)
deltat + times[0] + t0
''' Boxcar type source time function.
.. figure :: /static/stf-BoxcarSTF.svg :width: 40% :align: center :alt: boxcar source time function '''
default=0.0, help='duration of the boxcar')
default=0.0, help='anchor point with respect to source.time: (' '-1.0: left -> source duration [0, T] ~ hypocenter time, ' ' 0.0: center -> source duration [-T/2, T/2] ~ centroid time, ' '+1.0: right -> source duration [-T, 0] ~ rupture end time)')
def factor_duration_to_effective(cls):
tmin - 0.5 * deltat, tmax + 0.5 * deltat, nt + 1) [0.0, 0.0, 1.0, 1.0], dtype=num.float)
''' Triangular type source time function.
.. figure :: /static/stf-TriangularSTF.svg :width: 40% :align: center :alt: triangular source time function '''
default=0.0, help='baseline of the triangle')
default=0.5, help='fraction of time compared to duration, ' 'when the maximum amplitude is reached')
default=0.0, help='anchor point with respect to source.time: (' '-1.0: left -> source duration [0, T] ~ hypocenter time, ' ' 0.0: center -> source duration [-T/2, T/2] ~ centroid time, ' '+1.0: right -> source duration [-T, 0] ~ rupture end time)')
self.factor_duration_to_effective( kwargs.get('peak_ratio', None))
def centroid_ratio(self):
else: return tref - cb * self.duration * self.anchor
def effective_duration(self): self.peak_ratio)
else: tmax_stf = tref + cb * self.duration * (1. - self.anchor) tmin_stf = tmax_stf - self.duration
tmin - 0.5 * deltat, tmax + 0.5 * deltat, nt + 1) [0.0, self.peak_ratio, 1.0], dtype=num.float) else:
return ( type(self).__name__, self.duration, self.peak_ratio, self.anchor)
''' Half sinusoid type source time function.
.. figure :: /static/stf-HalfSinusoidSTF.svg :width: 40% :align: center :alt: half-sinusouid source time function '''
default=0.0, help='duration of the half-sinusoid (baseline)')
default=0.0, help='anchor point with respect to source.time: (' '-1.0: left -> source duration [0, T] ~ hypocenter time, ' ' 0.0: center -> source duration [-T/2, T/2] ~ centroid time, ' '+1.0: right -> source duration [-T, 0] ~ rupture end time)')
default=1, help='set to 2 to use square of the half-period sinusoidal function.')
self.factor_duration_to_effective( kwargs.get('exponent', 1))
def factor_duration_to_effective(cls, exponent): else: raise ValueError('exponent for HalfSinusoidSTF must be 1 or 2')
def effective_duration(self):
tmin - 0.5 * deltat, tmax + 0.5 * deltat, nt + 1)))
(t_edges - tmin_stf) * (math.pi / self.duration))
- 1.0 / (2.0 * math.pi) * num.sin( (t_edges - tmin_stf) * (2.0 * math.pi / self.duration)) else: raise ValueError('exponent for HalfSinusoidSTF must be 1 or 2')
else:
return (type(self).__name__, self.duration, self.anchor)
'''Smooth-ramp type source time function for near-field displacement. Based on moment function of double-couple point source proposed by Bruestle and Mueller (PEPI, 1983).
.. [1] W. Bruestle, G. Mueller (1983), Moment and duration of shallow earthquakes from Love-wave modelling for regional distances, PEPI 32, 312-324.
.. figure :: /static/stf-SmoothRampSTF.svg :width: 40% :alt: smooth ramp source time function ''' default=0.0, help='duration of the ramp (baseline)')
default=0.5, help='fraction of time compared to duration, ' 'when the maximum amplitude is reached')
default=0.0, help='anchor point with respect to source.time: (' '-1.0: left -> source duration ``[0, T]`` ~ hypocenter time, ' '0.0: center -> source duration ``[-T/2, T/2]`` ~ centroid time, ' '+1.0: right -> source duration ``[-T, 0]`` ~ rupture end time)')
tmin_stf = tref - self.duration * (self.anchor + 1.) * 0.5 tmax_stf = tref + self.duration * (1. - self.anchor) * 0.5 tmin = round(tmin_stf / deltat) * deltat tmax = round(tmax_stf / deltat) * deltat D = round((tmax - tmin) / deltat) * deltat nt = int(round(D / deltat)) + 1 times = num.linspace(tmin, tmax, nt) if nt > 1: rise_time = self.rise_ratio * self.duration amplitudes = num.ones_like(times) tp = tmin + rise_time ii = num.where(times <= tp) t_inc = times[ii] a = num.cos(num.pi * (t_inc - tmin_stf) / rise_time) b = num.cos(3 * num.pi * (t_inc - tmin_stf) / rise_time) - 1.0 amplitudes[ii] = (9. / 16.) * (1 - a + (1. / 9.) * b)
amplitudes /= num.sum(amplitudes) else: amplitudes = num.ones(1)
return times, amplitudes
return (type(self).__name__, self.duration, self.rise_ratio, self.anchor)
''' Simple resonator like source time function.
.. math ::
f(t) = 0 for t < 0 f(t) = e^{-t/tau} * sin(2 * pi * f * t)
.. figure :: /static/stf-SmoothRampSTF.svg :width: 40% :alt: smooth ramp source time function
'''
default=0.0, help='decay time')
default=1.0, help='resonance frequency')
* num.sin(2.0 * num.pi * self.frequency * (times - tref))
return (type(self).__name__, self.duration, self.frequency)
''' Base class for all source models. '''
default=0., help='source origin time.')
optional=True, help='source time function.')
default='post', help='whether to apply source time function in pre or ' 'post-processing.')
''' Change some of the source models parameters.
Example::
>>> from pyrocko import gf >>> s = gf.DCSource() >>> s.update(strike=66., dip=33.) >>> print(s) --- !pf.DCSource depth: 0.0 time: 1970-01-01 00:00:00 magnitude: 6.0 strike: 66.0 dip: 33.0 rake: 0.0
'''
''' Create grid of source model variations.
:returns: :py:class:`SourceGrid` instance.
Example::
>>> from pyrocko import gf >>> base = DCSource() >>> R = gf.Range >>> for s in base.grid(R('
'''
''' Get key to decide about source discretization / GF stack sharing.
When two source models differ only in amplitude and origin time, the discretization and the GF stacking can be done only once for a unit amplitude and a zero origin time and the amplitude and origin times of the seismograms can be applied during post-processing of the synthetic seismogram.
For any derived parameterized source model, this method is called to decide if discretization and stacking of the source should be shared. When two source models return an equal vector of values discretization is shared. ''' self.lon, self.east_shift, self.time, type(self).__name__) + \ self.effective_stf_pre().base_key()
''' Get the scaling factor to be applied during post-processing.
Discretization of the base seismogram is usually done for a unit amplitude, because a common factor can be efficiently multiplied to final seismograms. This eliminates to do repeat the stacking when creating seismograms for a series of source models only differing in amplitude.
This method should return the scaling factor to apply in the post-processing (often this is simply the scalar moment of the source). '''
''' Return the STF applied before stacking of the Green's functions.
This STF is used during discretization of the parameterized source models, i.e. to produce a temporal distribution of point sources.
Handling of the STF before stacking of the GFs is less efficient but allows to use different source time functions for different parts of the source. '''
else:
''' Return the STF applied after stacking of the Green's functions.
This STF is used in the post-processing of the synthetic seismograms.
Handling of the STF after stacking of the GFs is usually more efficient but is only possible when a common STF is used for all subsources. '''
else:
lat=self.lat, lon=self.lon, north_shifts=arr(self.north_shift), east_shifts=arr(self.east_shift), depths=arr(self.depth))
sha = sha1() for k in self.base_key(): sha.update(str(k).encode()) return sha.hexdigest()
return self._dparams_base()
lat=self.lat, lon=self.lon, north_shifts=north_shifts, east_shifts=east_shifts, depths=depths)
lat=self.lat, lon=self.lon, north_shift=self.north_shift, east_shift=self.east_shift, time=self.time, name=self.name, depth=self.depth, duration=duration, **kwargs)
elif cs == 'xy': return points[:, :2] elif cs in ('latlon', 'lonlat'): latlon = ne_to_latlon( self.lat, self.lon, points[:, 0], points[:, 1])
latlon = num.array(latlon).T if cs == 'latlon': return latlon else: return latlon[:, ::-1]
def from_pyrocko_event(cls, ev, **kwargs): raise ConversionError( 'cannot convert event object to source object: ' 'no depth information available')
name=ev.name, time=ev.time, lat=ev.lat, lon=ev.lon, north_shift=ev.north_shift, east_shift=ev.east_shift, depth=ev.depth, stf=stf)
raise NotImplementedError( '%s does not implement get_magnitude()' % self.__class__.__name__)
''' Base class for sources containing a moment magnitude. '''
default=6.0, help='Moment magnitude Mw as in [Hanks and Kanamori, 1979]')
def moment(self):
def moment(self, value):
    '''
    Set the scalar moment [Nm].

    Stored internally as moment magnitude Mw via the standard
    moment-to-magnitude conversion.
    '''
    magnitude = pmt.moment_to_magnitude(value)
    self.magnitude = float(magnitude)
self, store, target, magnitude=self.magnitude, **kwargs)
def from_pyrocko_event(cls, ev, **kwargs):
''' Check for parameter conflicts.
To be overloaded in subclasses. Raises :py:exc:`DerivedMagnitudeError` on conflicts. ''' pass
raise DerivedMagnitudeError('no magnitude set')
self.get_magnitude(store, target)))
raise NotImplementedError( '%s does not implement pyrocko_moment_tensor()' % self.__class__.__name__)
self, store, target, moment_tensor=mt, magnitude=magnitude, **kwargs)
''' An isotropic explosion point source. '''
optional=True, help='moment magnitude Mw as in [Hanks and Kanamori, 1979]')
optional=True, help='volume change of the explosion/implosion or ' 'the contracting/extending magmatic source. [m^3]')
(self.volume_change,)
raise DerivedMagnitudeError( 'magnitude and volume_change are both defined')
self.get_moment_to_volume_change_ratio(store, target)
else:
return self.volume_change
store, target)
else: return 1.0 / self.get_moment_to_volume_change_ratio(store)
raise DerivedMagnitudeError( 'need earth model to convert between volume change and ' 'magnitude')
[[self.north_shift, self.east_shift, self.depth]], dtype=num.float)
self.lat, self.lon, points=points, interpolation=interpolation)[0] raise DerivedMagnitudeError( 'could not get shear modulus at source position')
store.config.deltat, self.time)
if self.volume_change < 0.: amplitudes *= -1
m0s=amplitudes, **self._dparams_base_repeated(times))
''' Rectangular or line explosion source. '''
default=0.0, help='strike direction in [deg], measured clockwise from north')
default=90.0, help='dip angle in [deg], measured downward from horizontal')
default=0., help='length of rectangular source area [m]')
default=0., help='width of rectangular source area [m]')
choices=['top', 'top_left', 'top_right', 'center', 'bottom', 'bottom_left', 'bottom_right'], default='center', optional=True, help='Anchor point for positioning the plane, can be: top, center or' 'bottom and also top_left, top_right,bottom_left,' 'bottom_right, center_left and center right')
optional=True, help='horizontal position of rupture nucleation in normalized fault ' 'plane coordinates (-1 = left edge, +1 = right edge)')
optional=True, help='down-dip position of rupture nucleation in normalized fault ' 'plane coordinates (-1 = upper edge, +1 = lower edge)')
default=3500., help='speed of explosion front [m/s]')
default=False, help='Aggressive oversampling for basesource discretization. ' 'When using \'multilinear\' interpolation oversampling has' ' practically no effect.')
self.width, self.nucleation_x, self.nucleation_y, self.velocity, self.anchor)
else:
nucy = self.nucleation_y * 0.5 * self.width else:
store.config.deltas, store.config.deltat, self.time, self.north_shift, self.east_shift, self.depth, self.strike, self.dip, self.length, self.width, self.anchor, self.velocity, stf=stf, nucleation_x=nucx, nucleation_y=nucy)
lat=self.lat, lon=self.lon, times=times, north_shifts=points[:, 0], east_shifts=points[:, 1], depths=points[:, 2], m0s=amplitudes)
points = outline_rect_source(self.strike, self.dip, self.length, self.width, self.anchor)
points[:, 0] += self.north_shift points[:, 1] += self.east_shift points[:, 2] += self.depth if cs == 'xyz': return points elif cs == 'xy': return points[:, :2] elif cs in ('latlon', 'lonlat'): latlon = ne_to_latlon( self.lat, self.lon, points[:, 0], points[:, 1])
latlon = num.array(latlon).T if cs == 'latlon': return latlon else: return latlon[:, ::-1]
if self.nucleation_x is None: return None, None
coords = from_plane_coords(self.strike, self.dip, self.length, self.width, self.depth, self.nucleation_x, self.nucleation_y, lat=self.lat, lon=self.lon, north_shift=self.north_shift, east_shift=self.east_shift, cs=cs) return coords
''' A double-couple point source. '''
default=0.0, help='strike direction in [deg], measured clockwise from north')
default=90.0, help='dip angle in [deg], measured downward from horizontal')
default=0.0, help='rake angle in [deg], ' 'measured counter-clockwise from right-horizontal ' 'in on-plane view')
strike=self.strike, dip=self.dip, rake=self.rake)
store.config.deltat, self.time) m6s=mot.m6()[num.newaxis, :] * amplitudes[:, num.newaxis], **self._dparams_base_repeated(times))
strike=self.strike, dip=self.dip, rake=self.rake, scalar_moment=self.moment)
self, store, target, moment_tensor=self.pyrocko_moment_tensor(store, target), **kwargs)
def from_pyrocko_event(cls, ev, **kwargs): strike=float(strike), dip=float(dip), rake=float(rake), magnitude=float(mt.moment_magnitude()))
''' A pure CLVD point source. '''
default=0.0, help='azimuth direction of largest dipole, clockwise from north [deg]')
default=90., help='dip direction of largest dipole, downward from horizontal [deg]')
return Source.base_key(self) + (self.azimuth, self.dip)
def m6(self): d2r * (self.dip - 90.), d2r * (self.azimuth - 90.), 0.)
def m6_astuple(self):
store.config.deltat, self.time) m6s=self.m6[num.newaxis, :] * amplitudes[:, num.newaxis] / factor, **self._dparams_base_repeated(times))
self, store, target, moment_tensor=self.pyrocko_moment_tensor(store, target), magnitude=float(mt.moment_magnitude()), **kwargs)
''' Volumetric linear vector dipole source.
This source is a parameterization for a restricted moment tensor point source, useful to represent dyke or sill like inflation or deflation sources. The restriction is such that the moment tensor is rotational symmetric. It can be represented by a superposition of a linear vector dipole (here we use a CLVD for convenience) and an isotropic component. The restricted moment tensor has 4 degrees of freedom: 2 independent eigenvalues and 2 rotation angles orienting the the symmetry axis.
In this parameterization, the isotropic component is controlled by ``volume_change``. To define the moment tensor, it must be converted to the scalar moment of the the MT's isotropic component. For the conversion, the shear modulus at the source's position must be known. This value is extracted from the earth model defined in the GF store in use.
The CLVD part is controlled by its scalar moment :math:`M_0`: ``clvd_moment``. The sign of ``clvd_moment`` is used to switch between a positive or negative CLVD (the sign of the largest eigenvalue). '''
default=0.0, help='azimuth direction of symmetry axis, clockwise from north [deg].')
default=90., help='dip direction of symmetry axis, downward from horizontal [deg].')
default=0., help='volume change of the inflation/deflation [m^3].')
default=0., help='scalar moment :math:`M_0` of the CLVD component [Nm]. The sign ' 'controls the sign of the CLVD (the sign of its largest ' 'eigenvalue).')
raise DerivedMagnitudeError( 'need earth model to convert between volume change and ' 'magnitude')
[[self.north_shift, self.east_shift, self.depth]], dtype=num.float)
self.lat, self.lon, points=points, interpolation=target.interpolation)[0] except meta.OutOfBounds: raise DerivedMagnitudeError( 'could not get shear modulus at source position')
return Source.base_key(self) + \ (self.azimuth, self.dip, self.volume_change, self.clvd_moment)
return float(pmt.moment_to_magnitude(mt.moment))
d2r * (self.dip - 90.), d2r * (self.azimuth - 90.), 0.)
self.get_moment_to_volume_change_ratio(store, target)
0., 0.,) * math.sqrt(2. / 3)
return float(pmt.magnitude_to_moment( self.get_magnitude(store, target)))
return tuple(m6.tolist())
store.config.deltat, self.time)
m6s=m6[num.newaxis, :], **self._dparams_base_repeated(times))
return pmt.MomentTensor(m=pmt.symmat6(*m6_astuple))
''' A moment tensor point source. '''
default=1., help='north-north component of moment tensor in [Nm]')
default=1., help='east-east component of moment tensor in [Nm]')
default=1., help='down-down component of moment tensor in [Nm]')
default=0., help='north-east component of moment tensor in [Nm]')
default=0., help='north-down component of moment tensor in [Nm]')
default=0., help='east-down component of moment tensor in [Nm]')
kwargs.pop('m6')):
def m6(self):
def m6_astuple(self):
def m6(self, value):
store.config.deltat, self.time) m6s=self.m6[num.newaxis, :] * amplitudes[:, num.newaxis], **self._dparams_base_repeated(times))
math.sqrt(num.sum(m6[0:3]**2) + 2.0 * num.sum(m6[3:6]**2)) / math.sqrt(2.))
self, store, target, moment_tensor=self.pyrocko_moment_tensor(store, target), magnitude=float(mt.moment_magnitude()), **kwargs)
def from_pyrocko_event(cls, ev, **kwargs): else: if ev.magnitude is not None: mom = pmt.magnitude_to_moment(ev.magnitude) v = math.sqrt(2. / 3.) * mom d.update(m6=(v, v, v, 0., 0., 0.))
'center': (0.0, 0.0), 'center_left': (-1.0, 0.0), 'center_right': (1.0, 0.0), 'top': (0.0, -1.0), 'top_left': (-1.0, -1.0), 'top_right': (1.0, -1.0), 'bottom': (0.0, 1.0), 'bottom_left': (-1.0, 1.0), 'bottom_right': (1.0, 1.0)}
''' Classical Haskell source model modified for bilateral rupture. '''
optional=True, help='moment magnitude Mw as in [Hanks and Kanamori, 1979]')
default=0.0, help='strike direction in [deg], measured clockwise from north')
default=90.0, help='dip angle in [deg], measured downward from horizontal')
default=0.0, help='rake angle in [deg], ' 'measured counter-clockwise from right-horizontal ' 'in on-plane view')
default=0., help='length of rectangular source area [m]')
default=0., help='width of rectangular source area [m]')
choices=['top', 'top_left', 'top_right', 'center', 'bottom', 'bottom_left', 'bottom_right'], default='center', optional=True, help='Anchor point for positioning the plane, can be: top, center or' 'bottom and also top_left, top_right,bottom_left,' 'bottom_right, center_left and center right')
optional=True, help='horizontal position of rupture nucleation in normalized fault ' 'plane coordinates (-1 = left edge, +1 = right edge)')
optional=True, help='down-dip position of rupture nucleation in normalized fault ' 'plane coordinates (-1 = upper edge, +1 = lower edge)')
default=3500., help='speed of rupture front [m/s]')
optional=True, help='Slip on the rectangular source area [m]')
optional=True, default=1, help='Sub-source decimation factor, a larger decimation will' ' make the result inaccurate but shorten the necessary' ' computation time (use for testing puposes only).')
default=False, help='Aggressive oversampling for basesource discretization. ' 'When using \'multilinear\' interpolation oversampling has' ' practically no effect.')
mom = kwargs.pop('moment') if 'magnitude' not in kwargs: kwargs['magnitude'] = float(pmt.moment_to_magnitude(mom))
self.magnitude, self.slip, self.strike, self.dip, self.rake, self.length, self.width, self.nucleation_x, self.nucleation_y, self.velocity, self.decimation_factor, self.anchor)
raise DerivedMagnitudeError( 'magnitude and slip are both defined')
raise DerivedMagnitudeError( 'magnitude for a rectangular source with slip defined ' 'can only be derived when earth model and target ' 'interpolation method are available')
else:
nucx = self.nucleation_x * 0.5 * self.length else:
nucy = self.nucleation_y * 0.5 * self.width else:
store.config.deltas, store.config.deltat, self.time, self.north_shift, self.east_shift, self.depth, self.strike, self.dip, self.length, self.width, self.anchor, self.velocity, stf=stf, nucleation_x=nucx, nucleation_y=nucy, decimation_factor=self.decimation_factor, aggressive_oversampling=self.aggressive_oversampling)
else: interpolation = 'nearest_neighbor' logger.warn( 'no target information available, will use ' '"nearest_neighbor" interpolation when extracting shear ' 'modulus from earth model')
self.lat, self.lon, points=points, interpolation=interpolation)
else: # normalization to retain total moment
store, target)
strike=self.strike, dip=self.dip, rake=self.rake)
lat=self.lat, lon=self.lon, times=times, north_shifts=points[:, 0], east_shifts=points[:, 1], depths=points[:, 2], m6s=m6s, dl=dl, dw=dw, nl=nl, nw=nw)
self.width, self.anchor)
elif cs in ('latlon', 'lonlat', 'latlondepth'): latlon = ne_to_latlon( self.lat, self.lon, points[:, 0], points[:, 1])
latlon = num.array(latlon).T if cs == 'latlon': return latlon elif cs == 'lonlat': return latlon[:, ::-1] else: return num.concatenate( (latlon, points[:, 2].reshape((len(points), 1))), axis=1)
points = points_on_rect_source( self.strike, self.dip, self.length, self.width, self.anchor, **kwargs)
points[:, 0] += self.north_shift points[:, 1] += self.east_shift points[:, 2] += self.depth if cs == 'xyz': return points elif cs == 'xy': return points[:, :2] elif cs in ('latlon', 'lonlat', 'latlondepth'): latlon = ne_to_latlon( self.lat, self.lon, points[:, 0], points[:, 1])
latlon = num.array(latlon).T if cs == 'latlon': return latlon elif cs == 'lonlat': return latlon[:, ::-1] else: return num.concatenate( (latlon, points[:, 2].reshape((len(points), 1))), axis=1)
if self.nucleation_x is None: return None, None
coords = from_plane_coords(self.strike, self.dip, self.length, self.width, self.depth, self.nucleation_x, self.nucleation_y, lat=self.lat, lon=self.lon, north_shift=self.north_shift, east_shift=self.east_shift, cs=cs) return coords
strike=self.strike, dip=self.dip, rake=self.rake, scalar_moment=self.get_moment(store, target))
self, store, target, **kwargs)
def from_pyrocko_event(cls, ev, **kwargs): strike=float(strike), dip=float(dip), rake=float(rake), magnitude=float(mt.moment_magnitude()))
''' Merged Eikonal and Okada Source for quasi-dynamic rupture modeling '''
default=0.0, help='strike direction in [deg], measured clockwise from north')
default=0.0, help='dip angle in [deg], measured downward from horizontal')
default=10. * km, help='length of rectangular source area [m]')
default=5. * km, help='width of rectangular source area [m]')
choices=['top', 'top_left', 'top_right', 'center', 'bottom', 'bottom_left', 'bottom_right'], default='center', optional=True, help='Anchor point for positioning the plane, can be: top, center or' ' bottom and also top_left, top_right,bottom_left,' ' bottom_right, center_left and center right')
default=num.array([0.]), dtype=num.float, serialize_as='list', help='horizontal position of rupture nucleation in normalized fault ' 'plane coordinates (-1 = left edge, +1 = right edge)')
default=num.array([0.]), dtype=num.float, serialize_as='list', help='down-dip position of rupture nucleation in normalized fault ' 'plane coordinates (-1 = upper edge, +1 = lower edge)')
optional=True, help='Time in [s] after origin, when nucleation points defined by ' 'nucleation_x and nucleation_y shall rupture.', dtype=num.float, shape=(None,))
default=0.8, help='scaling factor between S wave velocity and rupture velocity: ' 'v_r = gamma * v_s')
default=2, help='number of discrete source patches in x direction (along strike)')
default=2, help='number of discrete source patches in y direction (down dip)')
optional=True, help='moment magnitude Mw as in [Hanks and Kanamori, 1979]' 'Setting the moment magnitude the tractions/stress field' 'will be normalized to accomodate the desired moment magnitude.' 'Mutually exclusive with the slip parameter.')
optional=True, help='maximum slip of the rectangular source [m]' 'Setting the moment magnitude the tractions/stress field' 'will be normalized to accomodate the desired maximum slip.' 'Mutually exclusive with the magnitude parameter.')
optional=True, help='rake angle in [deg], ' 'measured counter-clockwise from right-horizontal ' 'in on-plane view. Rake is translated into homogenous tractions ' 'in strike and up-dip direction. Rake is mutually exclusive ' 'with tractions parameter.')
OkadaSource.T(), optional=True, help='List of all boundary elements/sub faults/fault patches')
optional=True, help='Traction field the rupture plane is exposed to.')
optional=True, help='Coefficient matrix linking traction and dislocation field', dtype=num.float, shape=(None,))
optional=True, default=1, help='Sub-source eikonal factor, a smaller eikonal factor will' ' increase the accuracy of rupture front calculation but' ' increases also the computation time.')
optional=True, default=1, help='Sub-source decimation factor, a larger decimation will' ' make the result inaccurate but shorten the necessary' ' computation time (use for testing puposes only).')
optional=True, default=1, help='Number of threads for Okada forward modelling ' 'and matrix inversion.' 'Note: for small/medium matrices 1 thread is most efficient!')
optional=True, default=False, help='Calculate only shear tractions, and omit tensile tractions.')
default=True, help='Smooth the tractions by weighting partially ruptured' ' fault patches.')
default=False, help='Aggressive oversampling for basesource discretization. ' 'When using \'multilinear\' interpolation oversampling has' ' practically no effect.')
mom = kwargs.pop('moment') if 'magnitude' not in kwargs: kwargs['magnitude'] = float(pmt.moment_to_magnitude(mom))
def nucleation_x(self):
def nucleation_x(self, nucleation_x): nucleation_x, num.ndarray) and nucleation_x is not None:
def nucleation_y(self):
def nucleation_y(self, nucleation_y): and nucleation_y is not None:
def nucleation(self): nucl_x, nucl_y = self.nucleation_x, self.nucleation_y
if (nucl_x is None) or (nucl_y is None): return None
assert nucl_x.shape[0] == nucl_y.shape[0]
return num.concatenate( (nucl_x[:, num.newaxis], nucl_y[:, num.newaxis]), axis=1)
def nucleation(self, nucleation): if isinstance(nucleation, list): nucleation = num.array([*nucleation])
assert nucleation.shape[1] == 2
self.nucleation_x = nucleation[:, 0] self.nucleation_y = nucleation[:, 1]
def nucleation_time(self):
def nucleation_time(self, nucleation_time): and nucleation_time is not None: nucleation_time = num.array([nucleation_time])
else:
self.magnitude, self.strike, self.dip, self.rake, self.length, self.width, float(self.nucleation_x.mean()), float(self.nucleation_y.mean()), self.decimation_factor, self.anchor, self.pure_shear)
raise AttributeError( 'tractions and rake are mutually exclusive')
raise DerivedMagnitudeError( 'definition of slip and magnitude is mutually exclusive')
if None in (store, target): raise DerivedMagnitudeError( 'magnitude for a rectangular source with slip defined ' 'can only be derived when earth model and target ' 'interpolation method are available')
amplitudes = self._discretize(store, target)[2] return float(pmt.moment_to_magnitude(num.sum(amplitudes)))
else:
points = outline_rect_source(self.strike, self.dip, self.length, self.width, self.anchor)
points[:, 0] += self.north_shift points[:, 1] += self.east_shift points[:, 2] += self.depth if cs == 'xyz': return points elif cs == 'xy': return points[:, :2] elif cs in ('latlon', 'lonlat', 'latlondepth'): latlon = ne_to_latlon( self.lat, self.lon, points[:, 0], points[:, 1])
latlon = num.array(latlon).T if cs == 'latlon': return latlon elif cs == 'lonlat': return latlon[:, ::-1] else: return num.concatenate( (latlon, points[:, 2].reshape((len(points), 1))), axis=1)
self.strike, self.dip, self.length, self.width, self.anchor, **kwargs)
elif cs == 'xy': return points[:, :2] elif cs in ('latlon', 'lonlat', 'latlondepth'): latlon = ne_to_latlon( self.lat, self.lon, points[:, 0], points[:, 1])
latlon = num.array(latlon).T if cs == 'latlon': return latlon elif cs == 'lonlat': return latlon[:, ::-1] else: return num.concatenate( (latlon, points[:, 2].reshape((len(points), 1))), axis=1)
# TODO: Now this should be slip, then it depends on the store. # TODO: default to tractions is store is not given?
strike=self.strike, dip=self.dip, rake=rake, scalar_moment=self.get_moment(store, target))
self, store, target, **kwargs)
def from_pyrocko_event(cls, ev, **kwargs): strike=float(strike), dip=float(dip), rake=float(rake), magnitude=float(mt.moment_magnitude()))
''' Discretize source plane with equal vertical and horizontal spacing
Arguments and keyword arguments needed for ``points_on_source``.
:param store: Greens function database (needs to cover whole region of the source) :type store: :py:class:`pyrocko.gf.store.Store`
:return: Number of points in strike and dip direction, distance between adjacent points, coordinates (latlondepth) and coordinates (xy on fault) for discrete points :rtype: int, int, float, :py:class:`numpy.ndarray`, :py:class:`numpy.ndarray` '''
pmt.euler_to_matrix(self.dip*d2r, self.strike*d2r, 0.0))
self.lat, self.lon, points, interpolation='nearest_neighbor')
num.min(store.config.deltat * vs_min / 2.), num.min(store.config.deltas)])
else:
points_x=points_xy[:, 0], points_y=points_xy[:, 1], **kwargs)
points=None): ''' Get rupture velocity for discrete points on source plane
:param store: Greens function database (needs to cover whole region of the source) :type store: optional, :py:class:`pyrocko.gf.store.Store` :param target: Target information :type target: optional, :py:class:`pyrocko.gf.target.Target` :param points: xy coordinates on fault (-1.:1.) of discrete points :type points: optional, :py:class:`numpy.ndarray`, ``(n_points, 2)``
:return: Rupture velocity assumed as vs * gamma for discrete points :rtype: :py:class:`numpy.ndarray`, ``(points.shape[0], 1)`` '''
_, _, _, points, _ = self._discretize_points(store, cs='xyz')
self.lat, self.lon, points=points, interpolation=interpolation) * self.gamma
self, store, interpolation='nearest_neighbor', vr=None, times=None, *args, **kwargs): ''' Get rupture start time for discrete points on source plane
:param store: Greens function database (needs to cover whole region of the source) :type store: :py:class:`pyrocko.gf.store.Store` :param target: Target information :type target: optional, :py:class:`pyrocko.gf.target.Target` :param vr: Array, containing user defined rupture velocity values :type vr: optional, :py:class:`numpy.ndarray` :param times: Array, containing zeros, where rupture is starting and otherwise -1, where time will be calculated. If not given, rupture starts at nucleation_x, nucleation_y. Times are given for discrete points with equal horizontal and vertical spacing :type times: optional, :py:class:`numpy.ndarray`
:return: Coordinates (latlondepth), Coordinates (xy), rupture velocity rupture propagation time of discrete points :rtype: :py:class:`numpy.ndarray`, ``(points.shape[0], 1)``, :py:class:`numpy.ndarray`, ``(points.shape[0], 1)``, :py:class:`numpy.ndarray`, ``(n_points_dip, n_points_strike)``, :py:class:`numpy.ndarray`, ``(n_points_dip, n_points_strike)`` ''' store, cs='xyz')
.reshape(nx, ny)
logger.warn( 'Given rupture velocities are not in right shape. Therefore' ' standard rupture velocity array is used.')
raise ValueError( 'nucleation coordinates have different shape.')
num.linalg.norm(points_xy - num.array([x, y]).ravel(), axis=1) for x, y in zip(nucl_x, nucl_y)])
else: if self.nucleation_time.shape == nucl_x.shape: nucl_times = self.nucleation_time else: raise ValueError( 'Nucleation coordinates and times have different ' 'shapes')
elif times.shape != tuple((nx, ny)): times = initialize_times() logger.warn( 'Given times are not in right shape. Therefore standard time' ' array is used.')
speeds=vr, times=times, delta=delta)
self, store, interpolation='multilinear', *args, **kwargs):
raise TypeError( 'Interpolation method %s not available' % interpolation)
store=store, *args, **kwargs)
raise ValueError( 'length must be larger then 0. not %g' % self.length)
raise ValueError( 'width must be larger then 0. not %g' % self.width)
nx, ny, times, vr, RegularGridInterpolator( (points_xy[::ny, 0], points_xy[:ny, 1]), times, method=interp_map[interpolation]), RegularGridInterpolator( (points_xy[::ny, 0], points_xy[:ny, 1]), vr, method=interp_map[interpolation]))
self, store=None, interpolation='nearest_neighbor', grid_shape=(), *args, **kwargs): ''' Get rupture start time and OkadaSource elements for discrete centre points on source plane and stores them in the patches-attribute
Arguments and keyword arguments needed for self.discretize_time.
:param store: Greens function database (needs to cover whole region of the source) :type store: :py:class:`pyrocko.gf.store.Store` :param interpolation: Kind of interpolation used. Choice between 'multilinear' and 'nearest_neighbor' :type interpolation: optional, str :param grid_shape: Desired sub fault patch grid size (nlength, nwidth). Either factor or grid_shape should be set. :type grid_shape: optional, tuple of int ''' self.get_vr_time_interpolators(store, *args, **kwargs)
* store.config.get_rho(*a, **kw) - 2. * shear_mod
self.lat, self.lon, num.array([[self.north_shift, self.east_shift, self.depth]]), interpolation=interpolation)
lat=self.lat, lon=self.lon, strike=self.strike, dip=self.dip, north_shift=self.north_shift, east_shift=self.east_shift, depth=self.depth, al1=al1, al2=al2, aw1=aw1, aw2=aw2, poisson=poisson[0], shearmod=shear_mod[0], opening=kwargs.get('opening', 0.))
if grid_shape: self.nx, self.ny = grid_shape else: self.nx = nx self.ny = ny
self.lat, self.lon, num.array([src.source_patch()[:3] for src in source_disc]), interpolation=interpolation)
else: times_interp = times.T.ravel() vr_interp = vr.T.ravel()
''' Prepare source for synthetic waveform calculation
:param store: Greens function database (needs to cover whole region of the source) :type store: :py:class:`pyrocko.gf.store.Store`
:returns: Source discretized by a set of moment tensors and times :rtype: :py:class:`pyrocko.gf.meta.DiscretizedMTSource` ''' else:
(p.length_pos, p.width_pos) for p in self.patches]).reshape(self.nx, self.ny, 2)
# boundary condition is zero-slip delta_slip.reshape(self.nx, self.ny, ntimes, 3)
(coords_x, coords_y, num.concatenate(([0.], slip_times))), slip_grid)
# discretize basesources (self.length / self.nx, self.width / self.ny, *store.config.deltas)))
num.ceil(pln / mindeltagf)) + 1 num.ceil(pwd / mindeltagf)) + 1
num.arange(self.nx) * pln + pln / 2, self.ny) - self.length / 2 num.arange(self.ny) * pwd + pwd / 2, self.nx) - self.width / 2
if False: try: import matplotlib.pyplot as plt coords = base_coords.copy() norm = num.sum(num.linalg.norm(delta_slip, axis=2), axis=1) plt.scatter(coords[:, 0], coords[:, 1], c=norm) plt.show() except AttributeError: pass
pmt.euler_to_matrix(self.dip * d2r, self.strike * d2r, 0.0))
[[lamb * du_n, 0., -mu * du_s], [0., lamb * du_n, 0.], [-mu * du_s, 0., (lamb + 2. * mu) * du_n]])
patch2m6( self.strike * d2r, self.dip * d2r, rake=slip_rake[im], du_s=slip_shear[im], du_n=slip_norm[im]) for im in range(nbasesrcs)])
lat=self.lat, lon=self.lon, times=base_times + self.time, north_shifts=base_interp[:, 0], east_shifts=base_interp[:, 1], depths=base_interp[:, 2], m6s=m6s, dl=dl, dw=dw, nl=self.nx, nw=self.ny)
''' Calculate linear coefficient relating tractions and dislocations on the fault patches ''' raise ValueError( 'Source patches are needed. Please calculate them first.')
self.patches, nthreads=self.nthreads)
''' Get array of patch attributes
:param attr: Attribute of fault patches which shall be listed for all patches in an array (possible attributes: check :py:class`pyrocko.modelling.okada.OkadaSource`) :type attr: str
:returns: Array of attribute values per fault patch :rtype: :py:class`numpy.ndarray`
''' raise ValueError( 'Source patches are needed. Please calculate them first.')
''' Get slip per subfault patch for given time after rupture start
:param time: time after origin [s], for which slip is computed :type time: float > 0.
:return: inverted displacements (u_strike, u_dip , u_tensile) for each source patch. order: [ patch1 u_Strike, patch1 u_Dip, patch1 u_Tensile, patch2 u_Strike, ...] :rtype: :py:class:`numpy.ndarray`, ``(n_sources * 3, 1)`` '''
raise ValueError( 'Please discretize the source first (discretize_patches())')
raise AttributeError( 'The traction vector is of invalid shape.' ' Required shape is (npatches, 3)')
self.get_vr_time_interpolators(store)
# Getting the native Eikonal grid, bit hackish
points_x >= ul[0]) & (points_x <= lr[0])) points_y >= ul[1]) & (points_y <= lr[1]))
(times_patch <= time).sum() / times_patch.size
return disloc_est
stress_field=tractions[relevant_sources, :].ravel(), coef_mat=self.coef_mat[indices_disl, :][:, indices_disl], pure_shear=self.pure_shear, nthreads=self.nthreads, **kwargs)
disloc_est_max = num.max(num.linalg.norm( DislocationInverter.get_disloc_lsq( stress_field=tractions.ravel(), coef_mat=self.coef_mat, pure_shear=self.pure_shear, nthreads=self.nthreads, **kwargs), axis=1))
if disloc_est_max != 0.: disloc_est *= self.slip / disloc_est_max
''' Get slip change inverted from OkadaSources depending on store deltat
The time interval, within which the slip changes are computed, is determined by the sampling rate of the Greens function database. Arguments and keyword arguments needed for self.get_okada_slip.
:param store: Greens function database (needs to cover whole region of the source). Its delta t [s] is used as time increment for slip difference calculation. Either dt or store should be given. :type store: optional, :py:class:`pyrocko.gf.store.Store` :param dt: time increment for slip difference calculation [s]. Either dt or store should be given. :type dt: optional, float
:return: displacement changes(du_strike, du_dip , du_tensile) for each source patch and time. order: [ patch1 du_Strike t1, patch1 du_Dip t1, patch1 du_Tensile t1, patch2 du_Strike t1, ...], [ patch1 du_Strike t2, patch1 du_Dip t2, patch1 du_Tensile t2, patch2 du_Strike t2, ...]; corner times, for which delta slip is computed :rtype: :py:class:`numpy.ndarray`, ``(n_sources, n_times, 3)`` :py:class:`numpy.ndarray`, ``(n_times, 1)`` ''' raise AttributeError( 'Argument collision. ' 'Please define only the store or the dt argument.')
raise AttributeError('Please give a GF store or set dt.')
time=t, **kwargs)
norm = num.linalg.norm(disloc_est, axis=2).max() disloc_est *= self.slip / norm
return disloc_est, calc_times
# if we have only one timestep there is no gradient
''' Get scalar seismic moment rate for each boundary element individually
Arguments and keyword arguments needed for self.get_okada_slip.
:return: seismic moment rate for each source patch and time. order: [ patch1 moment_rate t1, patch2 moment_rate t1, ...], [ patch1 moment_rate t2, patch2 moment_rate t2, ...]; corner times, for which moment rate is computed based on slip rate :rtype: :py:class:`numpy.ndarray`, ``(n_sources, n_times)`` :py:class:`numpy.ndarray`, ``(n_times, 1)`` '''
moment = pmt.magnitude_to_moment(self.magnitude) cum_mom = mom_rate.sum() * dt
if cum_mom != 0.: mom_rate *= moment / cum_mom
deltat = store.config.deltat
mom_rate, mom_times = self.get_moment_rate(store) # TODO complete or delete
''' Get seismic source moment rate for the total source
Arguments and keyword arguments needed for self.get_okada_slip.
:return: seismic moment rate for each time. order: [ moment_rate t1, moment_rate t2, ...]; corner times, for which moment rate is computed based on slip rate :rtype: :py:class:`numpy.ndarray`, ``(n_times, 1)`` :py:class:`numpy.ndarray`, ``(n_times, 1)`` '''
moment_rate, moment_times = self.get_moment_rate(*args, **kwargs) return num.sum(moment_rate, axis=0), moment_times
''' Two double-couple point sources separated in space and time. Moment share between the sub-sources is controlled by the parameter mix. The position of the subsources is dependent on the moment distribution between the two sources. Depth, east and north shift are given for the centroid between the two double-couples. The subsources will be positioned according to their moment shares around this centroid position. This is done according to their delta parameters, which are therefore in relation to that centroid. Note that depth of the subsources therefore can be depth+/-delta_depth. For shallow earthquakes therefore the depth has to be chosen deeper to avoid sampling above surface. '''
default=0.0, help='strike direction in [deg], measured clockwise from north')
default=90.0, help='dip angle in [deg], measured downward from horizontal')
default=0.0, help='azimuth to second double-couple [deg], ' 'measured at first, clockwise from north')
default=0.0, help='rake angle in [deg], ' 'measured counter-clockwise from right-horizontal ' 'in on-plane view')
default=0.0, help='strike direction in [deg], measured clockwise from north')
default=90.0, help='dip angle in [deg], measured downward from horizontal')
default=0.0, help='rake angle in [deg], ' 'measured counter-clockwise from right-horizontal ' 'in on-plane view')
default=0.0, help='separation of double-couples in time (t2-t1) [s]')
default=0.0, help='difference in depth (z2-z1) [m]')
default=0.0, help='distance between the two double-couples [m]')
default=0.5, help='how to distribute the moment to the two doublecouples ' 'mix=0 -> m1=1 and m2=0; mix=1 -> m1=0, m2=1')
optional=True, help='Source time function of subsource 1 ' '(if given, overrides STF from attribute :py:gattr:`Source.stf`)')
optional=True, help='Source time function of subsource 2 ' '(if given, overrides STF from attribute :py:gattr:`Source.stf`)')
return ( self.time, self.depth, self.lat, self.north_shift, self.lon, self.east_shift, type(self).__name__) + \ self.effective_stf1_pre().base_key() + \ self.effective_stf2_pre().base_key() + ( self.strike1, self.dip1, self.rake1, self.strike2, self.dip2, self.rake2, self.delta_time, self.delta_depth, self.azimuth, self.distance, self.mix)
return self.moment
return g_unit_pulse
a1 = 1.0 - self.mix a2 = self.mix delta_north = math.cos(self.azimuth * d2r) * self.distance delta_east = math.sin(self.azimuth * d2r) * self.distance
dc1 = DCSource( lat=self.lat, lon=self.lon, time=self.time - self.delta_time * a1, north_shift=self.north_shift - delta_north * a1, east_shift=self.east_shift - delta_east * a1, depth=self.depth - self.delta_depth * a1, moment=self.moment * a1, strike=self.strike1, dip=self.dip1, rake=self.rake1, stf=self.stf1 or self.stf)
dc2 = DCSource( lat=self.lat, lon=self.lon, time=self.time + self.delta_time * a2, north_shift=self.north_shift + delta_north * a2, east_shift=self.east_shift + delta_east * a2, depth=self.depth + self.delta_depth * a2, moment=self.moment * a2, strike=self.strike2, dip=self.dip2, rake=self.rake2, stf=self.stf2 or self.stf)
return [dc1, dc2]
rake=self.rake1, scalar_moment=a1) rake=self.rake2, scalar_moment=a2)
store.config.deltat, self.time - self.delta_time * a1)
store.config.deltat, self.time + self.delta_time * a2)
lat=self.lat, lon=self.lon, times=num.concatenate((times1, times2)), north_shifts=num.concatenate(( num.repeat(self.north_shift - delta_north * a1, nt1), num.repeat(self.north_shift + delta_north * a2, nt2))), east_shifts=num.concatenate(( num.repeat(self.east_shift - delta_east * a1, nt1), num.repeat(self.east_shift + delta_east * a2, nt2))), depths=num.concatenate(( num.repeat(self.depth - self.delta_depth * a1, nt1), num.repeat(self.depth + self.delta_depth * a2, nt2))), m6s=num.vstack(( mot1.m6()[num.newaxis, :] * amplitudes1[:, num.newaxis], mot2.m6()[num.newaxis, :] * amplitudes2[:, num.newaxis])))
rake=self.rake1, scalar_moment=a1 * self.moment) rake=self.rake2, scalar_moment=a2 * self.moment)
self, store, target, moment_tensor=self.pyrocko_moment_tensor(store, target), **kwargs)
def from_pyrocko_event(cls, ev, **kwargs): strike1=float(strike), dip1=float(dip), rake1=float(rake), strike2=float(strike), dip2=float(dip), rake2=float(rake), mix=0.0, magnitude=float(mt.moment_magnitude()))
''' A ring fault with vertical doublecouples. '''
default=1.0, help='diameter of the ring in [m]')
default=1.0, help='inside of the ring moves up (+1) or down (-1)')
default=0.0, help='strike direction of the ring plane, clockwise from north,' ' in [deg]')
default=0.0, help='dip angle of the ring plane from horizontal in [deg]')
default=360, help='number of point sources to use')
return Source.base_key(self) + ( self.strike, self.dip, self.diameter, self.npointsources)
return self.sign * self.moment
self.dip * d2r, self.strike * d2r, 0.0))
scalar_moment=1.0 / n).m())
[[num.cos(phi), num.sin(phi), num.zeros(n)], [-num.sin(phi), num.cos(phi), num.zeros(n)], [num.zeros(n), num.zeros(n), num.ones(n)]], (2, 0, 1))
ms[:, 0, 1], ms[:, 0, 2], ms[:, 1, 2])).T
store.config.deltat, self.time)
times=num.tile(times, n), lat=self.lat, lon=self.lon, north_shifts=num.repeat(points[:, 0], nt), east_shifts=num.repeat(points[:, 1], nt), depths=num.repeat(points[:, 2], nt), m6s=num.repeat(m6s, nt, axis=0) * num.tile( amplitudes, n)[:, num.newaxis])
'''Composite source model.'''
raise BadRequest( 'need at least one sub-source to create a CombiSource object.')
[subsource.lat for subsource in subsources], dtype=num.float) [subsource.lon for subsource in subsources], dtype=num.float)
subsources = [s.clone() for s in subsources] for subsource in subsources[1:]: subsource.set_origin(lat, lon)
time=time, lat=float(lat), lon=float(lon), north_shift=north_shift, east_shift=east_shift, depth=depth)
return 1.0
''' A single force point source. '''
default=0., help='northward component of single force [N]')
default=0., help='eastward component of single force [N]')
default=0., help='downward component of single force [N]')
return Source.base_key(self) + (self.fn, self.fe, self.fd)
return 1.0
store.config.deltat, self.time) **self._dparams_base_repeated(times))
self, store, target, **kwargs)
def from_pyrocko_event(cls, ev, **kwargs):
''' Excess pore pressure point source.
For poro-elastic initial value problem where an excess pore pressure is brought into a small source volume. '''
default=1.0, help='initial excess pore pressure in [Pa]')
return Source.base_key(self)
return self.pp
**self._dparams_base())
''' Excess pore pressure line source.
The line source is centered at (north_shift, east_shift, depth). '''
default=1.0, help='initial excess pore pressure in [Pa]')
default=0.0, help='length of the line source [m]')
default=0.0, help='azimuth direction, clockwise from north [deg]')
default=90., help='dip direction, downward from horizontal [deg]')
return Source.base_key(self) + (self.azimuth, self.dip, self.length)
return self.pp
times=util.num_full(n, self.time), lat=self.lat, lon=self.lon, north_shifts=points[:, 0], east_shifts=points[:, 1], depths=points[:, 2], pp=num.ones(n) / n)
''' Synthetic seismogram computation request.
::
Request(**kwargs) Request(sources, targets, **kwargs) '''
Source.T(), help='list of sources for which to produce synthetics.')
Target.T(), help='list of targets for which to produce synthetics.')
def args2kwargs(cls, args): raise BadRequest('invalid arguments')
else:
def targets_dynamic(self):
def targets_static(self):
def has_dynamic(self):
def has_statics(self):
''' Response object to a synthetic seismogram computation request. '''
''' Return a list of requested :class:`~pyrocko.trace.Trace` instances. '''
continue
''' Return a list of requested :class:`~kite.scenes` instances. '''
''' Return a list of requested :class:`~pyrocko.gf.meta.StaticResult` instances. ''' continue
''' Generator function to iterate over results of request.
Yields associated :py:class:`Source`, :class:`~pyrocko.gf.targets.Target`, :class:`~pyrocko.trace.Trace` instances in each iteration. '''
elif get == 'results': yield source, target, result
''' Open *snuffler* with requested traces. '''
trace.snuffle(self.pyrocko_traces(), **kwargs)
''' Base class for synthetic seismogram calculators. '''
''' Get list of available GF store IDs '''
return []
else:
data = num.diff(data, n=self.differentiate)
n, e = self.components sa, ca, _, _ = target.get_sin_cos_factors()
comps = [] if nonzero(ca): comps.append(n)
if nonzero(sa): comps.append(e)
return tuple(comps)
n, e = self.components sa, ca, _, _ = target.get_sin_cos_factors()
if nonzero(ca): data = base_seismogram[n].data * ca else: data = 0.0
if nonzero(sa): data = data + base_seismogram[e].data * sa
return data
return (self.c, )
return base_seismogram[self.c].data.copy()
(los_fac[:, 0] * -base_statics['displacement.d'] + los_fac[:, 1] * base_statics['displacement.e'] + los_fac[:, 2] * base_statics['displacement.n'])
'displacement': [VectorRule('displacement')], 'rotation': [VectorRule('rotation')], 'velocity': [ VectorRule('velocity'), VectorRule('displacement', differentiate=1)], 'acceleration': [ VectorRule('acceleration'), VectorRule('velocity', differentiate=1), VectorRule('displacement', differentiate=2)], 'pore_pressure': [ScalarRule('pore_pressure')], 'vertical_tilt': [HorizontalVectorRule('vertical_tilt')], 'darcy_velocity': [VectorRule('darcy_velocity')], }
'displacement': [StaticDisplacement()] }
source, store_targets, components, dsource_cache, nthreads)
e = SeismosizerError( 'Seismosizer failed with return code %i\n%s' % ( tr.err, str( OutOfBoundsContext( source=source, target=store_targets[iseis], distance=source.distance_to( store_targets[iseis]), components=components)))) raise e
seismogram, source, target) except SeismosizerError as e: result = e
dsource_cache = {}
for w in work: _, _, isources, itargets = w
sources = [psources[isource] for isource in isources] targets = [ptargets[itarget] for itarget in itargets]
components = set() for target in targets: rule = engine.get_rule(sources[0], target) components.update(rule.required_components(target))
for isource, source in zip(isources, sources): for itarget, target in zip(itargets, targets):
try: base_seismogram, tcounters = engine.base_seismogram( source, target, components, dsource_cache, nthreads) except meta.OutOfBounds as e: e.context = OutOfBoundsContext( source=sources[0], target=targets[0], distance=sources[0].distance_to(targets[0]), components=components) raise
n_records_stacked = 0 t_optimize = 0.0 t_stack = 0.0
for _, tr in base_seismogram.items(): n_records_stacked += tr.n_records_stacked t_optimize += tr.t_optimize t_stack += tr.t_stack
try: result = engine._post_process_dynamic( base_seismogram, source, target) result.n_records_stacked = n_records_stacked result.n_shared_stacking = len(sources) *\ len(targets) result.t_optimize = t_optimize result.t_stack = t_stack except SeismosizerError as e: result = e
tcounters.append(xtime()) yield (isource, itarget, result), tcounters
.required_components(target)
source, target, components, nthreads) except meta.OutOfBounds as e: e.context = OutOfBoundsContext( source=sources[0], target=targets[0], distance=float('nan'), components=components) raise base_statics, source, target)
''' Offline synthetic seismogram calculator.
:param use_env: if ``True``, fill :py:attr:`store_superdirs` and :py:attr:`store_dirs` with paths set in environment variables GF_STORE_SUPERDIRS and GF_STORE_DIRS. :param use_config: if ``True``, fill :py:attr:`store_superdirs` and :py:attr:`store_dirs` with paths set in the user's config file.
The config file can be found at :file:`~/.pyrocko/config.pf`
.. code-block :: python
gf_store_dirs: ['/home/pyrocko/gf_stores/ak135/'] gf_store_superdirs: ['/home/pyrocko/gf_stores/'] '''
String.T(), help='directories which are searched for Green\'s function stores')
String.T(), help='additional individual Green\'s function store directories')
optional=True, help='default store ID to be used when a request does not provide ' 'one')
self.store_superdirs.extend(env_store_superdirs.split(':'))
self.store_dirs.extend(env_store_dirs.split(':'))
raise TypeError("{} of {} is not of type list".format( sdir, self.__class__.__name__))
all(os.path.isfile(pjoin(store_dir, x)) for x in ('index', 'traces', 'config'))
logger.warning('store_superdir not available: %s' % d) continue
else: raise DuplicateStoreId( 'GF store ID %s is used in (at least) two ' 'different stores. Locations are: %s and %s' % (store_id, self._id_to_store_dir[store_id], store_dir))
''' Lookup directory given a GF store ID. '''
raise NoSuchStore(store_id, self.iter_store_dirs())
''' Get list of available store IDs. '''
else: raise NoDefaultStoreSet() else: self._effective_default_store_id = self.default_store_id
''' Get a store from the engine.
:param store_id: identifier of the store (optional) :returns: :py:class:`~pyrocko.gf.store.Store` object
If no ``store_id`` is provided the store associated with the :py:gattr:`default_store_id` is returned. Raises :py:exc:`NoDefaultStoreSet` if :py:gattr:`default_store_id` is undefined. '''
store = self.get_store(store_id) return store.config
''' Close and remove ids from cached stores. ''' store_ids = [] for store_id, store_ in self._open_stores.items(): store_.close() store_ids.append(store_id)
for store_id in store_ids: self._open_stores.pop(store_id)
except KeyError: pass
raise BadRequest( 'no rule to calculate "%s" with GFs from store "%s" ' 'for source model "%s"' % ( target.effective_quantity(), target.store_id, source.__class__.__name__))
nthreads=0):
logging.warning('Targets have different interpolation schemes!' ' Choosing %s for all targets.' % target.interpolation)
(t.tmin for t in targets), dtype=num.float, count=len(targets)) (t.tmax for t in targets), dtype=num.float, count=len(targets))
source, store_, dsource_cache, target)
deltat = 1. / target.sample_rate else:
base_source, receivers, components, deltat=deltat, itmin=itmin, nsamples=nsamples, interpolation=target.interpolation, optimization=target.optimization, nthreads=nthreads)
nthreads):
tcounters = [xtime()]
store_ = self.get_store(target.store_id) receiver = target.receiver(store_)
if target.tmin and target.tmax is not None: rate = store_.config.sample_rate itmin = int(num.floor(target.tmin * rate)) itmax = int(num.ceil(target.tmax * rate)) nsamples = itmax - itmin + 1 else: itmin = None nsamples = None
tcounters.append(xtime()) base_source = self._cached_discretize_basesource( source, store_, dsource_cache, target)
tcounters.append(xtime())
if target.sample_rate is not None: deltat = 1. / target.sample_rate else: deltat = None
base_seismogram = store_.seismogram( base_source, receiver, components, deltat=deltat, itmin=itmin, nsamples=nsamples, interpolation=target.interpolation, optimization=target.optimization, nthreads=nthreads)
tcounters.append(xtime())
base_seismogram = store.make_same_span(base_seismogram)
tcounters.append(xtime())
return base_seismogram, tcounters
else:
base_source, target, itsnapshot, components, target.interpolation, nthreads)
deltat, 0.0)
# repeat end point to prevent boundary effects
codes=target.codes, data=data[:-amplitudes.size], deltat=deltat, tmin=tmin)
''' Process a request.
::
process(**kwargs) process(request, **kwargs) process(sources, targets, **kwargs)
The request can be given as a :py:class:`Request` object, or such an object is created using ``Request(**kwargs)`` for convenience.
:returns: :py:class:`Response` object '''
raise BadRequest('invalid arguments')
kwargs['request'] = args[0]
# make sure stores are open before fork()
enumerate(request.sources)) enumerate(request.targets))
# Processing dynamic targets through # parimap(process_subrequest_dynamic)
else: _process_dynamic = process_dynamic
(i, nsub, [source_index[source] for source in m[k][0]], [target_index[target] for target in m[k][1] if not isinstance(target, StaticTarget)]) for (i, k) in enumerate(skeys)]
work_dynamic, request.sources, request.targets, self, nthreads):
status_callback(isub, nsub)
# Processing static targets through process_static (i, nsub, [source_index[source] for source in m[k][0]], [target_index[target] for target in m[k][1] if isinstance(target, StaticTarget)]) for (i, k) in enumerate(skeys)]
work_static, request.sources, request.targets, self, nthreads=nthreads):
status_callback(isub, nsub)
status_callback(nsub, nsub)
s.t_perc_discretize_source, s.t_perc_make_base_seismogram, s.t_perc_make_same_span, s.t_perc_post_process) = perc_dyn else:
s.t_perc_static_discretize_basesource, s.t_perc_static_sum_statics, s.t_perc_static_post_process) = perc_static
(rs1.ru_utime + rs1.ru_stime + rc1.ru_utime + rc1.ru_stime) - (rs0.ru_utime + rs0.ru_stime + rc0.ru_utime + rc0.ru_stime)) (rs1.ru_inblock + rc1.ru_inblock) - (rs0.ru_inblock + rc0.ru_inblock))
request=request, results_list=results_list, stats=s)
''' Client for remote synthetic seismogram calculator. '''
if request is None: request = Request(**kwargs)
return ws.seismosizer(url=self.url, site=self.site, request=request)
global g_engine
if d not in g_engine.store_superdirs: g_engine.store_superdirs.append(d)
return num.fromiter((getattr(s, k) for s in self), dtype=num.float)
raise NotImplementedError( 'this method should be implemented in subclass')
raise NotImplementedError( 'this method should be implemented in subclass')
self.sources.append(s)
return iter(self.sources)
return len(self.sources)
raise Exception('Invalid parameter "%s" for source type "%s"' % (ks[0], self.base.__class__.__name__))
for param in self.ordered_params()]
Source, SourceWithMagnitude, SourceWithDerivedMagnitude, ExplosionSource, RectangularExplosionSource, DCSource, CLVDSource, VLVDSource, MTSource, RectangularSource, PseudoDynamicRupture, DoubleDCSource, RingfaultSource, CombiSource, SFSource, PorePressurePointSource, PorePressureLineSource, ]
STF, BoxcarSTF, TriangularSTF, HalfSinusoidSTF, ResonatorSTF, ]
SeismosizerError BadRequest NoSuchStore DerivedMagnitudeError STFMode '''.split() + [S.__name__ for S in source_classes + stf_classes] + ''' Request ProcessingStats Response Engine LocalEngine RemoteEngine source_classes get_engine Range SourceGroup SourceList SourceGrid '''.split() |