1# http://pyrocko.org - GPLv3 

2# 

3# The Pyrocko Developers, 21st Century 

4# ---|P------/S----------~Lg---------- 

5 

6''' 

7 

8 

9 

10.. _coordinate-system-names: 

11 

12Coordinate systems 

13.................. 

14 

15Coordinate system names commonly used in source models. 

16 

17================= ============================================ 

18Name Description 

19================= ============================================ 

20``'xyz'`` northing, easting, depth in [m] 

21``'xy'`` northing, easting in [m] 

22``'latlon'`` latitude, longitude in [deg] 

23``'lonlat'`` longitude, latitude in [deg] 

24``'latlondepth'`` latitude, longitude in [deg], depth in [m] 

25================= ============================================ 

26''' 

27 

28from collections import defaultdict 

29from functools import cmp_to_key 

30import time 

31import math 

32import os 

33import re 

34import logging 

35try: 

36 import resource 

37except ImportError: 

38 resource = None 

39from hashlib import sha1 

40 

41import numpy as num 

42from scipy.interpolate import RegularGridInterpolator 

43 

44from pyrocko.guts import (Object, Float, String, StringChoice, List, 

45 Timestamp, Int, SObject, ArgumentError, Dict, 

46 ValidationError, Bool) 

47from pyrocko.guts_array import Array 

48 

49from pyrocko import moment_tensor as pmt 

50from pyrocko import trace, util, config, model, eikonal_ext 

51from pyrocko.orthodrome import ne_to_latlon 

52from pyrocko.model import Location 

53from pyrocko.modelling import OkadaSource, make_okada_coefficient_matrix, \ 

54 okada_ext, invert_fault_dislocations_bem 

55 

56from . import meta, store, ws 

57from .tractions import TractionField, DirectedTractions 

58from .targets import Target, StaticTarget, SatelliteTarget 

59 

60pjoin = os.path.join 

61 

62guts_prefix = 'pf' 

63 

64d2r = math.pi / 180. 

65r2d = 180. / math.pi 

66km = 1e3 

67 

68logger = logging.getLogger('pyrocko.gf.seismosizer') 

69 

70 

def cmp_none_aware(a, b):
    '''
    Three-way comparison that tolerates ``None`` values and tuples.

    Returns -1, 0 or 1. ``None`` sorts before any other value; tuples are
    compared element-wise, recursively.
    '''

    if isinstance(a, tuple) and isinstance(b, tuple):
        # Element-wise comparison; first non-equal pair decides.
        for ea, eb in zip(a, b):
            outcome = cmp_none_aware(ea, eb)
            if outcome != 0:
                return outcome

        return 0

    if a is None and b is None:
        return 0

    if a is None:
        return -1

    if b is None:
        return 1

    # Classic cmp() replacement for orderable values.
    return bool(a > b) - bool(a < b)

93 

94 

def xtime():
    # Thin indirection over the system clock — presumably kept as a single
    # override point for testing; returns current time in seconds.
    return time.time()

97 

98 

class SeismosizerError(Exception):
    '''
    Base class for errors raised by the seismosizer machinery.
    '''
    pass

101 

102 

class BadRequest(SeismosizerError):
    '''
    Raised when a request to the seismosizer is invalid.
    '''
    pass

105 

106 

class DuplicateStoreId(Exception):
    '''
    Raised when more than one GF store with the same id is encountered.
    '''
    pass

109 

110 

class NoDefaultStoreSet(Exception):
    '''
    Raised when a default GF store is needed but has not been configured.
    '''
    pass

113 

114 

class ConversionError(Exception):
    '''
    Raised when a conversion between source representations fails.
    '''
    pass

117 

118 

class NoSuchStore(BadRequest):
    '''
    Raised when a GF store cannot be located.

    :param store_id: id of the store that was looked for, or ``None``
    :param dirs: folders that were searched, or ``None``
    '''

    def __init__(self, store_id=None, dirs=None):
        BadRequest.__init__(self)
        self.store_id = store_id
        self.dirs = dirs

    def __str__(self):
        if self.store_id is None:
            message = 'GF store not found.'
        else:
            message = 'no GF store with id "%s" found.' % self.store_id

        if self.dirs is not None:
            # Append the list of searched folders for easier debugging.
            message += ' Searched folders:\n %s' % '\n '.join(
                sorted(self.dirs))

        return message

135 

136 

def ufloat(s):
    '''
    Convert string to float, allowing SI-style unit suffixes.

    Supported suffixes: ``k`` (1e3) and ``M`` (1e6), e.g. ``'2.5k'`` gives
    ``2500.0``.

    :param s: string to convert
    :returns: float value with the unit factor applied
    :raises ValueError: if the string consists of a unit suffix only, or
        is otherwise not a valid number
    '''
    units = {
        'k': 1e3,
        'M': 1e6,
    }

    original = s  # keep the unmodified input for error reporting

    factor = 1.0
    if s and s[-1] in units:
        factor = units[s[-1]]
        s = s[:-1]
        if not s:
            # Fixed: report the original input — previously the already
            # truncated (empty) string was interpolated into the message.
            raise ValueError("unit without a number: '%s'" % original)

    return float(s) * factor

151 

152 

def ufloat_or_none(s):
    '''
    Like :py:func:`ufloat` but map empty/falsy input to ``None``.
    '''
    return ufloat(s) if s else None

158 

159 

def int_or_none(s):
    '''
    Convert to ``int``, mapping empty/falsy input to ``None``.
    '''
    return int(s) if s else None

165 

166 

def nonzero(x, eps=1e-15):
    '''
    Check whether *x* deviates from zero by more than *eps*.
    '''
    return not abs(x) <= eps

169 

170 

def permudef(ln, j=0):
    '''
    Yield all combinations of a list of ``(key, choices)`` definitions.

    The list *ln* is mutated in place while iterating: for every yielded
    combination, item ``j`` holds ``(key, one_choice)``. The original
    ``(key, choices)`` entries are restored once iteration finishes, so
    consumers that need to keep a combination must copy it.
    '''
    if j >= len(ln):
        yield ln
        return

    key, choices = ln[j]
    for pick in choices:
        ln[j] = key, pick
        yield from permudef(ln, j + 1)

    # Restore the original definition entry before returning.
    ln[j] = key, choices

183 

184 

def arr(x):
    '''
    Coerce *x* into a numpy array with at least one dimension.
    '''
    a = num.asarray(x)
    return num.atleast_1d(a)

187 

188 

def discretize_rect_source(deltas, deltat, time, north, east, depth,
                           strike, dip, length, width,
                           anchor, velocity=None, stf=None,
                           nucleation_x=None, nucleation_y=None,
                           decimation_factor=1, pointsonly=False,
                           plane_coords=False,
                           aggressive_oversampling=False):
    '''
    Discretize a rectangular source plane into point sources.

    :param deltas: GF store grid spacings, used to choose the sub-source
        spacing
    :param deltat: sampling interval [s]
    :param time: source origin time
    :param north, east, depth: position of the anchor point [m]
    :param strike, dip: orientation angles [deg]
    :param length, width: plane dimensions [m]
    :param anchor: anchor name, looked up in ``map_anchor``
    :param velocity: rupture velocity [m/s]; required unless
        ``pointsonly`` is set
    :param stf: source time function, defaults to a unit pulse
    :param nucleation_x, nucleation_y: rupture nucleation point in plane
        coordinates [-1., 1.], defaults to the plane center
    :param decimation_factor: reduce the number of sub-sources by this
        factor
    :param pointsonly: if set, only return the point geometry
    :param plane_coords: if set, return points in (unrotated) plane
        coordinates
    :param aggressive_oversampling: if set, double the sub-source density
    :returns: ``(points, dl, dw, nl, nw)`` in points-only modes, else
        ``(points2, times2, amplitudes2, dl, dw, nl, nw)``
    :raises AttributeError: if ``velocity`` is missing in time mode
    '''

    if stf is None:
        stf = STF()

    if not velocity and not pointsonly:
        raise AttributeError('velocity is required in time mode')

    mindeltagf = float(num.min(deltas))
    if velocity:
        # Do not space sub-sources further than one sample of rupture
        # propagation.
        mindeltagf = min(mindeltagf, deltat * velocity)

    ln = length
    wd = width

    if aggressive_oversampling:
        nl = int((2. / decimation_factor) * num.ceil(ln / mindeltagf)) + 1
        nw = int((2. / decimation_factor) * num.ceil(wd / mindeltagf)) + 1
    else:
        nl = int((1. / decimation_factor) * num.ceil(ln / mindeltagf)) + 1
        nw = int((1. / decimation_factor) * num.ceil(wd / mindeltagf)) + 1

    n = int(nl * nw)

    dl = ln / nl
    dw = wd / nw

    xl = num.linspace(-0.5 * (ln - dl), 0.5 * (ln - dl), nl)
    xw = num.linspace(-0.5 * (wd - dw), 0.5 * (wd - dw), nw)

    points = num.zeros((n, 3))
    points[:, 0] = num.tile(xl, nw)
    points[:, 1] = num.repeat(xw, nl)

    # Rupture front distances are measured in centered plane coordinates,
    # before the anchor shift is applied below.
    if nucleation_x is not None:
        dist_x = num.abs(nucleation_x - points[:, 0])
    else:
        dist_x = num.zeros(n)

    if nucleation_y is not None:
        dist_y = num.abs(nucleation_y - points[:, 1])
    else:
        dist_y = num.zeros(n)

    dist = num.sqrt(dist_x**2 + dist_y**2)

    anch_x, anch_y = map_anchor[anchor]

    points[:, 0] -= anch_x * 0.5 * length
    points[:, 1] -= anch_y * 0.5 * width

    if plane_coords:
        return points, dl, dw, nl, nw

    rotmat = pmt.euler_to_matrix(dip * d2r, strike * d2r, 0.0)
    points = num.dot(rotmat.T, points.T).T

    points[:, 0] += north
    points[:, 1] += east
    points[:, 2] += depth

    if pointsonly:
        return points, dl, dw, nl, nw

    # Fixed: compute onset times only in time mode. Previously this was
    # done before the points-only returns, so pointsonly=True with
    # velocity=None (allowed by the guard above) crashed on the division.
    times = dist / velocity

    xtau, amplitudes = stf.discretize_t(deltat, time)
    nt = xtau.size

    points2 = num.repeat(points, nt, axis=0)
    times2 = (times[:, num.newaxis] + xtau[num.newaxis, :]).ravel()
    amplitudes2 = num.tile(amplitudes, n)

    return points2, times2, amplitudes2, dl, dw, nl, nw

268 

269 

def check_rect_source_discretisation(points2, nl, nw, store):
    '''
    Check whether a rectangular source is discretized finely enough.

    Compares the sub-source spacing against half the minimum shear
    wavelength resolvable by *store*. Returns ``True`` if the
    discretisation is acceptable (or trivially small), ``False`` otherwise.
    '''
    # We assume a non-rotated fault plane
    N_CRITICAL = 8
    points = points2.T.reshape((3, nl, nw))
    if points.size <= N_CRITICAL:
        logger.warning(
            'RectangularSource is defined by only %d sub-sources!'
            % points.size)
        return True

    # Spacing between neighbouring rows of sub-sources.
    distances = num.sqrt(
        (points[0, 0, :] - points[0, 1, :])**2 +
        (points[1, 0, :] - points[1, 1, :])**2 +
        (points[2, 0, :] - points[2, 1, :])**2)

    depths = points[2, 0, :]
    vs_profile = store.config.get_vs(
        lat=0., lon=0.,
        points=num.repeat(depths[:, num.newaxis], 3, axis=1),
        interpolation='multilinear')

    # Nyquist-limited minimum wavelength for the store's sampling rate.
    min_wavelength = vs_profile * (store.config.deltat * 2)
    return bool(num.all(min_wavelength > distances / 2))

294 

295 

def outline_rect_source(strike, dip, length, width, anchor):
    '''
    Compute the outline of a rectangular source plane.

    Returns a (5, 3) array of corner points (first corner repeated to
    close the polygon), rotated by strike and dip, relative to the anchor
    point.
    '''
    hl = 0.5 * length
    hw = 0.5 * width

    # Closed rectangle in unrotated plane coordinates.
    corners = num.array(
        [[-hl, -hw, 0.],
         [hl, -hw, 0.],
         [hl, hw, 0.],
         [-hl, hw, 0.],
         [-hl, -hw, 0.]])

    anch_x, anch_y = map_anchor[anchor]
    corners[:, 0] -= anch_x * hl
    corners[:, 1] -= anch_y * hw

    rotmat = pmt.euler_to_matrix(dip * d2r, strike * d2r, 0.0)

    return num.dot(rotmat.T, corners.T).T

313 

314 

def from_plane_coords(
        strike, dip, length, width, depth, x_plane_coords, y_plane_coords,
        lat=0., lon=0.,
        north_shift=0, east_shift=0,
        anchor='top', cs='xy'):
    '''
    Convert relative plane coordinates to absolute coordinates.

    ``x_plane_coords``/``y_plane_coords`` scale the plane's half-length /
    half-width (scalar or list). Output coordinate system is selected via
    ``cs``: ``'xy'``, ``'latlon'`` or ``'lonlat'``.

    NOTE(review): for any other ``cs`` value the result lists stay empty —
    no validation of ``cs`` is performed here; confirm against callers.
    '''

    ln = length
    wd = width
    x_abs = []
    y_abs = []
    # Normalize scalar input to single-element lists.
    if not isinstance(x_plane_coords, list):
        x_plane_coords = [x_plane_coords]
        y_plane_coords = [y_plane_coords]

    for x_plane, y_plane in zip(x_plane_coords, y_plane_coords):
        # Corner points of the scaled rectangle in plane coordinates
        # (closed polygon, first corner repeated).
        points = num.array(
            [[-0.5 * ln * x_plane, -0.5 * wd * y_plane, 0.],
             [0.5 * ln * x_plane, -0.5 * wd * y_plane, 0.],
             [0.5 * ln * x_plane, 0.5 * wd * y_plane, 0.],
             [-0.5 * ln * x_plane, 0.5 * wd * y_plane, 0.],
             [-0.5 * ln * x_plane, -0.5 * wd * y_plane, 0.]])

        anch_x, anch_y = map_anchor[anchor]
        points[:, 0] -= anch_x * 0.5 * length
        points[:, 1] -= anch_y * 0.5 * width

        # Rotate from plane coordinates into north/east/down.
        rotmat = pmt.euler_to_matrix(dip * d2r, strike * d2r, 0.0)

        points = num.dot(rotmat.T, points.T).T
        points[:, 0] += north_shift
        points[:, 1] += east_shift
        points[:, 2] += depth
        if cs in ('latlon', 'lonlat'):
            latlon = ne_to_latlon(lat, lon,
                                  points[:, 0], points[:, 1])
            latlon = num.array(latlon).T
            # NOTE(review): only single corner rows (1 and 2) are picked
            # here; presumably intentional but looks fragile — verify
            # against the corner layout above.
            x_abs.append(latlon[1:2, 1])
            y_abs.append(latlon[2:3, 0])
        if cs == 'xy':
            x_abs.append(points[1:2, 1])
            y_abs.append(points[2:3, 0])

    if cs == 'lonlat':
        return y_abs, x_abs
    else:
        return x_abs, y_abs

361 

362 

def points_on_rect_source(
        strike, dip, length, width, anchor,
        discretized_basesource=None, points_x=None, points_y=None):
    '''
    Compute absolute positions of points on a rectangular source plane.

    Either pass a *discretized_basesource* (its patch grid corners are
    used) or explicit relative coordinates *points_x*/*points_y* in
    [-1., 1.] (scalar, list or array).

    :returns: (n, 3) array of rotated, anchor-shifted points
    :raises ValueError: if neither a base source nor non-empty point
        coordinates are given
    '''

    ln = length
    wd = width

    # Normalize scalar/list input to arrays.
    if isinstance(points_x, (list, float)):
        points_x = num.array([points_x])
    if isinstance(points_y, (list, float)):
        points_y = num.array([points_y])

    if discretized_basesource:
        ds = discretized_basesource

        nl_patches = ds.nl + 1
        nw_patches = ds.nw + 1

        npoints = nl_patches * nw_patches
        points = num.zeros((npoints, 3))
        ln_patches = num.arange(nl_patches)
        wd_patches = num.arange(nw_patches)

        # Normalize patch indices to the range [-1., 1.].
        points_ln =\
            2 * ((ln_patches - num.min(ln_patches)) / num.ptp(ln_patches)) - 1
        points_wd =\
            2 * ((wd_patches - num.min(wd_patches)) / num.ptp(wd_patches)) - 1

        for il in range(nl_patches):
            for iw in range(nw_patches):
                points[il * nw_patches + iw, :] = num.array([
                    points_ln[il] * ln * 0.5,
                    points_wd[iw] * wd * 0.5, 0.0])

    elif points_x is not None and points_y is not None \
            and points_x.shape[0] > 0 and points_y.shape[0] > 0:
        points = num.zeros(shape=((len(points_x), 3)))
        for i, (x, y) in enumerate(zip(points_x, points_y)):
            points[i, :] = num.array(
                [x * 0.5 * ln, y * 0.5 * wd, 0.0])

    else:
        # Fixed: previously this fell through with `points` unbound,
        # raising an obscure UnboundLocalError.
        raise ValueError(
            'Either discretized_basesource or non-empty points_x and '
            'points_y must be given.')

    anch_x, anch_y = map_anchor[anchor]

    points[:, 0] -= anch_x * 0.5 * ln
    points[:, 1] -= anch_y * 0.5 * wd

    rotmat = pmt.euler_to_matrix(dip * d2r, strike * d2r, 0.0)

    return num.dot(rotmat.T, points.T).T

411 

412 

class InvalidGridDef(Exception):
    '''
    Raised when a grid or range specification string cannot be parsed.
    '''
    pass

415 

416 

class Range(SObject):
    '''
    Convenient range specification.

    Equivalent ways to specify the range [ 0., 1000., ... 10000. ]::

      Range('0 .. 10k : 1k')
      Range(start=0., stop=10e3, step=1e3)
      Range(0, 10e3, 1e3)
      Range('0 .. 10k @ 11')
      Range(start=0., stop=10*km, n=11)

      Range(0, 10e3, n=11)
      Range(values=[x*1e3 for x in range(11)])

    Depending on the use context, it can be possible to omit any part of the
    specification. E.g. in the context of extracting a subset of an already
    existing range, the existing range's specification values would be filled
    in where missing.

    The values are distributed with equal spacing, unless the ``spacing``
    argument is modified. The values can be created offset or relative to an
    external base value with the ``relative`` argument if the use context
    supports this.

    The range specification can be expressed with a short string
    representation::

        'start .. stop @ num | spacing, relative'
        'start .. stop : step | spacing, relative'

    most parts of the expression can be omitted if not needed. Whitespace is
    allowed for readability but can also be omitted.
    '''

    start = Float.T(optional=True)
    stop = Float.T(optional=True)
    step = Float.T(optional=True)
    n = Int.T(optional=True)
    values = Array.T(optional=True, dtype=float, shape=(None,))

    spacing = StringChoice.T(
        choices=['lin', 'log', 'symlog'],
        default='lin',
        optional=True)

    relative = StringChoice.T(
        choices=['', 'add', 'mult'],
        default='',
        optional=True)

    pattern = re.compile(r'^((?P<start>.*)\.\.(?P<stop>[^@|:]*))?'
                         r'(@(?P<n>[^|]+)|:(?P<step>[^|]+))?'
                         r'(\|(?P<stuff>.+))?$')

    def __init__(self, *args, **kwargs):
        d = {}
        if len(args) == 1:
            # Single argument: shorthand string.
            d = self.parse(args[0])
        elif len(args) in (2, 3):
            # Positional (start, stop[, step]).
            d['start'], d['stop'] = [float(x) for x in args[:2]]
            if len(args) == 3:
                d['step'] = float(args[2])

        for k, v in kwargs.items():
            if k in d:
                raise ArgumentError('%s specified more than once' % k)

            d[k] = v

        SObject.__init__(self, **d)

    def __str__(self):
        def sfloat(x):
            if x is not None:
                return '%g' % x
            else:
                return ''

        # Fixed: explicit None/length check. Truth-testing a multi-element
        # numpy array raises ValueError, and a single value of 0.0 would
        # incorrectly be treated as "no values".
        if self.values is not None and len(self.values):
            return ','.join('%g' % x for x in self.values)

        if self.start is None and self.stop is None:
            s0 = ''
        else:
            s0 = '%s .. %s' % (sfloat(self.start), sfloat(self.stop))

        s1 = ''
        if self.step is not None:
            s1 = [' : %g', ':%g'][s0 == ''] % self.step
        elif self.n is not None:
            s1 = [' @ %i', '@%i'][s0 == ''] % self.n

        if self.spacing == 'lin' and self.relative == '':
            s2 = ''
        else:
            x = []
            if self.spacing != 'lin':
                x.append(self.spacing)

            if self.relative != '':
                x.append(self.relative)

            s2 = ' | %s' % ','.join(x)

        return s0 + s1 + s2

    @classmethod
    def parse(cls, s):
        '''
        Parse a shorthand range string into a dict of constructor keywords.

        :raises InvalidGridDef: if the string is not a valid specification
        '''
        s = re.sub(r'\s+', '', s)
        m = cls.pattern.match(s)
        if not m:
            # Fall back to a plain comma-separated list of values.
            try:
                vals = [ufloat(x) for x in s.split(',')]
            except Exception:
                raise InvalidGridDef(
                    '"%s" is not a valid range specification' % s)

            return dict(values=num.array(vals, dtype=float))

        d = m.groupdict()
        try:
            start = ufloat_or_none(d['start'])
            stop = ufloat_or_none(d['stop'])
            step = ufloat_or_none(d['step'])
            n = int_or_none(d['n'])
        except Exception:
            raise InvalidGridDef(
                '"%s" is not a valid range specification' % s)

        spacing = 'lin'
        relative = ''

        if d['stuff'] is not None:
            t = d['stuff'].split(',')
            for x in t:
                if x in cls.spacing.choices:
                    spacing = x
                elif x and x in cls.relative.choices:
                    relative = x
                else:
                    raise InvalidGridDef(
                        '"%s" is not a valid range specification' % s)

        return dict(start=start, stop=stop, step=step, n=n, spacing=spacing,
                    relative=relative)

    def make(self, mi=None, ma=None, inc=None, base=None, eps=1e-5):
        '''
        Create the list of values described by this range.

        :param mi: fallback minimum, used when ``start`` is missing
        :param ma: fallback maximum, used when ``stop`` is missing
        :param inc: fallback increment, used when ``step`` is missing
        :param base: base value for relative ranges
        :param eps: tolerance for snapping ``stop`` onto the step raster
        :raises InvalidGridDef: on incomplete or inconsistent specification
        '''
        # Fixed: explicit None/length check (see __str__).
        if self.values is not None and len(self.values):
            return self.values

        start = self.start
        stop = self.stop
        step = self.step
        n = self.n

        # Negative step means the range runs from ma down to mi.
        swap = step is not None and step < 0.
        if start is None:
            start = [mi, ma][swap]
        if stop is None:
            stop = [ma, mi][swap]
        if step is None and inc is not None:
            step = [inc, -inc][ma < mi]

        if start is None or stop is None:
            raise InvalidGridDef(
                'Cannot use range specification "%s" without start '
                'and stop in this context' % self)

        if step is None and n is None:
            step = stop - start

        if n is None:
            if (step < 0) != (stop - start < 0):
                raise InvalidGridDef(
                    'Range specification "%s" has inconsistent ordering '
                    '(step < 0 => stop > start)' % self)

            n = int(round((stop - start) / step)) + 1
            stop2 = start + (n - 1) * step
            if abs(stop - stop2) > eps:
                # stop does not sit on the step raster: shorten the range.
                n = int(math.floor((stop - start) / step)) + 1
                stop = start + (n - 1) * step
            else:
                stop = stop2

        if start == stop:
            n = 1

        if self.spacing == 'lin':
            vals = num.linspace(start, stop, n)

        elif self.spacing in ('log', 'symlog'):
            if start > 0. and stop > 0.:
                vals = num.exp(num.linspace(num.log(start),
                                            num.log(stop), n))
            elif start < 0. and stop < 0.:
                vals = -num.exp(num.linspace(num.log(-start),
                                             num.log(-stop), n))
            else:
                raise InvalidGridDef(
                    'Log ranges should not include or cross zero '
                    '(in range specification "%s").' % self)

            if self.spacing == 'symlog':
                # Mirror the log-spaced values around zero.
                nvals = - vals
                vals = num.concatenate((nvals[::-1], vals))

        if self.relative in ('add', 'mult') and base is None:
            raise InvalidGridDef(
                'Cannot use relative range specification in this context.')

        vals = self.make_relative(base, vals)

        return list(map(float, vals))

    def make_relative(self, base, vals):
        '''
        Apply the ``relative`` mode (``'add'`` or ``'mult'``) to *vals*.
        '''
        if self.relative == 'add':
            vals += base

        if self.relative == 'mult':
            vals *= base

        return vals

641 

642 

class GridDefElement(Object):
    '''
    Single grid definition entry: a parameter name paired with a value
    range. May be constructed from the shorthand ``'param = range-spec'``.
    '''

    # Name of the source parameter to be varied.
    param = meta.StringID.T()
    # Range of values for that parameter.
    rs = Range.T()

    def __init__(self, shorthand=None, **kwargs):
        # Parse shorthand 'param = range-spec' when given as single
        # positional argument.
        if shorthand is not None:
            t = shorthand.split('=')
            if len(t) != 2:
                raise InvalidGridDef(
                    'Invalid grid specification element: %s' % shorthand)

            sp, sr = t[0].strip(), t[1].strip()

            kwargs['param'] = sp
            kwargs['rs'] = Range(sr)

        Object.__init__(self, **kwargs)

    def shorthand(self):
        # Inverse of the shorthand constructor form.
        return self.param + ' = ' + str(self.rs)

665 

class GridDef(Object):
    '''
    Grid definition: a collection of :py:class:`GridDefElement` entries.

    May be constructed from a shorthand string where elements are
    separated by ``';'`` and/or newlines, e.g.
    ``'depth = 0 .. 10k : 1k; strike = 0 .. 90 @ 10'``.
    '''

    elements = List.T(GridDefElement.T())

    def __init__(self, shorthand=None, **kwargs):
        if shorthand is not None:
            t = shorthand.splitlines()
            tt = []
            for x in t:
                x = x.strip()
                if x:
                    tt.extend(x.split(';'))

            elements = []
            for se in tt:
                # Fixed: each piece is a single 'param = range' element,
                # matching the declared List.T(GridDefElement.T()) —
                # previously nested GridDef objects were appended.
                elements.append(GridDefElement(se))

            kwargs['elements'] = elements

        Object.__init__(self, **kwargs)

    def shorthand(self):
        # NOTE(review): uses str() on the elements rather than their
        # shorthand() method; for guts objects str() is the YAML dump —
        # confirm this is the intended representation.
        return '; '.join(str(x) for x in self.elements)

690 

class Cloneable(object):
    '''
    Mix-in providing dict-like parameter access and copying.

    Relies on the guts property list ``T.propnames`` of the class it is
    mixed into.
    '''

    def __iter__(self):
        # Iterate over the declared parameter names.
        return iter(self.T.propnames)

    def __getitem__(self, k):
        if k in self.keys():
            return getattr(self, k)

        raise KeyError(k)

    def __setitem__(self, k, v):
        if k in self.keys():
            return setattr(self, k, v)

        raise KeyError(k)

    def clone(self, **kwargs):
        '''
        Make a copy of the object.

        A new object of the same class is created and initialized with the
        parameters of the object on which this method is called on. If
        ``kwargs`` are given, these are used to override any of the
        initialization parameters.
        '''

        params = {}
        for name in self:
            value = self[name]
            # Deep-copy nested cloneable parameters.
            if isinstance(value, Cloneable):
                value = value.clone()

            params[name] = value

        params.update(kwargs)
        return self.__class__(**params)

    @classmethod
    def keys(cls):
        '''
        Get list of the source model's parameter names.
        '''

        return cls.T.propnames

734 

735 

class STF(Object, Cloneable):

    '''
    Base class for source time functions.

    The base class behaves as a unit impulse at the reference time.
    '''

    def __init__(self, effective_duration=None, **kwargs):
        # Allow construction from an effective duration; it is converted
        # to the subclass' nominal duration.
        if effective_duration is not None:
            kwargs['duration'] = \
                effective_duration / self.factor_duration_to_effective()

        Object.__init__(self, **kwargs)

    @classmethod
    def factor_duration_to_effective(cls):
        # Ratio between effective and nominal duration; unity here,
        # overridden by subclasses.
        return 1.0

    def centroid_time(self, tref):
        # A unit impulse is centered on the reference time.
        return tref

    @property
    def effective_duration(self):
        return self.factor_duration_to_effective() * self.duration

    def discretize_t(self, deltat, tref):
        '''
        Discretize onto the GF sampling raster.

        Returns ``(times, amplitudes)``: a unit impulse split linearly
        between the two samples bracketing *tref*.
        '''
        t_floor = math.floor(tref / deltat) * deltat
        t_ceil = math.ceil(tref / deltat) * deltat
        if t_floor == t_ceil:
            # tref sits exactly on a sample.
            return num.array([t_floor], dtype=float), num.ones(1)

        weights = num.array(
            [t_ceil - tref, tref - t_floor], dtype=float) / deltat
        return num.array([t_floor, t_ceil], dtype=float), weights

    def base_key(self):
        # Cache key; the base STF is fully described by its type.
        return (type(self).__name__,)

772 

773 

# Default source time function: a unit pulse at the reference time
# (base-class STF has no finite duration).
g_unit_pulse = STF()

775 

776 

def sshift(times, amplitudes, tshift, deltat):
    '''
    Shift a sampled time series by *tshift*, linearly distributing each
    sample's amplitude between the two neighbouring raster positions.

    :returns: ``(times, amplitudes)``; the inputs are returned unchanged
        when the shift is an exact multiple of *deltat*
    '''

    lo = math.floor(tshift / deltat) * deltat
    hi = math.ceil(tshift / deltat) * deltat
    if lo == hi:
        # Shift is on the sampling raster — nothing to distribute.
        return times, amplitudes

    # One extra sample is needed to hold the spilled-over weight.
    shifted_amplitudes = num.zeros(amplitudes.size + 1, dtype=float)
    w_lo = (hi - tshift) / deltat
    w_hi = (tshift - lo) / deltat
    shifted_amplitudes[:-1] += w_lo * amplitudes
    shifted_amplitudes[1:] += w_hi * amplitudes

    shifted_times = num.arange(times.size + 1, dtype=float) * \
        deltat + times[0] + lo

    return shifted_times, shifted_amplitudes

793 

794 

class BoxcarSTF(STF):

    '''
    Boxcar type source time function.

    .. figure :: /static/stf-BoxcarSTF.svg
        :width: 40%
        :align: center
        :alt: boxcar source time function
    '''

    duration = Float.T(
        default=0.0,
        help='duration of the boxcar')

    anchor = Float.T(
        default=0.0,
        help='anchor point with respect to source.time: ('
             '-1.0: left -> source duration [0, T] ~ hypocenter time, '
             ' 0.0: center -> source duration [-T/2, T/2] ~ centroid time, '
             '+1.0: right -> source duration [-T, 0] ~ rupture end time)')

    @classmethod
    def factor_duration_to_effective(cls):
        # For the boxcar, effective and nominal duration coincide.
        return 1.0

    def centroid_time(self, tref):
        # Centroid offset is half the duration, scaled by the anchor.
        return tref - 0.5 * self.duration * self.anchor

    def discretize_t(self, deltat, tref):
        '''
        Discretize the boxcar onto the sampling interval *deltat*.

        Returns ``(times, amplitudes)`` with unit-sum amplitudes; sample
        weights are exact integrals of the boxcar over each sampling cell.
        '''
        tmin_stf = tref - self.duration * (self.anchor + 1.) * 0.5
        tmax_stf = tref + self.duration * (1. - self.anchor) * 0.5
        tmin = round(tmin_stf / deltat) * deltat
        tmax = round(tmax_stf / deltat) * deltat
        nt = int(round((tmax - tmin) / deltat)) + 1
        times = num.linspace(tmin, tmax, nt)
        amplitudes = num.ones_like(times)
        if times.size > 1:
            t_edges = num.linspace(
                tmin - 0.5 * deltat, tmax + 0.5 * deltat, nt + 1)
            # Boxcar as a piecewise-linear function with vertical edges
            # (duplicated breakpoints at both ends).
            t = tmin_stf + self.duration * num.array(
                [0.0, 0.0, 1.0, 1.0], dtype=float)
            f = num.array([0., 1., 1., 0.], dtype=float)
            amplitudes = util.plf_integrate_piecewise(t_edges, t, f)
            amplitudes /= num.sum(amplitudes)

        # Re-center so the discrete centroid matches centroid_time().
        tshift = (num.sum(amplitudes * times) - self.centroid_time(tref))

        return sshift(times, amplitudes, -tshift, deltat)

    def base_key(self):
        # Cache key: type plus the parameters affecting discretization.
        return (type(self).__name__, self.duration, self.anchor)

848 

class TriangularSTF(STF):

    '''
    Triangular type source time function.

    .. figure :: /static/stf-TriangularSTF.svg
        :width: 40%
        :align: center
        :alt: triangular source time function
    '''

    duration = Float.T(
        default=0.0,
        help='baseline of the triangle')

    peak_ratio = Float.T(
        default=0.5,
        help='fraction of time compared to duration, '
             'when the maximum amplitude is reached')

    anchor = Float.T(
        default=0.0,
        help='anchor point with respect to source.time: ('
             '-1.0: left -> source duration [0, T] ~ hypocenter time, '
             ' 0.0: center -> source duration [-T/2, T/2] ~ centroid time, '
             '+1.0: right -> source duration [-T, 0] ~ rupture end time)')

    @classmethod
    def factor_duration_to_effective(cls, peak_ratio=None):
        # Conversion factor between nominal (baseline) and effective
        # duration; depends on where the triangle peaks.
        if peak_ratio is None:
            peak_ratio = cls.peak_ratio.default()

        return math.sqrt((peak_ratio**2 - peak_ratio + 1.0) * 2.0 / 3.0)

    def __init__(self, effective_duration=None, **kwargs):
        # The effective-duration conversion depends on peak_ratio, so the
        # base-class constructor cannot be used directly here.
        if effective_duration is not None:
            kwargs['duration'] = effective_duration / \
                self.factor_duration_to_effective(
                    kwargs.get('peak_ratio', None))

        STF.__init__(self, **kwargs)

    @property
    def centroid_ratio(self):
        # Position of the triangle's centroid as fraction of the duration.
        ra = self.peak_ratio
        rb = 1.0 - ra
        return self.peak_ratio + (rb**2 / 3. - ra**2 / 3.) / (ra + rb)

    def centroid_time(self, tref):
        ca = self.centroid_ratio
        cb = 1.0 - ca
        # Which fraction applies depends on the anchor's sign.
        if self.anchor <= 0.:
            return tref - ca * self.duration * self.anchor
        else:
            return tref - cb * self.duration * self.anchor

    @property
    def effective_duration(self):
        return self.duration * self.factor_duration_to_effective(
            self.peak_ratio)

    def tminmax_stf(self, tref):
        # Absolute start/end time of the triangle for the given anchor.
        ca = self.centroid_ratio
        cb = 1.0 - ca
        if self.anchor <= 0.:
            tmin_stf = tref - ca * self.duration * (self.anchor + 1.)
            tmax_stf = tmin_stf + self.duration
        else:
            tmax_stf = tref + cb * self.duration * (1. - self.anchor)
            tmin_stf = tmax_stf - self.duration

        return tmin_stf, tmax_stf

    def discretize_t(self, deltat, tref):
        '''
        Discretize the triangle onto the sampling interval *deltat*.

        Returns ``(times, amplitudes)``; amplitudes are exact integrals of
        the piecewise-linear triangle over each sampling cell, normalized
        to unit sum.
        '''
        tmin_stf, tmax_stf = self.tminmax_stf(tref)

        tmin = round(tmin_stf / deltat) * deltat
        tmax = round(tmax_stf / deltat) * deltat
        nt = int(round((tmax - tmin) / deltat)) + 1
        if nt > 1:
            t_edges = num.linspace(
                tmin - 0.5 * deltat, tmax + 0.5 * deltat, nt + 1)
            # Triangle breakpoints: start, peak, end.
            t = tmin_stf + self.duration * num.array(
                [0.0, self.peak_ratio, 1.0], dtype=float)
            f = num.array([0., 1., 0.], dtype=float)
            amplitudes = util.plf_integrate_piecewise(t_edges, t, f)
            amplitudes /= num.sum(amplitudes)
        else:
            amplitudes = num.ones(1)

        times = num.linspace(tmin, tmax, nt)
        return times, amplitudes

    def base_key(self):
        # Cache key: type plus the parameters affecting discretization.
        return (
            type(self).__name__, self.duration, self.peak_ratio, self.anchor)

945 

946 

class HalfSinusoidSTF(STF):

    '''
    Half sinusoid type source time function.

    .. figure :: /static/stf-HalfSinusoidSTF.svg
        :width: 40%
        :align: center
        :alt: half-sinusouid source time function
    '''

    duration = Float.T(
        default=0.0,
        help='duration of the half-sinusoid (baseline)')

    anchor = Float.T(
        default=0.0,
        help='anchor point with respect to source.time: ('
             '-1.0: left -> source duration [0, T] ~ hypocenter time, '
             ' 0.0: center -> source duration [-T/2, T/2] ~ centroid time, '
             '+1.0: right -> source duration [-T, 0] ~ rupture end time)')

    exponent = Int.T(
        default=1,
        help='set to 2 to use square of the half-period sinusoidal function.')

    def __init__(self, effective_duration=None, **kwargs):
        # The effective-duration conversion depends on the exponent, so
        # the base-class constructor cannot be used directly here.
        if effective_duration is not None:
            kwargs['duration'] = effective_duration / \
                self.factor_duration_to_effective(
                    kwargs.get('exponent', 1))

        STF.__init__(self, **kwargs)

    @classmethod
    def factor_duration_to_effective(cls, exponent):
        # Conversion factor between nominal and effective duration for
        # the plain (1) and squared (2) half-sinusoid.
        if exponent == 1:
            return math.sqrt(3.0 * math.pi**2 - 24.0) / math.pi
        elif exponent == 2:
            return math.sqrt(math.pi**2 - 6) / math.pi
        else:
            raise ValueError('Exponent for HalfSinusoidSTF must be 1 or 2.')

    @property
    def effective_duration(self):
        return self.duration * self.factor_duration_to_effective(self.exponent)

    def centroid_time(self, tref):
        # Symmetric shape: centroid offset is half the duration, scaled
        # by the anchor.
        return tref - 0.5 * self.duration * self.anchor

    def discretize_t(self, deltat, tref):
        '''
        Discretize the half-sinusoid onto the sampling interval *deltat*.

        Returns ``(times, amplitudes)``; the amplitudes are differences of
        the analytic antiderivative at the cell edges (exact cell
        integrals), normalized to unit sum.
        '''
        tmin_stf = tref - self.duration * (self.anchor + 1.) * 0.5
        tmax_stf = tref + self.duration * (1. - self.anchor) * 0.5
        tmin = round(tmin_stf / deltat) * deltat
        tmax = round(tmax_stf / deltat) * deltat
        nt = int(round((tmax - tmin) / deltat)) + 1
        if nt > 1:
            # Clamp the cell edges to the STF's support.
            t_edges = num.maximum(tmin_stf, num.minimum(tmax_stf, num.linspace(
                tmin - 0.5 * deltat, tmax + 0.5 * deltat, nt + 1)))

            if self.exponent == 1:
                # Antiderivative of sin(pi*t/T).
                fint = -num.cos(
                    (t_edges - tmin_stf) * (math.pi / self.duration))

            elif self.exponent == 2:
                # Antiderivative of sin^2(pi*t/T).
                fint = (t_edges - tmin_stf) / self.duration \
                    - 1.0 / (2.0 * math.pi) * num.sin(
                        (t_edges - tmin_stf) * (2.0 * math.pi / self.duration))
            else:
                raise ValueError(
                    'Exponent for HalfSinusoidSTF must be 1 or 2.')

            amplitudes = fint[1:] - fint[:-1]
            amplitudes /= num.sum(amplitudes)
        else:
            amplitudes = num.ones(1)

        times = num.linspace(tmin, tmax, nt)
        return times, amplitudes

    def base_key(self):
        # Cache key: type plus the parameters affecting discretization.
        # NOTE(review): `exponent` is not part of the key although it
        # changes the discretized shape — confirm whether this is
        # intentional.
        return (type(self).__name__, self.duration, self.anchor)

1029 

1030 

class SmoothRampSTF(STF):
    '''
    Smooth-ramp type source time function for near-field displacement.
    Based on moment function of double-couple point source proposed by Bruestle
    and Mueller (PEPI, 1983).

    .. [1] W. Bruestle, G. Mueller (1983), Moment and duration of shallow
        earthquakes from Love-wave modelling for regional distances, PEPI 32,
        312-324.

    .. figure :: /static/stf-SmoothRampSTF.svg
        :width: 40%
        :alt: smooth ramp source time function
    '''
    duration = Float.T(
        default=0.0,
        help='duration of the ramp (baseline)')

    rise_ratio = Float.T(
        default=0.5,
        help='fraction of time compared to duration, '
             'when the maximum amplitude is reached')

    anchor = Float.T(
        default=0.0,
        help='anchor point with respect to source.time: ('
             '-1.0: left -> source duration ``[0, T]`` ~ hypocenter time, '
             '0.0: center -> source duration ``[-T/2, T/2]`` ~ centroid time, '
             '+1.0: right -> source duration ``[-T, 0]`` ~ rupture end time)')

    def discretize_t(self, deltat, tref):
        '''
        Discretize the smooth ramp onto the sampling interval *deltat*.

        Returns ``(times, amplitudes)`` with unit-sum amplitudes: a smooth
        cosine-based rise over ``rise_ratio * duration`` followed by a
        constant plateau.
        '''
        tmin_stf = tref - self.duration * (self.anchor + 1.) * 0.5
        tmax_stf = tref + self.duration * (1. - self.anchor) * 0.5
        tmin = round(tmin_stf / deltat) * deltat
        tmax = round(tmax_stf / deltat) * deltat
        D = round((tmax - tmin) / deltat) * deltat
        nt = int(round(D / deltat)) + 1
        times = num.linspace(tmin, tmax, nt)
        if nt > 1:
            rise_time = self.rise_ratio * self.duration
            amplitudes = num.ones_like(times)
            tp = tmin + rise_time
            # Samples within the rising part of the ramp.
            ii = num.where(times <= tp)
            t_inc = times[ii]
            # Smooth rise after Bruestle & Mueller (1983).
            a = num.cos(num.pi * (t_inc - tmin_stf) / rise_time)
            b = num.cos(3 * num.pi * (t_inc - tmin_stf) / rise_time) - 1.0
            amplitudes[ii] = (9. / 16.) * (1 - a + (1. / 9.) * b)

            amplitudes /= num.sum(amplitudes)
        else:
            amplitudes = num.ones(1)

        return times, amplitudes

    def base_key(self):
        # Cache key: type plus the parameters affecting discretization.
        return (type(self).__name__,
                self.duration, self.rise_ratio, self.anchor)

1088 

1089 

class ResonatorSTF(STF):
    '''
    Simple resonator like source time function.

    .. math ::

        f(t) = 0 for t < 0
        f(t) = e^{-t/tau} * sin(2 * pi * f * t)


    .. figure :: /static/stf-SmoothRampSTF.svg
        :width: 40%
        :alt: smooth ramp source time function

    '''

    duration = Float.T(
        default=0.0,
        help='decay time')

    frequency = Float.T(
        default=1.0,
        help='resonance frequency')

    def discretize_t(self, deltat, tref):
        '''
        Sample the damped oscillation onto the GF raster.

        The signal starts at the reference time and is sampled until
        three decay times have elapsed.
        '''
        t_begin = math.floor(tref / deltat) * deltat
        t_end = math.ceil((tref + self.duration * 3) / deltat) * deltat
        times = util.arange2(t_begin, t_end, deltat)

        # Exponentially decaying sinusoid, zero-phase at tref.
        elapsed = times - tref
        decay = num.exp(-elapsed / self.duration)
        oscillation = num.sin(2.0 * num.pi * self.frequency * elapsed)

        return times, decay * oscillation

    def base_key(self):
        # Cache key: type plus the parameters affecting discretization.
        return (type(self).__name__, self.duration, self.frequency)

1128 

1129 

class STFMode(StringChoice):
    '''
    Mode of application of the source time function.

    ``'pre'``: applied during discretization of the source (before
    Green's function stacking); ``'post'``: applied to the stacked
    synthetic seismograms in post-processing.
    '''
    choices = ['pre', 'post']

1132 

1133 

class Source(Location, Cloneable):
    '''
    Base class for all source models.
    '''

    # Optional free-form label for the source.
    name = String.T(optional=True, default='')

    time = Timestamp.T(
        default=Timestamp.D('1970-01-01 00:00:00'),
        help='source origin time.')

    stf = STF.T(
        optional=True,
        help='source time function.')

    stf_mode = STFMode.T(
        default='post',
        help='whether to apply source time function in pre or '
             'post-processing.')

    def __init__(self, **kwargs):
        Location.__init__(self, **kwargs)

    def update(self, **kwargs):
        '''
        Change some of the source models parameters.

        Example::

            >>> from pyrocko import gf
            >>> s = gf.DCSource()
            >>> s.update(strike=66., dip=33.)
            >>> print(s)
            --- !pf.DCSource
            depth: 0.0
            time: 1970-01-01 00:00:00
            magnitude: 6.0
            strike: 66.0
            dip: 33.0
            rake: 0.0

        '''

        # Item assignment is provided by the Cloneable mixin.
        for (k, v) in kwargs.items():
            self[k] = v

    def grid(self, **variables):
        '''
        Create grid of source model variations.

        :returns: :py:class:`SourceGrid` instance.

        Example::

            >>> from pyrocko import gf
            >>> base = DCSource()
            >>> R = gf.Range
            >>> for s in base.grid(strike=R(0., 90., 15.)):
            ...     print(s.strike)

        '''
        return SourceGrid(base=self, variables=variables)

    def base_key(self):
        '''
        Get key to decide about source discretization / GF stack sharing.

        When two source models differ only in amplitude and origin time, the
        discretization and the GF stacking can be done only once for a unit
        amplitude and a zero origin time and the amplitude and origin times of
        the seismograms can be applied during post-processing of the synthetic
        seismogram.

        For any derived parameterized source model, this method is called to
        decide if discretization and stacking of the source should be shared.
        When two source models return an equal vector of values discretization
        is shared.
        '''
        return (self.depth, self.lat, self.north_shift,
                self.lon, self.east_shift, self.time, type(self).__name__) + \
            self.effective_stf_pre().base_key()

    def get_factor(self):
        '''
        Get the scaling factor to be applied during post-processing.

        Discretization of the base seismogram is usually done for a unit
        amplitude, because a common factor can be efficiently multiplied to
        final seismograms. This eliminates to do repeat the stacking when
        creating seismograms for a series of source models only differing in
        amplitude.

        This method should return the scaling factor to apply in the
        post-processing (often this is simply the scalar moment of the source).
        '''

        return 1.0

    def effective_stf_pre(self):
        '''
        Return the STF applied before stacking of the Green's functions.

        This STF is used during discretization of the parameterized source
        models, i.e. to produce a temporal distribution of point sources.

        Handling of the STF before stacking of the GFs is less efficient but
        allows to use different source time functions for different parts of
        the source.
        '''

        # Falls back to a unit pulse unless stf_mode == 'pre'.
        if self.stf is not None and self.stf_mode == 'pre':
            return self.stf
        else:
            return g_unit_pulse

    def effective_stf_post(self):
        '''
        Return the STF applied after stacking of the Green's functions.

        This STF is used in the post-processing of the synthetic seismograms.

        Handling of the STF after stacking of the GFs is usually more efficient
        but is only possible when a common STF is used for all subsources.
        '''

        if self.stf is not None and self.stf_mode == 'post':
            return self.stf
        else:
            return g_unit_pulse

    def _dparams_base(self):
        # Common keyword arguments for single-point discretized sources.
        return dict(times=arr(self.time),
                    lat=self.lat, lon=self.lon,
                    north_shifts=arr(self.north_shift),
                    east_shifts=arr(self.east_shift),
                    depths=arr(self.depth))

    def _hash(self):
        # Hex digest over base_key(), used as a stable identifier for
        # discretization sharing.
        sha = sha1()
        for k in self.base_key():
            sha.update(str(k).encode())
        return sha.hexdigest()

    def _dparams_base_repeated(self, times):
        # Like _dparams_base(), but with positions repeated to match a
        # temporal distribution of point sources at the given times.
        if times is None:
            return self._dparams_base()

        nt = times.size
        north_shifts = num.repeat(self.north_shift, nt)
        east_shifts = num.repeat(self.east_shift, nt)
        depths = num.repeat(self.depth, nt)
        return dict(times=times,
                    lat=self.lat, lon=self.lon,
                    north_shifts=north_shifts,
                    east_shifts=east_shifts,
                    depths=depths)

    def pyrocko_event(self, store=None, target=None, **kwargs):
        '''
        Convert to a :py:class:`pyrocko.model.Event` object.

        Extra keyword arguments are passed through to the event
        constructor.
        '''
        duration = None
        if self.stf:
            duration = self.stf.effective_duration

        return model.Event(
            lat=self.lat,
            lon=self.lon,
            north_shift=self.north_shift,
            east_shift=self.east_shift,
            time=self.time,
            name=self.name,
            depth=self.depth,
            duration=duration,
            **kwargs)

    def geometry(self, **kwargs):
        # Subclasses with spatial extent provide their geometry here.
        raise NotImplementedError

    def outline(self, cs='xyz'):
        '''
        Get source outline in the requested coordinate system.

        For a point source this is a single point. See
        :ref:`coordinate-system-names` for valid values of ``cs``.
        Unknown ``cs`` values fall through and return ``None``.
        '''
        points = num.atleast_2d(num.zeros([1, 3]))

        points[:, 0] += self.north_shift
        points[:, 1] += self.east_shift
        points[:, 2] += self.depth
        if cs == 'xyz':
            return points
        elif cs == 'xy':
            return points[:, :2]
        elif cs in ('latlon', 'lonlat'):
            latlon = ne_to_latlon(
                self.lat, self.lon, points[:, 0], points[:, 1])

            latlon = num.array(latlon).T
            if cs == 'latlon':
                return latlon
            else:
                return latlon[:, ::-1]

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        '''
        Create a source from a :py:class:`pyrocko.model.Event`.

        :raises: ``ConversionError`` if the event has no depth. Extra
            keyword arguments override the values taken from the event.
        '''
        if ev.depth is None:
            raise ConversionError(
                'Cannot convert event object to source object: '
                'no depth information available')

        stf = None
        if ev.duration is not None:
            stf = HalfSinusoidSTF(effective_duration=ev.duration)

        d = dict(
            name=ev.name,
            time=ev.time,
            lat=ev.lat,
            lon=ev.lon,
            north_shift=ev.north_shift,
            east_shift=ev.east_shift,
            depth=ev.depth,
            stf=stf)
        d.update(kwargs)
        return cls(**d)

    def get_magnitude(self):
        # The plain base class has no notion of magnitude; subclasses
        # override this.
        raise NotImplementedError(
            '%s does not implement get_magnitude()'
            % self.__class__.__name__)

1356 

1357 

class SourceWithMagnitude(Source):
    '''
    Base class for sources containing a moment magnitude.

    A ``moment`` keyword argument may be given at construction instead
    of ``magnitude``; it is converted on the fly.
    '''

    magnitude = Float.T(
        default=6.0,
        help='Moment magnitude Mw as in [Hanks and Kanamori, 1979]')

    def __init__(self, **kwargs):
        # The 'moment' keyword is always consumed; it only takes effect
        # when no explicit 'magnitude' was given.
        if 'moment' in kwargs:
            m0 = kwargs.pop('moment')
            if 'magnitude' not in kwargs:
                kwargs['magnitude'] = float(pmt.moment_to_magnitude(m0))

        Source.__init__(self, **kwargs)

    @property
    def moment(self):
        # Derived view of the magnitude as scalar moment [Nm].
        return float(pmt.magnitude_to_moment(self.magnitude))

    @moment.setter
    def moment(self, value):
        self.magnitude = float(pmt.moment_to_magnitude(value))

    def pyrocko_event(self, store=None, target=None, **kwargs):
        '''
        Convert to a :py:class:`pyrocko.model.Event`, carrying the
        magnitude.
        '''
        return Source.pyrocko_event(
            self, store, target, magnitude=self.magnitude, **kwargs)

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        '''
        Create a source from an event, taking over its magnitude when
        set (and non-zero).
        '''
        params = dict(magnitude=ev.magnitude) if ev.magnitude else {}
        params.update(kwargs)
        return super(SourceWithMagnitude, cls).from_pyrocko_event(
            ev, **params)

    def get_magnitude(self):
        return self.magnitude

1400 

1401 

class DerivedMagnitudeError(ValidationError):
    '''
    Raised when a derived magnitude cannot be computed or the
    parameters controlling it are in conflict.
    '''
    pass

1404 

1405 

class SourceWithDerivedMagnitude(Source):
    # Base class for sources whose magnitude is derived from other
    # parameters rather than stored directly.

    class __T(Source.T):
        # Extended guts property type: validation additionally checks
        # for conflicting parameter combinations.

        def validate_extra(self, val):
            Source.T.validate_extra(self, val)
            val.check_conflicts()

    def check_conflicts(self):
        '''
        Check for parameter conflicts.

        To be overloaded in subclasses. Raises :py:exc:`DerivedMagnitudeError`
        on conflicts.
        '''
        pass

    def get_magnitude(self, store=None, target=None):
        # Subclasses derive the magnitude; the base class cannot.
        raise DerivedMagnitudeError('No magnitude set.')

    def get_moment(self, store=None, target=None):
        '''
        Get the scalar moment [Nm] corresponding to the derived
        magnitude.
        '''
        return float(pmt.magnitude_to_moment(
            self.get_magnitude(store, target)))

    def pyrocko_moment_tensor(self, store=None, target=None):
        raise NotImplementedError(
            '%s does not implement pyrocko_moment_tensor()'
            % self.__class__.__name__)

    def pyrocko_event(self, store=None, target=None, **kwargs):
        '''
        Convert to a :py:class:`pyrocko.model.Event`.

        Moment tensor and magnitude are attached when they can be
        derived; otherwise they are left as ``None``.
        '''
        try:
            mt = self.pyrocko_moment_tensor(store, target)
            # NOTE(review): get_magnitude() is called without
            # store/target here, unlike the moment tensor above --
            # confirm this is intended for store-dependent magnitudes.
            magnitude = self.get_magnitude()
        except (DerivedMagnitudeError, NotImplementedError):
            mt = None
            magnitude = None

        return Source.pyrocko_event(
            self, store, target,
            moment_tensor=mt,
            magnitude=magnitude,
            **kwargs)

1448 

1449 

class ExplosionSource(SourceWithDerivedMagnitude):
    '''
    An isotropic explosion point source.

    The source strength may be given either as moment magnitude
    (``magnitude``) or as volume change (``volume_change``); setting
    both raises :py:exc:`DerivedMagnitudeError`. Conversion between the
    two needs elastic moduli from the earth model of the GF store in
    use.
    '''

    magnitude = Float.T(
        optional=True,
        help='moment magnitude Mw as in [Hanks and Kanamori, 1979]')

    volume_change = Float.T(
        optional=True,
        help='volume change of the explosion/implosion or '
             'the contracting/extending magmatic source. [m^3]')

    discretized_source_class = meta.DiscretizedExplosionSource

    def __init__(self, **kwargs):
        # Accept scalar moment as an alternative to magnitude; only
        # effective when no explicit 'magnitude' is given.
        if 'moment' in kwargs:
            mom = kwargs.pop('moment')
            if 'magnitude' not in kwargs:
                kwargs['magnitude'] = float(pmt.moment_to_magnitude(mom))

        SourceWithDerivedMagnitude.__init__(self, **kwargs)

    def base_key(self):
        return SourceWithDerivedMagnitude.base_key(self) + \
            (self.volume_change,)

    def check_conflicts(self):
        '''
        Raise :py:exc:`DerivedMagnitudeError` when both ``magnitude``
        and ``volume_change`` are defined.
        '''
        if self.magnitude is not None and self.volume_change is not None:
            raise DerivedMagnitudeError(
                'Magnitude and volume_change are both defined.')

    def get_magnitude(self, store=None, target=None):
        '''
        Get the moment magnitude, deriving it from ``volume_change``
        when necessary. Falls back to the magnitude of a unit-moment
        source when neither parameter is set.
        '''
        self.check_conflicts()

        if self.magnitude is not None:
            return self.magnitude

        elif self.volume_change is not None:
            moment = self.volume_change * \
                self.get_moment_to_volume_change_ratio(store, target)

            # Implosions (negative volume change) still have a positive
            # scalar moment.
            return float(pmt.moment_to_magnitude(abs(moment)))
        else:
            return float(pmt.moment_to_magnitude(1.0))

    def get_volume_change(self, store=None, target=None):
        '''
        Get the volume change [m^3], deriving it from ``magnitude``
        when necessary.
        '''
        self.check_conflicts()

        if self.volume_change is not None:
            return self.volume_change

        elif self.magnitude is not None:
            moment = float(pmt.magnitude_to_moment(self.magnitude))
            return moment / self.get_moment_to_volume_change_ratio(
                store, target)

        else:
            # Volume change of a unit-moment source. Fix: pass target
            # along so the same interpolation is used as in the branch
            # above (previously omitted).
            return 1.0 / self.get_moment_to_volume_change_ratio(
                store, target)

    def get_moment_to_volume_change_ratio(self, store, target=None):
        '''
        Get the conversion factor [Pa] between scalar moment and volume
        change, ``2*mu + kappa``, evaluated at the source position in
        the store's earth model.

        :raises: :py:exc:`DerivedMagnitudeError` when no store is given
            or the source position is outside the model.
        '''
        if store is None:
            raise DerivedMagnitudeError(
                'Need earth model to convert between volume change and '
                'magnitude.')

        points = num.array(
            [[self.north_shift, self.east_shift, self.depth]], dtype=float)

        interpolation = target.interpolation if target else 'multilinear'
        try:
            shear_moduli = store.config.get_shear_moduli(
                self.lat, self.lon,
                points=points,
                interpolation=interpolation)[0]

            bulk_moduli = store.config.get_bulk_moduli(
                self.lat, self.lon,
                points=points,
                interpolation=interpolation)[0]
        except meta.OutOfBounds:
            # Fix: message previously mentioned only the shear modulus
            # although the bulk modulus lookup can fail here as well.
            raise DerivedMagnitudeError(
                'Could not get shear and bulk moduli at source position.')

        return float(2. * shear_moduli + bulk_moduli)

    def get_factor(self):
        # Amplitudes are scaled during discretization; no common
        # post-processing factor.
        return 1.0

    def discretize_basesource(self, store, target=None):
        '''
        Discretize to a temporal distribution of isotropic point
        sources on the store's sampling.
        '''
        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)

        # sqrt(2/3) converts scalar moment to the isotropic m0
        # convention used by DiscretizedExplosionSource.
        amplitudes *= self.get_moment(store, target) * math.sqrt(2. / 3.)

        if self.volume_change is not None:
            if self.volume_change < 0.:
                amplitudes *= -1

        return meta.DiscretizedExplosionSource(
            m0s=amplitudes,
            **self._dparams_base_repeated(times))

    def pyrocko_moment_tensor(self, store=None, target=None):
        a = self.get_moment(store, target) * math.sqrt(2. / 3.)
        return pmt.MomentTensor(m=pmt.symmat6(a, a, a, 0., 0., 0.))

1557 

1558 

class RectangularExplosionSource(ExplosionSource):
    '''
    Rectangular or line explosion source.

    The explosion front spreads from the nucleation point over a
    rectangular plane with the given ``velocity``.
    '''

    discretized_source_class = meta.DiscretizedExplosionSource

    strike = Float.T(
        default=0.0,
        help='strike direction in [deg], measured clockwise from north')

    dip = Float.T(
        default=90.0,
        help='dip angle in [deg], measured downward from horizontal')

    length = Float.T(
        default=0.,
        help='length of rectangular source area [m]')

    width = Float.T(
        default=0.,
        help='width of rectangular source area [m]')

    anchor = StringChoice.T(
        # Fix: 'center_left' and 'center_right' were advertised in the
        # help text (and are supported by map_anchor) but missing from
        # the allowed choices.
        choices=['top', 'top_left', 'top_right', 'center', 'center_left',
                 'center_right', 'bottom', 'bottom_left', 'bottom_right'],
        default='center',
        optional=True,
        help='Anchor point for positioning the plane, can be: ``top, '
             'top_left, top_right, center, center_left, center_right, '
             'bottom, bottom_left, bottom_right``.')

    nucleation_x = Float.T(
        optional=True,
        help='horizontal position of rupture nucleation in normalized fault '
             'plane coordinates (-1 = left edge, +1 = right edge)')

    nucleation_y = Float.T(
        optional=True,
        help='down-dip position of rupture nucleation in normalized fault '
             'plane coordinates (-1 = upper edge, +1 = lower edge)')

    velocity = Float.T(
        default=3500.,
        help='speed of explosion front [m/s]')

    aggressive_oversampling = Bool.T(
        default=False,
        help='Aggressive oversampling for basesource discretization. '
             "When using 'multilinear' interpolation oversampling has"
             ' practically no effect.')

    def base_key(self):
        return Source.base_key(self) + (self.strike, self.dip, self.length,
                                        self.width, self.nucleation_x,
                                        self.nucleation_y, self.velocity,
                                        self.anchor)

    def discretize_basesource(self, store, target=None):
        '''
        Discretize the rectangle into a spatio-temporal distribution of
        isotropic point sources.
        '''

        # Convert normalized nucleation coordinates to offsets in [m]
        # relative to the plane center; None means instantaneous
        # rupture of the whole plane.
        if self.nucleation_x is not None:
            nucx = self.nucleation_x * 0.5 * self.length
        else:
            nucx = None

        if self.nucleation_y is not None:
            nucy = self.nucleation_y * 0.5 * self.width
        else:
            nucy = None

        stf = self.effective_stf_pre()

        points, times, amplitudes, dl, dw, nl, nw = discretize_rect_source(
            store.config.deltas, store.config.deltat,
            self.time, self.north_shift, self.east_shift, self.depth,
            self.strike, self.dip, self.length, self.width, self.anchor,
            self.velocity, stf=stf, nucleation_x=nucx, nucleation_y=nucy)

        # Normalize, then scale so that the sub-source amplitudes sum
        # to the total moment.
        amplitudes /= num.sum(amplitudes)
        amplitudes *= self.get_moment(store, target)

        return meta.DiscretizedExplosionSource(
            lat=self.lat,
            lon=self.lon,
            times=times,
            north_shifts=points[:, 0],
            east_shifts=points[:, 1],
            depths=points[:, 2],
            m0s=amplitudes)

    def outline(self, cs='xyz'):
        '''
        Get the corner points of the source plane in coordinate system
        ``cs`` (see :ref:`coordinate-system-names`).
        '''
        points = outline_rect_source(self.strike, self.dip, self.length,
                                     self.width, self.anchor)

        points[:, 0] += self.north_shift
        points[:, 1] += self.east_shift
        points[:, 2] += self.depth
        if cs == 'xyz':
            return points
        elif cs == 'xy':
            return points[:, :2]
        elif cs in ('latlon', 'lonlat'):
            latlon = ne_to_latlon(
                self.lat, self.lon, points[:, 0], points[:, 1])

            latlon = num.array(latlon).T
            if cs == 'latlon':
                return latlon
            else:
                return latlon[:, ::-1]

    def get_nucleation_abs_coord(self, cs='xy'):
        '''
        Get the absolute coordinates of the nucleation point, or
        ``(None, None)`` when no nucleation point is set.
        '''

        if self.nucleation_x is None:
            return None, None

        coords = from_plane_coords(self.strike, self.dip, self.length,
                                   self.width, self.depth, self.nucleation_x,
                                   self.nucleation_y, lat=self.lat,
                                   lon=self.lon, north_shift=self.north_shift,
                                   east_shift=self.east_shift, cs=cs)
        return coords

1681 

1682 

class DCSource(SourceWithMagnitude):
    '''
    A double-couple point source.
    '''

    strike = Float.T(
        default=0.0,
        help='strike direction in [deg], measured clockwise from north')

    dip = Float.T(
        default=90.0,
        help='dip angle in [deg], measured downward from horizontal')

    rake = Float.T(
        default=0.0,
        help='rake angle in [deg], '
             'measured counter-clockwise from right-horizontal '
             'in on-plane view')

    discretized_source_class = meta.DiscretizedMTSource

    def base_key(self):
        # Share GF stacks among double couples of equal orientation.
        return Source.base_key(self) + (self.strike, self.dip, self.rake)

    def get_factor(self):
        # The scalar moment is applied in post-processing.
        return float(pmt.magnitude_to_moment(self.magnitude))

    def discretize_basesource(self, store, target=None):
        # Discretize with a unit moment tensor; the scalar moment
        # enters via get_factor().
        tensor = pmt.MomentTensor(
            strike=self.strike, dip=self.dip, rake=self.rake)

        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)

        m6s = tensor.m6()[num.newaxis, :] * amplitudes[:, num.newaxis]
        return meta.DiscretizedMTSource(
            m6s=m6s,
            **self._dparams_base_repeated(times))

    def pyrocko_moment_tensor(self, store=None, target=None):
        return pmt.MomentTensor(
            strike=self.strike,
            dip=self.dip,
            rake=self.rake,
            scalar_moment=self.moment)

    def pyrocko_event(self, store=None, target=None, **kwargs):
        '''
        Convert to a :py:class:`pyrocko.model.Event` with attached
        moment tensor.
        '''
        return SourceWithMagnitude.pyrocko_event(
            self, store, target,
            moment_tensor=self.pyrocko_moment_tensor(store, target),
            **kwargs)

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        '''
        Create a double couple from an event, using the first of the
        two possible (strike, dip, rake) solutions of its moment
        tensor, when available.
        '''
        params = {}
        mt = ev.moment_tensor
        if mt:
            (strike, dip, rake), _ = mt.both_strike_dip_rake()
            params.update(
                strike=float(strike),
                dip=float(dip),
                rake=float(rake),
                magnitude=float(mt.moment_magnitude()))

        params.update(kwargs)
        return super(DCSource, cls).from_pyrocko_event(ev, **params)

1747 

1748 

class CLVDSource(SourceWithMagnitude):
    '''
    A pure CLVD point source.

    The compensated linear vector dipole is oriented by the azimuth and
    dip of its largest dipole; its strength is given by the inherited
    ``magnitude``.
    '''

    discretized_source_class = meta.DiscretizedMTSource

    azimuth = Float.T(
        default=0.0,
        help='azimuth direction of largest dipole, clockwise from north [deg]')

    dip = Float.T(
        default=90.,
        help='dip direction of largest dipole, downward from horizontal [deg]')

    def base_key(self):
        return Source.base_key(self) + (self.azimuth, self.dip)

    def get_factor(self):
        # Scalar moment, applied in post-processing.
        return float(pmt.magnitude_to_moment(self.magnitude))

    @property
    def m6(self):
        '''
        Moment tensor of the oriented CLVD as 6-component vector [Nm].
        '''
        # Principal-axis CLVD with eigenvalues (-a/2, -a/2, a); the
        # sqrt(4/3) factor normalizes to the scalar moment.
        a = math.sqrt(4. / 3.) * self.get_factor()
        m = pmt.symmat6(-0.5 * a, -0.5 * a, a, 0., 0., 0.)
        # Rotate the symmetry axis to the given dip and azimuth.
        rotmat1 = pmt.euler_to_matrix(
            d2r * (self.dip - 90.),
            d2r * (self.azimuth - 90.),
            0.)
        m = num.dot(rotmat1.T, num.dot(m, rotmat1))
        return pmt.to6(m)

    @property
    def m6_astuple(self):
        return tuple(self.m6.tolist())

    def discretize_basesource(self, store, target=None):
        # m6 carries the full moment; divide by factor so it is
        # re-applied in post-processing.
        factor = self.get_factor()
        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)
        return meta.DiscretizedMTSource(
            m6s=self.m6[num.newaxis, :] * amplitudes[:, num.newaxis] / factor,
            **self._dparams_base_repeated(times))

    def pyrocko_moment_tensor(self, store=None, target=None):
        return pmt.MomentTensor(m=pmt.symmat6(*self.m6_astuple))

    def pyrocko_event(self, store=None, target=None, **kwargs):
        '''
        Convert to a :py:class:`pyrocko.model.Event` with attached
        moment tensor and magnitude.
        '''
        mt = self.pyrocko_moment_tensor(store, target)
        return Source.pyrocko_event(
            self, store, target,
            moment_tensor=mt,  # fix: was recomputed a second time here
            magnitude=float(mt.moment_magnitude()),
            **kwargs)

1803 

1804 

class VLVDSource(SourceWithDerivedMagnitude):
    '''
    Volumetric linear vector dipole source.

    This source is a parameterization for a restricted moment tensor point
    source, useful to represent dyke or sill like inflation or deflation
    sources. The restriction is such that the moment tensor is rotationally
    symmetric. It can be represented by a superposition of a linear vector
    dipole (here we use a CLVD for convenience) and an isotropic component.
    The restricted moment tensor has 4 degrees of freedom: 2 independent
    eigenvalues and 2 rotation angles orienting the symmetry axis.

    In this parameterization, the isotropic component is controlled by
    ``volume_change``. To define the moment tensor, it must be converted to
    the scalar moment of the MT's isotropic component. For the conversion,
    the shear modulus at the source's position must be known. This value is
    extracted from the earth model defined in the GF store in use.

    The CLVD part is controlled by its scalar moment :math:`M_0`:
    ``clvd_moment``. The sign of ``clvd_moment`` is used to switch between a
    positive or negative CLVD (the sign of the largest eigenvalue).
    '''

    discretized_source_class = meta.DiscretizedMTSource

    azimuth = Float.T(
        default=0.0,
        help='azimuth direction of symmetry axis, clockwise from north [deg].')

    dip = Float.T(
        default=90.,
        help='dip direction of symmetry axis, downward from horizontal [deg].')

    volume_change = Float.T(
        default=0.,
        help='volume change of the inflation/deflation [m^3].')

    clvd_moment = Float.T(
        default=0.,
        help='scalar moment :math:`M_0` of the CLVD component [Nm]. The sign '
             'controls the sign of the CLVD (the sign of its largest '
             'eigenvalue).')

    def get_moment_to_volume_change_ratio(self, store, target):
        '''
        Get the conversion factor [Pa] between scalar moment and volume
        change at the source position, from the store's earth model.

        :raises: :py:exc:`DerivedMagnitudeError` when store or target
            is missing or the position is out of bounds.
        '''
        if store is None or target is None:
            raise DerivedMagnitudeError(
                'Need earth model to convert between volume change and '
                'magnitude.')

        points = num.array(
            [[self.north_shift, self.east_shift, self.depth]], dtype=float)

        try:
            shear_moduli = store.config.get_shear_moduli(
                self.lat, self.lon,
                points=points,
                interpolation=target.interpolation)[0]

            bulk_moduli = store.config.get_bulk_moduli(
                self.lat, self.lon,
                points=points,
                interpolation=target.interpolation)[0]
        except meta.OutOfBounds:
            # NOTE(review): the message mentions only the shear modulus
            # although the bulk modulus lookup may fail here as well.
            raise DerivedMagnitudeError(
                'Could not get shear modulus at source position.')

        return float(2. * shear_moduli + bulk_moduli)

    def base_key(self):
        return Source.base_key(self) + \
            (self.azimuth, self.dip, self.volume_change, self.clvd_moment)

    def get_magnitude(self, store=None, target=None):
        # Magnitude of the combined (CLVD + isotropic) moment tensor.
        mt = self.pyrocko_moment_tensor(store, target)
        return float(pmt.moment_to_magnitude(mt.moment))

    def get_m6(self, store, target):
        '''
        Get the full 6-component moment tensor [Nm]: oriented CLVD part
        plus isotropic part derived from ``volume_change``.
        '''
        # CLVD with eigenvalues (-a/2, -a/2, a); sqrt(4/3) normalizes
        # to the scalar moment clvd_moment.
        a = math.sqrt(4. / 3.) * self.clvd_moment
        m_clvd = pmt.symmat6(-0.5 * a, -0.5 * a, a, 0., 0., 0.)

        # Rotate the CLVD symmetry axis to the given dip and azimuth.
        rotmat1 = pmt.euler_to_matrix(
            d2r * (self.dip - 90.),
            d2r * (self.azimuth - 90.),
            0.)
        m_clvd = num.dot(rotmat1.T, num.dot(m_clvd, rotmat1))

        m_iso = self.volume_change * \
            self.get_moment_to_volume_change_ratio(store, target)

        m_iso = pmt.symmat6(m_iso, m_iso, m_iso, 0.,
                            0., 0.,) * math.sqrt(2. / 3)

        m = pmt.to6(m_clvd) + pmt.to6(m_iso)
        return m

    def get_moment(self, store=None, target=None):
        return float(pmt.magnitude_to_moment(
            self.get_magnitude(store, target)))

    def get_m6_astuple(self, store, target):
        m6 = self.get_m6(store, target)
        return tuple(m6.tolist())

    def discretize_basesource(self, store, target=None):
        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)

        m6 = self.get_m6(store, target)
        # NOTE(review): m6 has shape (6,) while amplitudes has shape
        # (nt,). With the default 'post' stf_mode nt == 1 and this
        # broadcast is fine; for a pre-applied STF with nt > 1 the
        # shapes look inconsistent -- confirm intended behavior.
        m6 *= amplitudes / self.get_factor()

        return meta.DiscretizedMTSource(
            m6s=m6[num.newaxis, :],
            **self._dparams_base_repeated(times))

    def pyrocko_moment_tensor(self, store=None, target=None):
        m6_astuple = self.get_m6_astuple(store, target)
        return pmt.MomentTensor(m=pmt.symmat6(*m6_astuple))

1922 

1923 

class MTSource(Source):
    '''
    A moment tensor point source.

    The tensor is given by its six independent components in
    north-east-down convention. A 6-component vector may be passed to
    the constructor via the ``m6`` keyword.
    '''

    discretized_source_class = meta.DiscretizedMTSource

    mnn = Float.T(
        default=1.,
        help='north-north component of moment tensor in [Nm]')

    mee = Float.T(
        default=1.,
        help='east-east component of moment tensor in [Nm]')

    mdd = Float.T(
        default=1.,
        help='down-down component of moment tensor in [Nm]')

    mne = Float.T(
        default=0.,
        help='north-east component of moment tensor in [Nm]')

    mnd = Float.T(
        default=0.,
        help='north-down component of moment tensor in [Nm]')

    med = Float.T(
        default=0.,
        help='east-down component of moment tensor in [Nm]')

    def __init__(self, **kwargs):
        # Expand a 6-component vector given as 'm6' into the individual
        # tensor components.
        if 'm6' in kwargs:
            for (k, v) in zip('mnn mee mdd mne mnd med'.split(),
                              kwargs.pop('m6')):
                kwargs[k] = float(v)

        Source.__init__(self, **kwargs)

    @property
    def m6(self):
        # Tensor components as 6-element array [Nm].
        return num.array(self.m6_astuple)

    @property
    def m6_astuple(self):
        return (self.mnn, self.mee, self.mdd, self.mne, self.mnd, self.med)

    @m6.setter
    def m6(self, value):
        self.mnn, self.mee, self.mdd, self.mne, self.mnd, self.med = value

    def base_key(self):
        return Source.base_key(self) + self.m6_astuple

    def discretize_basesource(self, store, target=None):
        '''
        Discretize to a temporal distribution of moment tensor point
        sources on the store's sampling.
        '''
        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)
        return meta.DiscretizedMTSource(
            m6s=self.m6[num.newaxis, :] * amplitudes[:, num.newaxis],
            **self._dparams_base_repeated(times))

    def get_magnitude(self, store=None, target=None):
        # Moment magnitude from the Frobenius norm of the tensor
        # (off-diagonal components counted twice), scaled by 1/sqrt(2).
        m6 = self.m6
        return pmt.moment_to_magnitude(
            math.sqrt(num.sum(m6[0:3]**2) + 2.0 * num.sum(m6[3:6]**2)) /
            math.sqrt(2.))

    def pyrocko_moment_tensor(self, store=None, target=None):
        return pmt.MomentTensor(m=pmt.symmat6(*self.m6_astuple))

    def pyrocko_event(self, store=None, target=None, **kwargs):
        '''
        Convert to a :py:class:`pyrocko.model.Event` with attached
        moment tensor and magnitude.
        '''
        mt = self.pyrocko_moment_tensor(store, target)
        return Source.pyrocko_event(
            self, store, target,
            moment_tensor=mt,  # fix: was recomputed a second time here
            magnitude=float(mt.moment_magnitude()),
            **kwargs)

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        '''
        Create from an event, taking over its moment tensor, or falling
        back to an isotropic tensor matching the event magnitude.
        '''
        d = {}
        mt = ev.moment_tensor
        if mt:
            d.update(m6=tuple(map(float, mt.m6())))
        else:
            if ev.magnitude is not None:
                mom = pmt.magnitude_to_moment(ev.magnitude)
                v = math.sqrt(2. / 3.) * mom
                d.update(m6=(v, v, v, 0., 0., 0.))

        d.update(kwargs)
        return super(MTSource, cls).from_pyrocko_event(ev, **d)

2016 

2017 

# Mapping of anchor name to normalized (x, y) position on the fault
# plane: x: -1 = left edge, +1 = right edge; y: -1 = top (up-dip) edge,
# +1 = bottom (down-dip) edge.
map_anchor = {
    'center': (0.0, 0.0),
    'center_left': (-1.0, 0.0),
    'center_right': (1.0, 0.0),
    'top': (0.0, -1.0),
    'top_left': (-1.0, -1.0),
    'top_right': (1.0, -1.0),
    'bottom': (0.0, 1.0),
    'bottom_left': (-1.0, 1.0),
    'bottom_right': (1.0, 1.0)}

2028 

2029 

2030class RectangularSource(SourceWithDerivedMagnitude): 

2031 ''' 

2032 Classical Haskell source model modified for bilateral rupture. 

2033 ''' 

2034 

2035 discretized_source_class = meta.DiscretizedMTSource 

2036 

2037 magnitude = Float.T( 

2038 optional=True, 

2039 help='moment magnitude Mw as in [Hanks and Kanamori, 1979]') 

2040 

2041 strike = Float.T( 

2042 default=0.0, 

2043 help='strike direction in [deg], measured clockwise from north') 

2044 

2045 dip = Float.T( 

2046 default=90.0, 

2047 help='dip angle in [deg], measured downward from horizontal') 

2048 

2049 rake = Float.T( 

2050 default=0.0, 

2051 help='rake angle in [deg], ' 

2052 'measured counter-clockwise from right-horizontal ' 

2053 'in on-plane view') 

2054 

2055 length = Float.T( 

2056 default=0., 

2057 help='length of rectangular source area [m]') 

2058 

2059 width = Float.T( 

2060 default=0., 

2061 help='width of rectangular source area [m]') 

2062 

2063 anchor = StringChoice.T( 

2064 choices=['top', 'top_left', 'top_right', 'center', 'bottom', 

2065 'bottom_left', 'bottom_right'], 

2066 default='center', 

2067 optional=True, 

2068 help='Anchor point for positioning the plane, can be: ``top, center ' 

2069 'bottom, top_left, top_right,bottom_left,' 

2070 'bottom_right, center_left, center right``.') 

2071 

2072 nucleation_x = Float.T( 

2073 optional=True, 

2074 help='horizontal position of rupture nucleation in normalized fault ' 

2075 'plane coordinates (``-1.`` = left edge, ``+1.`` = right edge)') 

2076 

2077 nucleation_y = Float.T( 

2078 optional=True, 

2079 help='down-dip position of rupture nucleation in normalized fault ' 

2080 'plane coordinates (``-1.`` = upper edge, ``+1.`` = lower edge)') 

2081 

2082 velocity = Float.T( 

2083 default=3500., 

2084 help='speed of rupture front [m/s]') 

2085 

2086 slip = Float.T( 

2087 optional=True, 

2088 help='Slip on the rectangular source area [m]') 

2089 

2090 opening_fraction = Float.T( 

2091 default=0., 

2092 help='Determines fraction of slip related to opening. ' 

2093 '(``-1``: pure tensile closing, ' 

2094 '``0``: pure shear, ' 

2095 '``1``: pure tensile opening)') 

2096 

2097 decimation_factor = Int.T( 

2098 optional=True, 

2099 default=1, 

2100 help='Sub-source decimation factor, a larger decimation will' 

2101 ' make the result inaccurate but shorten the necessary' 

2102 ' computation time (use for testing puposes only).') 

2103 

2104 aggressive_oversampling = Bool.T( 

2105 default=False, 

2106 help='Aggressive oversampling for basesource discretization. ' 

2107 "When using 'multilinear' interpolation oversampling has" 

2108 ' practically no effect.') 

2109 

2110 def __init__(self, **kwargs): 

2111 if 'moment' in kwargs: 

2112 mom = kwargs.pop('moment') 

2113 if 'magnitude' not in kwargs: 

2114 kwargs['magnitude'] = float(pmt.moment_to_magnitude(mom)) 

2115 

2116 SourceWithDerivedMagnitude.__init__(self, **kwargs) 

2117 

2118 def base_key(self): 

2119 return SourceWithDerivedMagnitude.base_key(self) + ( 

2120 self.magnitude, 

2121 self.slip, 

2122 self.strike, 

2123 self.dip, 

2124 self.rake, 

2125 self.length, 

2126 self.width, 

2127 self.nucleation_x, 

2128 self.nucleation_y, 

2129 self.velocity, 

2130 self.decimation_factor, 

2131 self.anchor) 

2132 

2133 def check_conflicts(self): 

2134 if self.magnitude is not None and self.slip is not None: 

2135 raise DerivedMagnitudeError( 

2136 'Magnitude and slip are both defined.') 

2137 

2138 def get_magnitude(self, store=None, target=None): 

2139 self.check_conflicts() 

2140 if self.magnitude is not None: 

2141 return self.magnitude 

2142 

2143 elif self.slip is not None: 

2144 if None in (store, target): 

2145 raise DerivedMagnitudeError( 

2146 'Magnitude for a rectangular source with slip defined ' 

2147 'can only be derived when earth model and target ' 

2148 'interpolation method are available.') 

2149 

2150 amplitudes = self._discretize(store, target)[2] 

2151 if amplitudes.ndim == 2: 

2152 # CLVD component has no net moment, leave out 

2153 return float(pmt.moment_to_magnitude( 

2154 num.sum(num.abs(amplitudes[0:2, :]).sum()))) 

2155 else: 

2156 return float(pmt.moment_to_magnitude(num.sum(amplitudes))) 

2157 

2158 else: 

2159 return float(pmt.moment_to_magnitude(1.0)) 

2160 

2161 def get_factor(self): 

2162 return 1.0 

2163 

2164 def get_slip_tensile(self): 

2165 return self.slip * self.opening_fraction 

2166 

2167 def get_slip_shear(self): 

2168 return self.slip - abs(self.get_slip_tensile) 

2169 

    def _discretize(self, store, target):
        '''
        Discretize the rectangular plane into point sub-sources.

        :returns:
            Sub-source coordinates, rupture onset times, moment amplitudes
            (1-D for pure shear; 2-D with extra ISO/CLVD rows when tensile
            slip is involved), patch dimensions ``dl``/``dw`` and patch
            counts ``nl``/``nw``.
        '''
        # Map normalized nucleation coordinates (-1..1) to metric offsets
        # from the plane center.
        if self.nucleation_x is not None:
            nucx = self.nucleation_x * 0.5 * self.length
        else:
            nucx = None

        if self.nucleation_y is not None:
            nucy = self.nucleation_y * 0.5 * self.width
        else:
            nucy = None

        stf = self.effective_stf_pre()

        points, times, amplitudes, dl, dw, nl, nw = discretize_rect_source(
            store.config.deltas, store.config.deltat,
            self.time, self.north_shift, self.east_shift, self.depth,
            self.strike, self.dip, self.length, self.width, self.anchor,
            self.velocity, stf=stf, nucleation_x=nucx, nucleation_y=nucy,
            decimation_factor=self.decimation_factor,
            aggressive_oversampling=self.aggressive_oversampling)

        if self.slip is not None:
            if target is not None:
                interpolation = target.interpolation
            else:
                interpolation = 'nearest_neighbor'
                logger.warning(
                    'no target information available, will use '
                    '"nearest_neighbor" interpolation when extracting shear '
                    'modulus from earth model')

            shear_moduli = store.config.get_shear_moduli(
                self.lat, self.lon,
                points=points,
                interpolation=interpolation)

            tensile_slip = self.get_slip_tensile()
            shear_slip = self.slip - abs(tensile_slip)

            # Moment amplitude per patch: modulus * slip * patch area.
            amplitudes_total = [shear_moduli * shear_slip]
            if tensile_slip != 0:
                bulk_moduli = store.config.get_bulk_moduli(
                    self.lat, self.lon,
                    points=points,
                    interpolation=interpolation)

                # Tensile opening split into isotropic and CLVD parts.
                tensile_iso = bulk_moduli * tensile_slip
                tensile_clvd = (2. / 3.) * shear_moduli * tensile_slip

                amplitudes_total.extend([tensile_iso, tensile_clvd])

            amplitudes_total = num.vstack(amplitudes_total).squeeze() * \
                amplitudes * dl * dw

        else:
            # normalization to retain total moment
            amplitudes_norm = amplitudes / num.sum(amplitudes)
            moment = self.get_moment(store, target)

            # Split total moment between shear and opening contributions.
            amplitudes_total = [
                amplitudes_norm * moment * (1 - abs(self.opening_fraction))]
            if self.opening_fraction != 0.:
                amplitudes_total.append(
                    amplitudes_norm * self.opening_fraction * moment)

            amplitudes_total = num.vstack(amplitudes_total).squeeze()

        return points, times, num.atleast_1d(amplitudes_total), dl, dw, nl, nw

2238 

2239 def discretize_basesource(self, store, target=None): 

2240 

2241 points, times, amplitudes, dl, dw, nl, nw = self._discretize( 

2242 store, target) 

2243 

2244 mot = pmt.MomentTensor( 

2245 strike=self.strike, dip=self.dip, rake=self.rake) 

2246 m6s = num.repeat(mot.m6()[num.newaxis, :], times.size, axis=0) 

2247 

2248 if amplitudes.ndim == 1: 

2249 m6s[:, :] *= amplitudes[:, num.newaxis] 

2250 elif amplitudes.ndim == 2: 

2251 # shear MT components 

2252 rotmat1 = pmt.euler_to_matrix( 

2253 d2r * self.dip, d2r * self.strike, d2r * -self.rake) 

2254 m6s[:, :] *= amplitudes[0, :][:, num.newaxis] 

2255 

2256 if amplitudes.shape[0] == 2: 

2257 # tensile MT components - moment/magnitude input 

2258 tensile = pmt.symmat6(1., 1., 3., 0., 0., 0.) 

2259 rot_tensile = pmt.to6( 

2260 num.dot(rotmat1.T, num.dot(tensile, rotmat1))) 

2261 

2262 m6s_tensile = rot_tensile[ 

2263 num.newaxis, :] * amplitudes[1, :][:, num.newaxis] 

2264 m6s += m6s_tensile 

2265 

2266 elif amplitudes.shape[0] == 3: 

2267 # tensile MT components - slip input 

2268 iso = pmt.symmat6(1., 1., 1., 0., 0., 0.) 

2269 clvd = pmt.symmat6(-1., -1., 2., 0., 0., 0.) 

2270 

2271 rot_iso = pmt.to6( 

2272 num.dot(rotmat1.T, num.dot(iso, rotmat1))) 

2273 rot_clvd = pmt.to6( 

2274 num.dot(rotmat1.T, num.dot(clvd, rotmat1))) 

2275 

2276 m6s_iso = rot_iso[ 

2277 num.newaxis, :] * amplitudes[1, :][:, num.newaxis] 

2278 m6s_clvd = rot_clvd[ 

2279 num.newaxis, :] * amplitudes[2, :][:, num.newaxis] 

2280 m6s += m6s_iso + m6s_clvd 

2281 else: 

2282 raise ValueError('Unknwown amplitudes shape!') 

2283 else: 

2284 raise ValueError( 

2285 'Unexpected dimension of {}'.format(amplitudes.ndim)) 

2286 

2287 ds = meta.DiscretizedMTSource( 

2288 lat=self.lat, 

2289 lon=self.lon, 

2290 times=times, 

2291 north_shifts=points[:, 0], 

2292 east_shifts=points[:, 1], 

2293 depths=points[:, 2], 

2294 m6s=m6s, 

2295 dl=dl, 

2296 dw=dw, 

2297 nl=nl, 

2298 nw=nw) 

2299 

2300 return ds 

2301 

    def xy_to_coord(self, x, y, cs='xyz'):
        '''
        Convert normalized in-plane coordinates to a given coordinate system.

        :param x:
            Along-strike coordinates on the fault plane
            (``-1.`` = left edge, ``+1.`` = right edge).
        :param y:
            Down-dip coordinates on the fault plane
            (``-1.`` = upper edge, ``+1.`` = lower edge).
        :param cs:
            :ref:`Output coordinate system <coordinate-system-names>`.

        :returns:
            Coordinates in the requested system.
        :rtype:
            :py:class:`~numpy.ndarray`
        '''
        ln, wd = self.length, self.width
        strike, dip = self.strike, self.dip

        def array_check(variable):
            # Accept scalars and sequences by coercing to numpy arrays.
            if not isinstance(variable, num.ndarray):
                return num.array(variable)
            else:
                return variable

        x, y = array_check(x), array_check(y)

        if x.shape[0] != y.shape[0]:
            raise ValueError('Shapes of x and y mismatch')

        # Scale normalized coordinates to metric in-plane offsets.
        x, y = x * 0.5 * ln, y * 0.5 * wd

        points = num.hstack((
            x.reshape(-1, 1), y.reshape(-1, 1), num.zeros((x.shape[0], 1))))

        # Shift relative to the configured anchor point of the plane.
        anch_x, anch_y = map_anchor[self.anchor]
        points[:, 0] -= anch_x * 0.5 * ln
        points[:, 1] -= anch_y * 0.5 * wd

        # Rotate the in-plane points into the (north, east, down) frame.
        rotmat = num.asarray(
            pmt.euler_to_matrix(dip * d2r, strike * d2r, 0.0))

        points_rot = num.dot(rotmat.T, points.T).T

        points_rot[:, 0] += self.north_shift
        points_rot[:, 1] += self.east_shift
        points_rot[:, 2] += self.depth

        if cs == 'xyz':
            return points_rot
        elif cs == 'xy':
            return points_rot[:, :2]
        elif cs in ('latlon', 'lonlat', 'latlondepth'):
            latlon = ne_to_latlon(
                self.lat, self.lon, points_rot[:, 0], points_rot[:, 1])
            latlon = num.array(latlon).T
            if cs == 'latlon':
                return latlon
            elif cs == 'lonlat':
                return latlon[:, ::-1]
            else:
                return num.concatenate(
                    (latlon, points_rot[:, 2].reshape((len(points_rot), 1))),
                    axis=1)

2351 

2352 def outline(self, cs='xyz'): 

2353 x = num.array([-1., 1., 1., -1., -1.]) 

2354 y = num.array([-1., -1., 1., 1., -1.]) 

2355 

2356 return self.xy_to_coord(x, y, cs=cs) 

2357 

    def points_on_source(self, cs='xyz', **kwargs):
        '''
        Convert relative plane coordinates to geographical coordinates.

        Given x and y coordinates (relative source coordinates between -1.
        and 1.) are converted to desired geographical coordinates. Coordinates
        need to be given as :py:class:`~numpy.ndarray` arguments ``points_x``
        and ``points_y`` (forwarded to ``points_on_rect_source``).

        :param cs:
            :ref:`Output coordinate system <coordinate-system-names>`.
        :type cs:
            optional, str

        :returns:
            Point coordinates in desired coordinate system.
        :rtype:
            :py:class:`~numpy.ndarray`: ``(n_points, [2, 3])``.
        '''

        points = points_on_rect_source(
            self.strike, self.dip, self.length, self.width,
            self.anchor, **kwargs)

        # Shift from source-local to absolute (north, east, depth).
        points[:, 0] += self.north_shift
        points[:, 1] += self.east_shift
        points[:, 2] += self.depth
        if cs == 'xyz':
            return points
        elif cs == 'xy':
            return points[:, :2]
        elif cs in ('latlon', 'lonlat', 'latlondepth'):
            latlon = ne_to_latlon(
                self.lat, self.lon, points[:, 0], points[:, 1])

            latlon = num.array(latlon).T
            if cs == 'latlon':
                return latlon
            elif cs == 'lonlat':
                return latlon[:, ::-1]
            else:
                return num.concatenate(
                    (latlon, points[:, 2].reshape((len(points), 1))),
                    axis=1)

2384 

    def geometry(self, *args, **kwargs):
        '''
        Build a mesh representation of the discretized source.

        Additional ``*args`` and ``**kwargs`` are passed to
        :py:meth:`discretize_basesource`.

        :rtype:
            :py:class:`pyrocko.model.Geometry`
        '''
        from pyrocko.model import Geometry

        ds = self.discretize_basesource(*args, **kwargs)
        nx, ny = ds.nl, ds.nw

        def patch_outlines_xy(nx, ny):
            # Regular grid of normalized (-1..1) vertex coordinates,
            # x varying fastest.
            points = num.zeros((nx * ny, 2))
            points[:, 0] = num.tile(num.linspace(-1., 1., nx), ny)
            points[:, 1] = num.repeat(num.linspace(-1., 1., ny), nx)

            return points

        # Vertices of the (nx+1) x (ny+1) patch-corner grid.
        points_ds = patch_outlines_xy(nx + 1, ny + 1)
        npoints = (nx + 1) * (ny + 1)

        vertices = num.hstack((
            num.ones((npoints, 1)) * self.lat,
            num.ones((npoints, 1)) * self.lon,
            self.xy_to_coord(points_ds[:, 0], points_ds[:, 1], cs='xyz')))

        # Closed quad faces (first corner repeated), one per patch cell.
        faces = num.array([[
            iy * (nx + 1) + ix,
            iy * (nx + 1) + ix + 1,
            (iy + 1) * (nx + 1) + ix + 1,
            (iy + 1) * (nx + 1) + ix,
            iy * (nx + 1) + ix]
            for iy in range(ny) for ix in range(nx)])

        xyz = self.outline('xyz')
        latlon = num.ones((5, 2)) * num.array([self.lat, self.lon])
        patchverts = num.hstack((latlon, xyz))

        geom = Geometry()
        geom.setup(vertices, faces)
        geom.set_outlines([patchverts])

        if self.stf:
            geom.times = num.unique(ds.times)

        if self.nucleation_x is not None and self.nucleation_y is not None:
            geom.add_property('t_arrival', ds.times)

        geom.add_property(
            'moment', ds.moments().reshape(ds.nl*ds.nw, -1))

        # NOTE(review): assumes ``self.slip`` is set; with ``slip=None``
        # this multiplication would fail -- confirm against callers.
        geom.add_property(
            'slip', num.ones_like(ds.times) * self.slip)

        return geom

2435 

2436 def get_nucleation_abs_coord(self, cs='xy'): 

2437 

2438 if self.nucleation_x is None: 

2439 return None, None 

2440 

2441 coords = from_plane_coords(self.strike, self.dip, self.length, 

2442 self.width, self.depth, self.nucleation_x, 

2443 self.nucleation_y, lat=self.lat, 

2444 lon=self.lon, north_shift=self.north_shift, 

2445 east_shift=self.east_shift, cs=cs) 

2446 return coords 

2447 

2448 def pyrocko_moment_tensor(self, store=None, target=None): 

2449 return pmt.MomentTensor( 

2450 strike=self.strike, 

2451 dip=self.dip, 

2452 rake=self.rake, 

2453 scalar_moment=self.get_moment(store, target)) 

2454 

2455 def pyrocko_event(self, store=None, target=None, **kwargs): 

2456 return SourceWithDerivedMagnitude.pyrocko_event( 

2457 self, store, target, 

2458 **kwargs) 

2459 

2460 @classmethod 

2461 def from_pyrocko_event(cls, ev, **kwargs): 

2462 d = {} 

2463 mt = ev.moment_tensor 

2464 if mt: 

2465 (strike, dip, rake), _ = mt.both_strike_dip_rake() 

2466 d.update( 

2467 strike=float(strike), 

2468 dip=float(dip), 

2469 rake=float(rake), 

2470 magnitude=float(mt.moment_magnitude())) 

2471 

2472 d.update(kwargs) 

2473 return super(RectangularSource, cls).from_pyrocko_event(ev, **d) 

2474 

2475 

2476class PseudoDynamicRupture(SourceWithDerivedMagnitude): 

2477 ''' 

2478 Combined Eikonal and Okada quasi-dynamic rupture model. 

2479 

2480 Details are described in :doc:`/topics/pseudo-dynamic-rupture`. 

2481 Note: attribute `stf` is not used so far, but kept for future applications. 

2482 ''' 

2483 

2484 discretized_source_class = meta.DiscretizedMTSource 

2485 

2486 strike = Float.T( 

2487 default=0.0, 

2488 help='Strike direction in [deg], measured clockwise from north.') 

2489 

2490 dip = Float.T( 

2491 default=0.0, 

2492 help='Dip angle in [deg], measured downward from horizontal.') 

2493 

2494 length = Float.T( 

2495 default=10. * km, 

2496 help='Length of rectangular source area in [m].') 

2497 

2498 width = Float.T( 

2499 default=5. * km, 

2500 help='Width of rectangular source area in [m].') 

2501 

2502 anchor = StringChoice.T( 

2503 choices=['top', 'top_left', 'top_right', 'center', 'bottom', 

2504 'bottom_left', 'bottom_right'], 

2505 default='center', 

2506 optional=True, 

2507 help='Anchor point for positioning the plane, can be: ``top, center, ' 

2508 'bottom, top_left, top_right, bottom_left, ' 

2509 'bottom_right, center_left, center_right``.') 

2510 

2511 nucleation_x__ = Array.T( 

2512 default=num.array([0.]), 

2513 dtype=num.float64, 

2514 serialize_as='list', 

2515 help='Horizontal position of rupture nucleation in normalized fault ' 

2516 'plane coordinates (``-1.`` = left edge, ``+1.`` = right edge).') 

2517 

2518 nucleation_y__ = Array.T( 

2519 default=num.array([0.]), 

2520 dtype=num.float64, 

2521 serialize_as='list', 

2522 help='Down-dip position of rupture nucleation in normalized fault ' 

2523 'plane coordinates (``-1.`` = upper edge, ``+1.`` = lower edge).') 

2524 

2525 nucleation_time__ = Array.T( 

2526 optional=True, 

2527 help='Time in [s] after origin, when nucleation points defined by ' 

2528 '``nucleation_x`` and ``nucleation_y`` rupture.', 

2529 dtype=num.float64, 

2530 serialize_as='list') 

2531 

2532 gamma = Float.T( 

2533 default=0.8, 

2534 help='Scaling factor between rupture velocity and S-wave velocity: ' 

2535 r':math:`v_r = \gamma * v_s`.') 

2536 

2537 nx = Int.T( 

2538 default=2, 

2539 help='Number of discrete source patches in x direction (along ' 

2540 'strike).') 

2541 

2542 ny = Int.T( 

2543 default=2, 

2544 help='Number of discrete source patches in y direction (down dip).') 

2545 

2546 slip = Float.T( 

2547 optional=True, 

2548 help='Maximum slip of the rectangular source [m]. ' 

2549 'Setting the slip the tractions/stress field ' 

2550 'will be normalized to accomodate the desired maximum slip.') 

2551 

2552 rake = Float.T( 

2553 optional=True, 

2554 help='Rake angle in [deg], ' 

2555 'measured counter-clockwise from right-horizontal ' 

2556 'in on-plane view. Rake is translated into homogenous tractions ' 

2557 'in strike and up-dip direction. ``rake`` is mutually exclusive ' 

2558 'with tractions parameter.') 

2559 

2560 patches = List.T( 

2561 OkadaSource.T(), 

2562 optional=True, 

2563 help='List of all boundary elements/sub faults/fault patches.') 

2564 

2565 patch_mask__ = Array.T( 

2566 dtype=bool, 

2567 serialize_as='list', 

2568 shape=(None,), 

2569 optional=True, 

2570 help='Mask for all boundary elements/sub faults/fault patches. True ' 

2571 'leaves the patch in the calculation, False excludes the patch.') 

2572 

2573 tractions = TractionField.T( 

2574 optional=True, 

2575 help='Traction field the rupture plane is exposed to. See the ' 

2576 ':py:mod:`pyrocko.gf.tractions` module for more details. ' 

2577 'If ``tractions=None`` and ``rake`` is given' 

2578 ' :py:class:`~pyrocko.gf.tractions.DirectedTractions` will' 

2579 ' be used.') 

2580 

2581 coef_mat = Array.T( 

2582 optional=True, 

2583 help='Coefficient matrix linking traction and dislocation field.', 

2584 dtype=num.float64, 

2585 shape=(None, None)) 

2586 

2587 eikonal_decimation = Int.T( 

2588 optional=True, 

2589 default=1, 

2590 help='Sub-source eikonal factor, a smaller eikonal factor will' 

2591 ' increase the accuracy of rupture front calculation but' 

2592 ' increases also the computation time.') 

2593 

2594 decimation_factor = Int.T( 

2595 optional=True, 

2596 default=1, 

2597 help='Sub-source decimation factor, a larger decimation will' 

2598 ' make the result inaccurate but shorten the necessary' 

2599 ' computation time (use for testing puposes only).') 

2600 

2601 nthreads = Int.T( 

2602 optional=True, 

2603 default=1, 

2604 help='Number of threads for Okada forward modelling, ' 

2605 'matrix inversion and calculation of point subsources. ' 

2606 'Note: for small/medium matrices 1 thread is most efficient.') 

2607 

2608 pure_shear = Bool.T( 

2609 optional=True, 

2610 default=False, 

2611 help='Calculate only shear tractions and omit tensile tractions.') 

2612 

2613 smooth_rupture = Bool.T( 

2614 default=True, 

2615 help='Smooth the tractions by weighting partially ruptured' 

2616 ' fault patches.') 

2617 

2618 aggressive_oversampling = Bool.T( 

2619 default=False, 

2620 help='Aggressive oversampling for basesource discretization. ' 

2621 "When using 'multilinear' interpolation oversampling has" 

2622 ' practically no effect.') 

2623 

    def __init__(self, **kwargs):
        '''
        Initialize the rupture source and validate its configuration.
        '''
        SourceWithDerivedMagnitude.__init__(self, **kwargs)
        # Cache for interpolators built lazily during parameter lookups.
        self._interpolators = {}
        self.check_conflicts()

2628 

2629 @property 

2630 def nucleation_x(self): 

2631 return self.nucleation_x__ 

2632 

2633 @nucleation_x.setter 

2634 def nucleation_x(self, nucleation_x): 

2635 if isinstance(nucleation_x, list): 

2636 nucleation_x = num.array(nucleation_x) 

2637 

2638 elif not isinstance( 

2639 nucleation_x, num.ndarray) and nucleation_x is not None: 

2640 

2641 nucleation_x = num.array([nucleation_x]) 

2642 self.nucleation_x__ = nucleation_x 

2643 

2644 @property 

2645 def nucleation_y(self): 

2646 return self.nucleation_y__ 

2647 

2648 @nucleation_y.setter 

2649 def nucleation_y(self, nucleation_y): 

2650 if isinstance(nucleation_y, list): 

2651 nucleation_y = num.array(nucleation_y) 

2652 

2653 elif not isinstance(nucleation_y, num.ndarray) \ 

2654 and nucleation_y is not None: 

2655 nucleation_y = num.array([nucleation_y]) 

2656 

2657 self.nucleation_y__ = nucleation_y 

2658 

2659 @property 

2660 def nucleation(self): 

2661 nucl_x, nucl_y = self.nucleation_x, self.nucleation_y 

2662 

2663 if (nucl_x is None) or (nucl_y is None): 

2664 return None 

2665 

2666 assert nucl_x.shape[0] == nucl_y.shape[0] 

2667 

2668 return num.concatenate( 

2669 (nucl_x[:, num.newaxis], nucl_y[:, num.newaxis]), axis=1) 

2670 

2671 @nucleation.setter 

2672 def nucleation(self, nucleation): 

2673 if isinstance(nucleation, list): 

2674 nucleation = num.array(nucleation) 

2675 

2676 assert nucleation.shape[1] == 2 

2677 

2678 self.nucleation_x = nucleation[:, 0] 

2679 self.nucleation_y = nucleation[:, 1] 

2680 

2681 @property 

2682 def nucleation_time(self): 

2683 return self.nucleation_time__ 

2684 

2685 @nucleation_time.setter 

2686 def nucleation_time(self, nucleation_time): 

2687 if not isinstance(nucleation_time, num.ndarray) \ 

2688 and nucleation_time is not None: 

2689 nucleation_time = num.array([nucleation_time]) 

2690 

2691 self.nucleation_time__ = nucleation_time 

2692 

2693 @property 

2694 def patch_mask(self): 

2695 if (self.patch_mask__ is not None and 

2696 self.patch_mask__.shape == (self.nx * self.ny,)): 

2697 

2698 return self.patch_mask__ 

2699 else: 

2700 return num.ones(self.nx * self.ny, dtype=bool) 

2701 

2702 @patch_mask.setter 

2703 def patch_mask(self, patch_mask): 

2704 if isinstance(patch_mask, list): 

2705 patch_mask = num.array(patch_mask) 

2706 

2707 self.patch_mask__ = patch_mask 

2708 

2709 def get_tractions(self): 

2710 ''' 

2711 Get source traction vectors. 

2712 

2713 If :py:attr:`rake` is given, unit length directed traction vectors 

2714 (:py:class:`~pyrocko.gf.tractions.DirectedTractions`) are returned, 

2715 else the given :py:attr:`tractions` are used. 

2716 

2717 :returns: 

2718 Traction vectors per patch. 

2719 :rtype: 

2720 :py:class:`~numpy.ndarray`: ``(n_patches, 3)``. 

2721 ''' 

2722 

2723 if self.rake is not None: 

2724 if num.isnan(self.rake): 

2725 raise ValueError('Rake must be a real number, not NaN.') 

2726 

2727 logger.warning( 

2728 'Tractions are derived based on the given source rake.') 

2729 tractions = DirectedTractions(rake=self.rake) 

2730 else: 

2731 tractions = self.tractions 

2732 return tractions.get_tractions(self.nx, self.ny, self.patches) 

2733 

    def get_scaled_tractions(self, store):
        '''
        Get traction vectors rescaled to given slip.

        Opposing to :py:meth:`get_tractions` traction vectors
        (:py:class:`~pyrocko.gf.tractions.DirectedTractions`) are rescaled to
        the given :py:attr:`slip` before returning. If no :py:attr:`slip` and
        :py:attr:`rake` are provided, the given :py:attr:`tractions` are
        returned without scaling.

        :param store:
            Green's function database (needs to cover whole region of the
            source).
        :type store:
            :py:class:`~pyrocko.gf.store.Store`

        :returns:
            Traction vectors per patch.
        :rtype:
            :py:class:`~numpy.ndarray`: ``(n_patches, 3)``.
        '''
        tractions = self.tractions
        factor = 1.

        if self.rake is not None and self.slip is not None:
            if num.isnan(self.rake):
                raise ValueError('Rake must be a real number, not NaN.')

            self.discretize_patches(store)
            # Maximum slip magnitude obtained with unscaled tractions.
            slip_0t = max(num.linalg.norm(
                self.get_slip(scale_slip=False),
                axis=1))

            # Scale so that the maximum slip equals the requested slip.
            factor = self.slip / slip_0t
            tractions = DirectedTractions(rake=self.rake)

        return tractions.get_tractions(self.nx, self.ny, self.patches) * factor

2771 

2772 def base_key(self): 

2773 return SourceWithDerivedMagnitude.base_key(self) + ( 

2774 self.slip, 

2775 self.strike, 

2776 self.dip, 

2777 self.rake, 

2778 self.length, 

2779 self.width, 

2780 float(self.nucleation_x.mean()), 

2781 float(self.nucleation_y.mean()), 

2782 self.decimation_factor, 

2783 self.anchor, 

2784 self.pure_shear, 

2785 self.gamma, 

2786 tuple(self.patch_mask)) 

2787 

2788 def check_conflicts(self): 

2789 if self.tractions and self.rake: 

2790 raise AttributeError( 

2791 'Tractions and rake are mutually exclusive.') 

2792 if self.tractions is None and self.rake is None: 

2793 self.rake = 0. 

2794 

    def get_magnitude(self, store=None, target=None):
        '''
        Get the moment magnitude of the source.

        For slip- or traction-parameterized sources the magnitude is derived
        by time-integrating the moment rate of the discretized source;
        otherwise a unit moment is assumed.

        :raises DerivedMagnitudeError:
            if slip/tractions are set but no ``store`` is given.
        '''
        self.check_conflicts()
        if self.slip is not None or self.tractions is not None:
            if store is None:
                raise DerivedMagnitudeError(
                    'Magnitude for a rectangular source with slip or '
                    'tractions defined can only be derived when earth model '
                    'is set.')

            moment_rate, calc_times = self.discretize_basesource(
                store, target=target).get_moment_rate(store.config.deltat)

            # Sample spacing for the integration; the first interval is
            # duplicated so lengths match the moment-rate samples.
            deltat = num.concatenate((
                (num.diff(calc_times)[0],),
                num.diff(calc_times)))

            return float(pmt.moment_to_magnitude(
                num.sum(moment_rate * deltat)))

        else:
            return float(pmt.moment_to_magnitude(1.0))

2816 

2817 def get_factor(self): 

2818 return 1.0 

2819 

2820 def outline(self, cs='xyz'): 

2821 ''' 

2822 Get source outline corner coordinates. 

2823 

2824 :param cs: 

2825 :ref:`Output coordinate system <coordinate-system-names>`. 

2826 :type cs: 

2827 optional, str 

2828 

2829 :returns: 

2830 Corner points in desired coordinate system. 

2831 :rtype: 

2832 :py:class:`~numpy.ndarray`: ``(5, [2, 3])``. 

2833 ''' 

2834 points = outline_rect_source(self.strike, self.dip, self.length, 

2835 self.width, self.anchor) 

2836 

2837 points[:, 0] += self.north_shift 

2838 points[:, 1] += self.east_shift 

2839 points[:, 2] += self.depth 

2840 if cs == 'xyz': 

2841 return points 

2842 elif cs == 'xy': 

2843 return points[:, :2] 

2844 elif cs in ('latlon', 'lonlat', 'latlondepth'): 

2845 latlon = ne_to_latlon( 

2846 self.lat, self.lon, points[:, 0], points[:, 1]) 

2847 

2848 latlon = num.array(latlon).T 

2849 if cs == 'latlon': 

2850 return latlon 

2851 elif cs == 'lonlat': 

2852 return latlon[:, ::-1] 

2853 else: 

2854 return num.concatenate( 

2855 (latlon, points[:, 2].reshape((len(points), 1))), 

2856 axis=1) 

2857 

2858 def points_on_source(self, cs='xyz', **kwargs): 

2859 ''' 

2860 Convert relative plane coordinates to geographical coordinates. 

2861 

2862 Given x and y coordinates (relative source coordinates between -1. 

2863 and 1.) are converted to desired geographical coordinates. Coordinates 

2864 need to be given as :py:class:`~numpy.ndarray` arguments ``points_x`` 

2865 and ``points_y``. 

2866 

2867 :param cs: 

2868 :ref:`Output coordinate system <coordinate-system-names>`. 

2869 :type cs: 

2870 optional, str 

2871 

2872 :returns: 

2873 Point coordinates in desired coordinate system. 

2874 :rtype: 

2875 :py:class:`~numpy.ndarray`: ``(n_points, [2, 3])``. 

2876 ''' 

2877 points = points_on_rect_source( 

2878 self.strike, self.dip, self.length, self.width, 

2879 self.anchor, **kwargs) 

2880 

2881 points[:, 0] += self.north_shift 

2882 points[:, 1] += self.east_shift 

2883 points[:, 2] += self.depth 

2884 if cs == 'xyz': 

2885 return points 

2886 elif cs == 'xy': 

2887 return points[:, :2] 

2888 elif cs in ('latlon', 'lonlat', 'latlondepth'): 

2889 latlon = ne_to_latlon( 

2890 self.lat, self.lon, points[:, 0], points[:, 1]) 

2891 

2892 latlon = num.array(latlon).T 

2893 if cs == 'latlon': 

2894 return latlon 

2895 elif cs == 'lonlat': 

2896 return latlon[:, ::-1] 

2897 else: 

2898 return num.concatenate( 

2899 (latlon, points[:, 2].reshape((len(points), 1))), 

2900 axis=1) 

2901 

    def pyrocko_moment_tensor(self, store=None, target=None):
        '''
        Get the moment tensor representation of this source.

        The rake is obtained as a weighted average over the patches, with
        weights from the local slip (if a store is given) or traction
        amplitudes.
        '''
        if store is not None:
            if not self.patches:
                self.discretize_patches(store)

            data = self.get_slip()
        else:
            data = self.get_tractions()

        # Weight each patch by its slip/traction vector magnitude.
        weights = num.linalg.norm(data, axis=1)
        weights /= weights.sum()

        # NOTE(review): plain arithmetic averaging of angles; may be biased
        # for rake distributions wrapping around +/-180 deg -- confirm.
        rakes = num.arctan2(data[:, 1], data[:, 0]) * r2d
        rake = num.average(rakes, weights=weights)

        return pmt.MomentTensor(
            strike=self.strike,
            dip=self.dip,
            rake=rake,
            scalar_moment=self.get_moment(store, target))

2922 

2923 def pyrocko_event(self, store=None, target=None, **kwargs): 

2924 return SourceWithDerivedMagnitude.pyrocko_event( 

2925 self, store, target, 

2926 **kwargs) 

2927 

2928 @classmethod 

2929 def from_pyrocko_event(cls, ev, **kwargs): 

2930 d = {} 

2931 mt = ev.moment_tensor 

2932 if mt: 

2933 (strike, dip, rake), _ = mt.both_strike_dip_rake() 

2934 d.update( 

2935 strike=float(strike), 

2936 dip=float(dip), 

2937 rake=float(rake)) 

2938 

2939 d.update(kwargs) 

2940 return super(PseudoDynamicRupture, cls).from_pyrocko_event(ev, **d) 

2941 

    def _discretize_points(self, store, *args, **kwargs):
        '''
        Discretize source plane with equal vertical and horizontal spacing.

        Additional ``*args`` and ``**kwargs`` are passed to
        :py:meth:`points_on_source`.

        :param store:
            Green's function database (needs to cover whole region of the
            source).
        :type store:
            :py:class:`~pyrocko.gf.store.Store`

        :returns:
            Number of points in strike and dip direction, distance
            between adjacent points, coordinates (latlondepth) and coordinates
            (xy on fault) for discrete points.
        :rtype:
            (int, int, float, :py:class:`~numpy.ndarray`,
            :py:class:`~numpy.ndarray`).
        '''
        anch_x, anch_y = map_anchor[self.anchor]

        # Sample a down-dip profile (roughly 1 km spacing) to probe vs.
        npoints = int(self.width // km) + 1
        points = num.zeros((npoints, 3))
        points[:, 1] = num.linspace(-1., 1., npoints)
        points[:, 1] = (points[:, 1] - anch_y) * self.width/2

        rotmat = pmt.euler_to_matrix(self.dip*d2r, self.strike*d2r, 0.0)
        points = num.dot(rotmat.T, points.T).T
        points[:, 2] += self.depth

        vs_min = store.config.get_vs(
            self.lat, self.lon, points,
            interpolation='nearest_neighbor')
        # Floor on the rupture velocity avoids tiny grid spacing in very
        # slow layers.
        vr_min = max(vs_min.min(), .5*km) * self.gamma

        oversampling = 10.
        delta_l = self.length / (self.nx * oversampling)
        delta_w = self.width / (self.ny * oversampling)

        # Grid spacing limited by GF sampling, patch size and store grid.
        delta = self.eikonal_decimation * num.min([
            store.config.deltat * vr_min / oversampling,
            delta_l, delta_w] + [
            deltas for deltas in store.config.deltas])

        # Snap so an integer number of cells spans the fault width.
        delta = delta_w / num.ceil(delta_w / delta)

        nx = int(num.ceil(self.length / delta)) + 1
        ny = int(num.ceil(self.width / delta)) + 1

        # Symmetric overshoot beyond the fault length, normalized units.
        rem_l = (nx-1)*delta - self.length
        lim_x = rem_l / self.length

        points_xy = num.zeros((nx * ny, 2))
        points_xy[:, 0] = num.repeat(
            num.linspace(-1.-lim_x, 1.+lim_x, nx), ny)
        points_xy[:, 1] = num.tile(
            num.linspace(-1., 1., ny), nx)

        points = self.points_on_source(
            points_x=points_xy[:, 0],
            points_y=points_xy[:, 1],
            **kwargs)

        return nx, ny, delta, points, points_xy

3008 

3009 def _discretize_rupture_v(self, store, interpolation='nearest_neighbor', 

3010 points=None): 

3011 ''' 

3012 Get rupture velocity for discrete points on source plane. 

3013 

3014 :param store: 

3015 Green's function database (needs to cover the whole region of the 

3016 source) 

3017 :type store: 

3018 optional, :py:class:`~pyrocko.gf.store.Store` 

3019 

3020 :param interpolation: 

3021 Interpolation method to use (choose between ``'nearest_neighbor'`` 

3022 and ``'multilinear'``). 

3023 :type interpolation: 

3024 optional, str 

3025 

3026 :param points: 

3027 Coordinates on fault (-1.:1.) of discrete points. 

3028 :type points: 

3029 optional, :py:class:`~numpy.ndarray`: ``(n_points, 2)`` 

3030 

3031 :returns: 

3032 Rupture velocity assumed as :math:`v_s * \\gamma` for discrete 

3033 points. 

3034 :rtype: 

3035 :py:class:`~numpy.ndarray`: ``(n_points, )``. 

3036 ''' 

3037 

3038 if points is None: 

3039 _, _, _, points, _ = self._discretize_points(store, cs='xyz') 

3040 

3041 return store.config.get_vs( 

3042 self.lat, self.lon, 

3043 points=points, 

3044 interpolation=interpolation) * self.gamma 

3045 

3046 def discretize_time( 

3047 self, store, interpolation='nearest_neighbor', 

3048 vr=None, times=None, *args, **kwargs): 

3049 ''' 

3050 Get rupture start time for discrete points on source plane. 

3051 

3052 :param store: 

3053 Green's function database (needs to cover whole region of the 

3054 source) 

3055 :type store: 

3056 :py:class:`~pyrocko.gf.store.Store` 

3057 

3058 :param interpolation: 

3059 Interpolation method to use (choose between ``'nearest_neighbor'`` 

3060 and ``'multilinear'``). 

3061 :type interpolation: 

3062 optional, str 

3063 

3064 :param vr: 

3065 Array, containing rupture user defined rupture velocity values. 

3066 :type vr: 

3067 optional, :py:class:`~numpy.ndarray` 

3068 

3069 :param times: 

3070 Array, containing zeros, where rupture is starting, real positive 

3071 numbers at later secondary nucleation points and -1, where time 

3072 will be calculated. If not given, rupture starts at nucleation_x, 

3073 nucleation_y. Times are given for discrete points with equal 

3074 horizontal and vertical spacing. 

3075 :type times: 

3076 optional, :py:class:`~numpy.ndarray` 

3077 

3078 :returns: 

3079 Coordinates (latlondepth), coordinates (xy), rupture velocity, 

3080 rupture propagation time of discrete points. 

3081 :rtype: 

3082 :py:class:`~numpy.ndarray`: ``(n_points, 3)``, 

3083 :py:class:`~numpy.ndarray`: ``(n_points, 2)``, 

3084 :py:class:`~numpy.ndarray`: ``(n_points_dip, n_points_strike)``, 

3085 :py:class:`~numpy.ndarray`: ``(n_points_dip, n_points_strike)``. 

3086 ''' 

3087 nx, ny, delta, points, points_xy = self._discretize_points( 

3088 store, cs='xyz') 

3089 

3090 if vr is None or vr.shape != tuple((nx, ny)): 

3091 if vr: 

3092 logger.warning( 

3093 'Given rupture velocities are not in right shape: ' 

3094 '(%i, %i), but needed is (%i, %i).', *vr.shape + (nx, ny)) 

3095 vr = self._discretize_rupture_v(store, interpolation, points)\ 

3096 .reshape(nx, ny) 

3097 

3098 if vr.shape != tuple((nx, ny)): 

3099 logger.warning( 

3100 'Given rupture velocities are not in right shape. Therefore' 

3101 ' standard rupture velocity array is used.') 

3102 

3103 def initialize_times(): 

3104 nucl_x, nucl_y = self.nucleation_x, self.nucleation_y 

3105 

3106 if nucl_x.shape != nucl_y.shape: 

3107 raise ValueError( 

3108 'Nucleation coordinates have different shape.') 

3109 

3110 dist_points = num.array([ 

3111 num.linalg.norm(points_xy - num.array([x, y]).ravel(), axis=1) 

3112 for x, y in zip(nucl_x, nucl_y)]) 

3113 nucl_indices = num.argmin(dist_points, axis=1) 

3114 

3115 if self.nucleation_time is None: 

3116 nucl_times = num.zeros_like(nucl_indices) 

3117 else: 

3118 if self.nucleation_time.shape == nucl_x.shape: 

3119 nucl_times = self.nucleation_time 

3120 else: 

3121 raise ValueError( 

3122 'Nucleation coordinates and times have different ' 

3123 'shapes') 

3124 

3125 t = num.full(nx * ny, -1.) 

3126 t[nucl_indices] = nucl_times 

3127 return t.reshape(nx, ny) 

3128 

3129 if times is None: 

3130 times = initialize_times() 

3131 elif times.shape != tuple((nx, ny)): 

3132 times = initialize_times() 

3133 logger.warning( 

3134 'Given times are not in right shape. Therefore standard time' 

3135 ' array is used.') 

3136 

3137 eikonal_ext.eikonal_solver_fmm_cartesian( 

3138 speeds=vr, times=times, delta=delta) 

3139 

3140 return points, points_xy, vr, times 

3141 

    def get_vr_time_interpolators(
            self, store, interpolation='nearest_neighbor', force=False,
            **kwargs):
        '''
        Get interpolators for rupture velocity and rupture time.

        Additional ``**kwargs`` are passed to :py:meth:`discretize_time`.

        :param store:
            Green's function database (needs to cover whole region of the
            source).
        :type store:
            :py:class:`~pyrocko.gf.store.Store`

        :param interpolation:
            Interpolation method to use (choose between ``'nearest_neighbor'``
            and ``'multilinear'``).
        :type interpolation:
            optional, str

        :param force:
            Force recalculation of the interpolators (e.g. after change of
            nucleation point locations/times). Default is ``False``.
        :type force:
            optional, bool

        :returns:
            Cached tuple ``(nx, ny, times, vr, time_interpolator,
            vr_interpolator)`` for the requested interpolation method.
        :rtype:
            tuple
        '''
        # Map pyrocko interpolation names to scipy's
        # RegularGridInterpolator method names.
        interp_map = {'multilinear': 'linear', 'nearest_neighbor': 'nearest'}
        if interpolation not in interp_map:
            raise TypeError(
                'Interpolation method %s not available' % interpolation)

        # Interpolators are cached per interpolation method; ``force``
        # triggers recomputation.
        if not self._interpolators.get(interpolation, False) or force:
            _, points_xy, vr, times = self.discretize_time(
                store, **kwargs)

            if self.length <= 0.:
                raise ValueError(
                    'length must be larger then 0. not %g' % self.length)

            if self.width <= 0.:
                raise ValueError(
                    'width must be larger then 0. not %g' % self.width)

            nx, ny = times.shape
            anch_x, anch_y = map_anchor[self.anchor]

            # Scale fractional fault coordinates (-1..1) to metric in-plane
            # coordinates relative to the anchor point. Mutates points_xy in
            # place; the array is not reused by the caller afterwards.
            points_xy[:, 0] = (points_xy[:, 0] - anch_x) * self.length / 2.
            points_xy[:, 1] = (points_xy[:, 1] - anch_y) * self.width / 2.

            ascont = num.ascontiguousarray

            # Cache layout (nx, ny, times, vr, time_interp, vr_interp) is
            # relied upon by discretize_patches and get_delta_slip.
            # points_xy is stored row-major over the (nx, ny) grid, so
            # [::ny, 0] gives the unique x coordinates and [:ny, 1] the
            # unique y coordinates of the regular grid.
            self._interpolators[interpolation] = (
                nx, ny, times, vr,
                RegularGridInterpolator(
                    (ascont(points_xy[::ny, 0]), ascont(points_xy[:ny, 1])),
                    times,
                    method=interp_map[interpolation]),
                RegularGridInterpolator(
                    (ascont(points_xy[::ny, 0]), ascont(points_xy[:ny, 1])),
                    vr,
                    method=interp_map[interpolation]))

        return self._interpolators[interpolation]

3205 

    def discretize_patches(
            self, store, interpolation='nearest_neighbor', force=False,
            grid_shape=(),
            **kwargs):
        '''
        Get rupture start time and OkadaSource elements for points on rupture.

        All source elements and their corresponding center points are
        calculated and stored in the :py:attr:`patches` attribute.

        Additional ``**kwargs`` are passed to :py:meth:`discretize_time`.

        :param store:
            Green's function database (needs to cover whole region of the
            source).
        :type store:
            :py:class:`~pyrocko.gf.store.Store`

        :param interpolation:
            Interpolation method to use (choose between ``'nearest_neighbor'``
            and ``'multilinear'``).
        :type interpolation:
            optional, str

        :param force:
            Force recalculation of the vr and time interpolators ( e.g. after
            change of nucleation point locations/times). Default is ``False``.
        :type force:
            optional, bool

        :param grid_shape:
            Desired sub fault patch grid size (nlength, nwidth). Either factor
            or grid_shape should be set.
        :type grid_shape:
            optional, tuple of int
        '''
        nx, ny, times, vr, time_interpolator, vr_interpolator = \
            self.get_vr_time_interpolators(
                store,
                interpolation=interpolation, force=force, **kwargs)
        anch_x, anch_y = map_anchor[self.anchor]

        # Translate the anchor convention into Okada-style half-length /
        # half-width extensions relative to the reference point.
        al = self.length / 2.
        aw = self.width / 2.
        al1 = -(al + anch_x * al)
        al2 = al - anch_x * al
        aw1 = -aw + anch_y * aw
        aw2 = aw + anch_y * aw
        # NOTE(review): exact float comparison — holds for the algebra
        # above with anchor values in {-1, 0, 1}, but is fragile; confirm.
        assert num.abs([al1, al2]).sum() == self.length
        assert num.abs([aw1, aw2]).sum() == self.width

        def get_lame(*a, **kw):
            # Elastic moduli from the earth model:
            # lambda = vp**2 * rho - 2*mu, Poisson nu = lambda/(2(lambda+mu)).
            shear_mod = store.config.get_shear_moduli(*a, **kw)
            lamb = store.config.get_vp(*a, **kw)**2 \
                * store.config.get_rho(*a, **kw) - 2. * shear_mod
            return shear_mod, lamb / (2. * (lamb + shear_mod))

        # First pass: moduli at the reference point, used for the single
        # coarse OkadaSource that is subsequently discretized.
        shear_mod, poisson = get_lame(
            self.lat, self.lon,
            num.array([[self.north_shift, self.east_shift, self.depth]]),
            interpolation=interpolation)

        okada_src = OkadaSource(
            lat=self.lat, lon=self.lon,
            strike=self.strike, dip=self.dip,
            north_shift=self.north_shift, east_shift=self.east_shift,
            depth=self.depth,
            al1=al1, al2=al2, aw1=aw1, aw2=aw2,
            poisson=poisson.mean(),
            shearmod=shear_mod.mean(),
            opening=kwargs.get('opening', 0.))

        # Patch grid size: user-given attributes win, then grid_shape,
        # then the native eikonal grid size.
        if not (self.nx and self.ny):
            if grid_shape:
                self.nx, self.ny = grid_shape
            else:
                self.nx = nx
                self.ny = ny

        source_disc, source_points = okada_src.discretize(self.nx, self.ny)

        # Second pass: per-patch moduli at the actual patch locations.
        shear_mod, poisson = get_lame(
            self.lat, self.lon,
            num.array([src.source_patch()[:3] for src in source_disc]),
            interpolation=interpolation)

        if (self.nx, self.ny) != (nx, ny):
            # Patch grid differs from the eikonal grid: interpolate rupture
            # times and velocities onto the patch center points.
            times_interp = time_interpolator(
                num.ascontiguousarray(source_points[:, :2]))
            vr_interp = vr_interpolator(
                num.ascontiguousarray(source_points[:, :2]))
        else:
            # Same grid: reuse the eikonal values directly (transposed to
            # match the patch ordering of okada_src.discretize).
            times_interp = times.T.ravel()
            vr_interp = vr.T.ravel()

        for isrc, src in enumerate(source_disc):
            src.vr = vr_interp[isrc]
            src.time = times_interp[isrc] + self.time

        self.patches = source_disc

3306 

3307 def discretize_basesource(self, store, target=None): 

3308 ''' 

3309 Prepare source for synthetic waveform calculation. 

3310 

3311 :param store: 

3312 Green's function database (needs to cover whole region of the 

3313 source). 

3314 :type store: 

3315 :py:class:`~pyrocko.gf.store.Store` 

3316 

3317 :param target: 

3318 Target information. 

3319 :type target: 

3320 optional, :py:class:`~pyrocko.gf.targets.Target` 

3321 

3322 :returns: 

3323 Source discretized by a set of moment tensors and times. 

3324 :rtype: 

3325 :py:class:`~pyrocko.gf.meta.DiscretizedMTSource` 

3326 ''' 

3327 if not target: 

3328 interpolation = 'nearest_neighbor' 

3329 else: 

3330 interpolation = target.interpolation 

3331 

3332 if not self.patches: 

3333 self.discretize_patches(store, interpolation) 

3334 

3335 if self.coef_mat is None: 

3336 self.calc_coef_mat() 

3337 

3338 delta_slip, slip_times = self.get_delta_slip(store) 

3339 npatches = self.nx * self.ny 

3340 ntimes = slip_times.size 

3341 

3342 anch_x, anch_y = map_anchor[self.anchor] 

3343 

3344 pln = self.length / self.nx 

3345 pwd = self.width / self.ny 

3346 

3347 patch_coords = num.array([ 

3348 (p.ix, p.iy) 

3349 for p in self.patches]).reshape(self.nx, self.ny, 2) 

3350 

3351 # boundary condition is zero-slip 

3352 # is not valid to avoid unwished interpolation effects 

3353 slip_grid = num.zeros((self.nx + 2, self.ny + 2, ntimes, 3)) 

3354 slip_grid[1:-1, 1:-1, :, :] = \ 

3355 delta_slip.reshape(self.nx, self.ny, ntimes, 3) 

3356 

3357 slip_grid[0, 0, :, :] = slip_grid[1, 1, :, :] 

3358 slip_grid[0, -1, :, :] = slip_grid[1, -2, :, :] 

3359 slip_grid[-1, 0, :, :] = slip_grid[-2, 1, :, :] 

3360 slip_grid[-1, -1, :, :] = slip_grid[-2, -2, :, :] 

3361 

3362 slip_grid[1:-1, 0, :, :] = slip_grid[1:-1, 1, :, :] 

3363 slip_grid[1:-1, -1, :, :] = slip_grid[1:-1, -2, :, :] 

3364 slip_grid[0, 1:-1, :, :] = slip_grid[1, 1:-1, :, :] 

3365 slip_grid[-1, 1:-1, :, :] = slip_grid[-2, 1:-1, :, :] 

3366 

3367 def make_grid(patch_parameter): 

3368 grid = num.zeros((self.nx + 2, self.ny + 2)) 

3369 grid[1:-1, 1:-1] = patch_parameter.reshape(self.nx, self.ny) 

3370 

3371 grid[0, 0] = grid[1, 1] 

3372 grid[0, -1] = grid[1, -2] 

3373 grid[-1, 0] = grid[-2, 1] 

3374 grid[-1, -1] = grid[-2, -2] 

3375 

3376 grid[1:-1, 0] = grid[1:-1, 1] 

3377 grid[1:-1, -1] = grid[1:-1, -2] 

3378 grid[0, 1:-1] = grid[1, 1:-1] 

3379 grid[-1, 1:-1] = grid[-2, 1:-1] 

3380 

3381 return grid 

3382 

3383 lamb = self.get_patch_attribute('lamb') 

3384 mu = self.get_patch_attribute('shearmod') 

3385 

3386 lamb_grid = make_grid(lamb) 

3387 mu_grid = make_grid(mu) 

3388 

3389 coords_x = num.zeros(self.nx + 2) 

3390 coords_x[1:-1] = patch_coords[:, 0, 0] 

3391 coords_x[0] = coords_x[1] - pln / 2 

3392 coords_x[-1] = coords_x[-2] + pln / 2 

3393 

3394 coords_y = num.zeros(self.ny + 2) 

3395 coords_y[1:-1] = patch_coords[0, :, 1] 

3396 coords_y[0] = coords_y[1] - pwd / 2 

3397 coords_y[-1] = coords_y[-2] + pwd / 2 

3398 

3399 slip_interp = RegularGridInterpolator( 

3400 (coords_x, coords_y, slip_times), 

3401 slip_grid, method='nearest') 

3402 

3403 lamb_interp = RegularGridInterpolator( 

3404 (coords_x, coords_y), 

3405 lamb_grid, method='nearest') 

3406 

3407 mu_interp = RegularGridInterpolator( 

3408 (coords_x, coords_y), 

3409 mu_grid, method='nearest') 

3410 

3411 # discretize basesources 

3412 mindeltagf = min(tuple( 

3413 (self.length / self.nx, self.width / self.ny) + 

3414 tuple(store.config.deltas))) 

3415 

3416 nl = int((1. / self.decimation_factor) * 

3417 num.ceil(pln / mindeltagf)) + 1 

3418 nw = int((1. / self.decimation_factor) * 

3419 num.ceil(pwd / mindeltagf)) + 1 

3420 nsrc_patch = int(nl * nw) 

3421 dl = pln / nl 

3422 dw = pwd / nw 

3423 

3424 patch_area = dl * dw 

3425 

3426 xl = num.linspace(-0.5 * (pln - dl), 0.5 * (pln - dl), nl) 

3427 xw = num.linspace(-0.5 * (pwd - dw), 0.5 * (pwd - dw), nw) 

3428 

3429 base_coords = num.zeros((nsrc_patch, 3)) 

3430 base_coords[:, 0] = num.tile(xl, nw) 

3431 base_coords[:, 1] = num.repeat(xw, nl) 

3432 base_coords = num.tile(base_coords, (npatches, 1)) 

3433 

3434 center_coords = num.zeros((npatches, 3)) 

3435 center_coords[:, 0] = num.repeat( 

3436 num.arange(self.nx) * pln + pln / 2, self.ny) - self.length / 2 

3437 center_coords[:, 1] = num.tile( 

3438 num.arange(self.ny) * pwd + pwd / 2, self.nx) - self.width / 2 

3439 

3440 base_coords -= center_coords.repeat(nsrc_patch, axis=0) 

3441 nbaselocs = base_coords.shape[0] 

3442 

3443 base_interp = base_coords.repeat(ntimes, axis=0) 

3444 

3445 base_times = num.tile(slip_times, nbaselocs) 

3446 base_interp[:, 0] -= anch_x * self.length / 2 

3447 base_interp[:, 1] -= anch_y * self.width / 2 

3448 base_interp[:, 2] = base_times 

3449 

3450 _, _, _, _, time_interpolator, _ = self.get_vr_time_interpolators( 

3451 store, interpolation=interpolation) 

3452 

3453 time_eikonal_max = time_interpolator.values.max() 

3454 

3455 nbasesrcs = base_interp.shape[0] 

3456 delta_slip = slip_interp(base_interp).reshape(nbaselocs, ntimes, 3) 

3457 lamb = lamb_interp(base_interp[:, :2]).ravel() 

3458 mu = mu_interp(base_interp[:, :2]).ravel() 

3459 

3460 if False: 

3461 try: 

3462 import matplotlib.pyplot as plt 

3463 coords = base_coords.copy() 

3464 norm = num.sum(num.linalg.norm(delta_slip, axis=2), axis=1) 

3465 plt.scatter(coords[:, 0], coords[:, 1], c=norm) 

3466 plt.show() 

3467 except AttributeError: 

3468 pass 

3469 

3470 base_interp[:, 2] = 0. 

3471 rotmat = pmt.euler_to_matrix(self.dip * d2r, self.strike * d2r, 0.0) 

3472 base_interp = num.dot(rotmat.T, base_interp.T).T 

3473 base_interp[:, 0] += self.north_shift 

3474 base_interp[:, 1] += self.east_shift 

3475 base_interp[:, 2] += self.depth 

3476 

3477 slip_strike = delta_slip[:, :, 0].ravel() 

3478 slip_dip = delta_slip[:, :, 1].ravel() 

3479 slip_norm = delta_slip[:, :, 2].ravel() 

3480 

3481 slip_shear = num.linalg.norm([slip_strike, slip_dip], axis=0) 

3482 slip_rake = r2d * num.arctan2(slip_dip, slip_strike) 

3483 

3484 m6s = okada_ext.patch2m6( 

3485 strikes=num.full(nbasesrcs, self.strike, dtype=float), 

3486 dips=num.full(nbasesrcs, self.dip, dtype=float), 

3487 rakes=slip_rake, 

3488 disl_shear=slip_shear, 

3489 disl_norm=slip_norm, 

3490 lamb=lamb, 

3491 mu=mu, 

3492 nthreads=self.nthreads) 

3493 

3494 m6s *= patch_area 

3495 

3496 dl = -self.patches[0].al1 + self.patches[0].al2 

3497 dw = -self.patches[0].aw1 + self.patches[0].aw2 

3498 

3499 base_times[base_times > time_eikonal_max] = time_eikonal_max 

3500 

3501 ds = meta.DiscretizedMTSource( 

3502 lat=self.lat, 

3503 lon=self.lon, 

3504 times=base_times + self.time, 

3505 north_shifts=base_interp[:, 0], 

3506 east_shifts=base_interp[:, 1], 

3507 depths=base_interp[:, 2], 

3508 m6s=m6s, 

3509 dl=dl, 

3510 dw=dw, 

3511 nl=self.nx, 

3512 nw=self.ny) 

3513 

3514 return ds 

3515 

3516 def calc_coef_mat(self): 

3517 ''' 

3518 Calculate coefficients connecting tractions and dislocations. 

3519 ''' 

3520 if not self.patches: 

3521 raise ValueError( 

3522 'Patches are needed. Please calculate them first.') 

3523 

3524 self.coef_mat = make_okada_coefficient_matrix( 

3525 self.patches, nthreads=self.nthreads, pure_shear=self.pure_shear) 

3526 

3527 def get_patch_attribute(self, attr): 

3528 ''' 

3529 Get patch attributes. 

3530 

3531 :param attr: 

3532 Name of selected attribute (see 

3533 :py:class`pyrocko.modelling.okada.OkadaSource`). 

3534 :type attr: 

3535 str 

3536 

3537 :returns: 

3538 Array with attribute value for each fault patch. 

3539 :rtype: 

3540 :py:class:`~numpy.ndarray` 

3541 

3542 ''' 

3543 if not self.patches: 

3544 raise ValueError( 

3545 'Patches are needed. Please calculate them first.') 

3546 return num.array([getattr(p, attr) for p in self.patches]) 

3547 

    def get_slip(
            self,
            time=None,
            scale_slip=True,
            interpolation='nearest_neighbor',
            **kwargs):
        '''
        Get slip per subfault patch for given time after rupture start.

        :param time:
            Time after origin [s], for which slip is computed. If not
            given, final static slip is returned.
        :type time:
            optional, float > 0.

        :param scale_slip:
            If ``True`` and :py:attr:`slip` given, all slip values are scaled
            to fit the given maximum slip.
        :type scale_slip:
            optional, bool

        :param interpolation:
            Interpolation method to use (choose between ``'nearest_neighbor'``
            and ``'multilinear'``).
        :type interpolation:
            optional, str

        :returns:
            Inverted dislocations (:math:`u_{strike}, u_{dip}, u_{tensile}`)
            for each source patch.
        :rtype:
            :py:class:`~numpy.ndarray`: ``(n_sources, 3)``
        '''

        if self.patches is None:
            raise ValueError(
                'Please discretize the source first (discretize_patches())')
        npatches = len(self.patches)
        tractions = self.get_tractions()
        # Rupture times relative to the origin time.
        time_patch_max = self.get_patch_attribute('time').max() - self.time

        time_patch = time
        if time is None:
            time_patch = time_patch_max

        if self.coef_mat is None:
            self.calc_coef_mat()

        if tractions.shape != (npatches, 3):
            raise AttributeError(
                'The traction vector is of invalid shape.'
                ' Required shape is (npatches, 3)')

        patch_mask = num.ones(npatches, dtype=bool)
        if self.patch_mask is not None:
            patch_mask = self.patch_mask

        # Patches that have started rupturing by time_patch take part in the
        # dislocation inversion.
        times = self.get_patch_attribute('time') - self.time
        times[~patch_mask] = time_patch + 1.  # exclude masked-out patches
        relevant_sources = num.nonzero(times <= time_patch)[0]
        disloc_est = num.zeros_like(tractions)

        if self.smooth_rupture:
            # Fractional activation per patch: share of eikonal grid points
            # inside the patch that have ruptured by time_max.
            patch_activation = num.zeros(npatches)

            # NOTE(review): ``store`` is not a parameter of this method — it
            # resolves to the module-level ``from . import store`` import.
            # This only works when the interpolators are already cached
            # (e.g. after discretize_patches); confirm intended behavior.
            nx, ny, times, vr, time_interpolator, vr_interpolator = \
                self.get_vr_time_interpolators(
                    store, interpolation=interpolation)

            # Getting the native Eikonal grid, bit hackish
            points_x = num.round(time_interpolator.grid[0], decimals=2)
            points_y = num.round(time_interpolator.grid[1], decimals=2)
            times_eikonal = time_interpolator.values

            time_max = time
            if time is None:
                time_max = times_eikonal.max()

            for ip, p in enumerate(self.patches):
                # Patch bounding box (upper-left, lower-right) in fault-plane
                # coordinates, rounded to match the rounded grid above.
                ul = num.round((p.ix + p.al1, p.iy + p.aw1), decimals=2)
                lr = num.round((p.ix + p.al2, p.iy + p.aw2), decimals=2)

                idx_length = num.logical_and(
                    points_x >= ul[0], points_x <= lr[0])
                idx_width = num.logical_and(
                    points_y >= ul[1], points_y <= lr[1])

                times_patch = times_eikonal[num.ix_(idx_length, idx_width)]
                if times_patch.size == 0:
                    raise AttributeError('could not use smooth_rupture')

                patch_activation[ip] = \
                    (times_patch <= time_max).sum() / times_patch.size

                # At t=0 nothing has slipped yet (unless the rupture is
                # instantaneous).
                if time_patch == 0 and time_patch != time_patch_max:
                    patch_activation[ip] = 0.

            patch_activation[~patch_mask] = 0.  # exclude masked-out patches

            relevant_sources = num.nonzero(patch_activation > 0.)[0]

        if relevant_sources.size == 0:
            return disloc_est

        # Expand patch indices to the 3 dislocation components per patch to
        # slice the coefficient matrix.
        indices_disl = num.repeat(relevant_sources * 3, 3)
        indices_disl[1::3] += 1
        indices_disl[2::3] += 2

        disloc_est[relevant_sources] = invert_fault_dislocations_bem(
            stress_field=tractions[relevant_sources, :].ravel(),
            coef_mat=self.coef_mat[indices_disl, :][:, indices_disl],
            pure_shear=self.pure_shear, nthreads=self.nthreads,
            epsilon=None,
            **kwargs)

        if self.smooth_rupture:
            disloc_est *= patch_activation[:, num.newaxis]

        if scale_slip and self.slip is not None:
            # Scale such that the maximum final (t = t_max) dislocation
            # equals the configured maximum slip.
            disloc_tmax = num.zeros(npatches)

            indices_disl = num.repeat(num.nonzero(patch_mask)[0] * 3, 3)
            indices_disl[1::3] += 1
            indices_disl[2::3] += 2

            disloc_tmax[patch_mask] = num.linalg.norm(
                invert_fault_dislocations_bem(
                    stress_field=tractions[patch_mask, :].ravel(),
                    coef_mat=self.coef_mat[indices_disl, :][:, indices_disl],
                    pure_shear=self.pure_shear, nthreads=self.nthreads,
                    epsilon=None,
                    **kwargs), axis=1)

            disloc_tmax_max = disloc_tmax.max()
            if disloc_tmax_max == 0.:
                logger.warning(
                    'slip scaling not performed. Maximum slip is 0.')

            # NOTE(review): division is not guarded — with
            # disloc_tmax_max == 0. this produces inf/nan values (compare
            # the guarded variant in get_delta_slip); confirm intended.
            disloc_est *= self.slip / disloc_tmax_max

        return disloc_est

3689 

3690 def get_delta_slip( 

3691 self, 

3692 store=None, 

3693 deltat=None, 

3694 delta=True, 

3695 interpolation='nearest_neighbor', 

3696 **kwargs): 

3697 ''' 

3698 Get slip change snapshots. 

3699 

3700 The time interval, within which the slip changes are computed is 

3701 determined by the sampling rate of the Green's function database or 

3702 ``deltat``. Additional ``**kwargs`` are passed to :py:meth:`get_slip`. 

3703 

3704 :param store: 

3705 Green's function database (needs to cover whole region of of the 

3706 source). Its sampling interval is used as time increment for slip 

3707 difference calculation. Either ``deltat`` or ``store`` should be 

3708 given. 

3709 :type store: 

3710 optional, :py:class:`~pyrocko.gf.store.Store` 

3711 

3712 :param deltat: 

3713 Time interval for slip difference calculation [s]. Either 

3714 ``deltat`` or ``store`` should be given. 

3715 :type deltat: 

3716 optional, float 

3717 

3718 :param delta: 

3719 If ``True``, slip differences between two time steps are given. If 

3720 ``False``, cumulative slip for all time steps. 

3721 :type delta: 

3722 optional, bool 

3723 

3724 :param interpolation: 

3725 Interpolation method to use (choose between ``'nearest_neighbor'`` 

3726 and ``'multilinear'``). 

3727 :type interpolation: 

3728 optional, str 

3729 

3730 :returns: 

3731 Displacement changes(:math:`\\Delta u_{strike}, 

3732 \\Delta u_{dip} , \\Delta u_{tensile}`) for each source patch and 

3733 time; corner times, for which delta slip is computed. The order of 

3734 displacement changes array is: 

3735 

3736 .. math:: 

3737 

3738 &[[\\\\ 

3739 &[\\Delta u_{strike, patch1, t1}, 

3740 \\Delta u_{dip, patch1, t1}, 

3741 \\Delta u_{tensile, patch1, t1}],\\\\ 

3742 &[\\Delta u_{strike, patch1, t2}, 

3743 \\Delta u_{dip, patch1, t2}, 

3744 \\Delta u_{tensile, patch1, t2}]\\\\ 

3745 &], [\\\\ 

3746 &[\\Delta u_{strike, patch2, t1}, ...],\\\\ 

3747 &[\\Delta u_{strike, patch2, t2}, ...]]]\\\\ 

3748 

3749 :rtype: :py:class:`~numpy.ndarray`: ``(n_sources, n_times, 3)``, 

3750 :py:class:`~numpy.ndarray`: ``(n_times, )`` 

3751 ''' 

3752 if store and deltat: 

3753 raise AttributeError( 

3754 'Argument collision. ' 

3755 'Please define only the store or the deltat argument.') 

3756 

3757 if store: 

3758 deltat = store.config.deltat 

3759 

3760 if not deltat: 

3761 raise AttributeError('Please give a GF store or set deltat.') 

3762 

3763 npatches = len(self.patches) 

3764 

3765 _, _, _, _, time_interpolator, _ = self.get_vr_time_interpolators( 

3766 store, interpolation=interpolation) 

3767 tmax = time_interpolator.values.max() 

3768 

3769 calc_times = num.arange(0., tmax + deltat, deltat) 

3770 calc_times[calc_times > tmax] = tmax 

3771 

3772 disloc_est = num.zeros((npatches, calc_times.size, 3)) 

3773 

3774 for itime, t in enumerate(calc_times): 

3775 disloc_est[:, itime, :] = self.get_slip( 

3776 time=t, scale_slip=False, **kwargs) 

3777 

3778 if self.slip: 

3779 disloc_tmax = num.linalg.norm( 

3780 self.get_slip(scale_slip=False, time=tmax), 

3781 axis=1) 

3782 

3783 disloc_tmax_max = disloc_tmax.max() 

3784 if disloc_tmax_max == 0.: 

3785 logger.warning( 

3786 'Slip scaling not performed. Maximum slip is 0.') 

3787 else: 

3788 disloc_est *= self.slip / disloc_tmax_max 

3789 

3790 if not delta: 

3791 return disloc_est, calc_times 

3792 

3793 # if we have only one timestep there is no gradient 

3794 if calc_times.size > 1: 

3795 disloc_init = disloc_est[:, 0, :] 

3796 disloc_est = num.diff(disloc_est, axis=1) 

3797 disloc_est = num.concatenate(( 

3798 disloc_init[:, num.newaxis, :], disloc_est), axis=1) 

3799 

3800 calc_times = calc_times 

3801 

3802 return disloc_est, calc_times 

3803 

3804 def get_slip_rate(self, *args, **kwargs): 

3805 ''' 

3806 Get slip rate inverted from patches. 

3807 

3808 The time interval, within which the slip rates are computed is 

3809 determined by the sampling rate of the Green's function database or 

3810 ``deltat``. Additional ``*args`` and ``**kwargs`` are passed to 

3811 :py:meth:`get_delta_slip`. 

3812 

3813 :returns: 

3814 Slip rates (:math:`\\Delta u_{strike}/\\Delta t`, 

3815 :math:`\\Delta u_{dip}/\\Delta t, \\Delta u_{tensile}/\\Delta t`) 

3816 for each source patch and time; corner times, for which slip rate 

3817 is computed. The order of sliprate array is: 

3818 

3819 .. math:: 

3820 

3821 &[[\\\\ 

3822 &[\\Delta u_{strike, patch1, t1}/\\Delta t, 

3823 \\Delta u_{dip, patch1, t1}/\\Delta t, 

3824 \\Delta u_{tensile, patch1, t1}/\\Delta t],\\\\ 

3825 &[\\Delta u_{strike, patch1, t2}/\\Delta t, 

3826 \\Delta u_{dip, patch1, t2}/\\Delta t, 

3827 \\Delta u_{tensile, patch1, t2}/\\Delta t]], [\\\\ 

3828 &[\\Delta u_{strike, patch2, t1}/\\Delta t, ...],\\\\ 

3829 &[\\Delta u_{strike, patch2, t2}/\\Delta t, ...]]]\\\\ 

3830 

3831 :rtype: :py:class:`~numpy.ndarray`: ``(n_sources, n_times, 3)``, 

3832 :py:class:`~numpy.ndarray`: ``(n_times, )`` 

3833 ''' 

3834 ddisloc_est, calc_times = self.get_delta_slip( 

3835 *args, delta=True, **kwargs) 

3836 

3837 dt = num.concatenate( 

3838 [(num.diff(calc_times)[0], ), num.diff(calc_times)]) 

3839 slip_rate = num.linalg.norm(ddisloc_est, axis=2) / dt 

3840 

3841 return slip_rate, calc_times 

3842 

3843 def get_moment_rate_patches(self, *args, **kwargs): 

3844 ''' 

3845 Get scalar seismic moment rate for each patch individually. 

3846 

3847 Additional ``*args`` and ``**kwargs`` are passed to 

3848 :py:meth:`get_slip_rate`. 

3849 

3850 :returns: 

3851 Seismic moment rate for each source patch and time; corner times, 

3852 for which patch moment rate is computed based on slip rate. The 

3853 order of the moment rate array is: 

3854 

3855 .. math:: 

3856 

3857 &[\\\\ 

3858 &[(\\Delta M / \\Delta t)_{patch1, t1}, 

3859 (\\Delta M / \\Delta t)_{patch1, t2}, ...],\\\\ 

3860 &[(\\Delta M / \\Delta t)_{patch2, t1}, 

3861 (\\Delta M / \\Delta t)_{patch, t2}, ...],\\\\ 

3862 &[...]]\\\\ 

3863 

3864 :rtype: :py:class:`~numpy.ndarray`: ``(n_sources, n_times)``, 

3865 :py:class:`~numpy.ndarray`: ``(n_times, )`` 

3866 ''' 

3867 slip_rate, calc_times = self.get_slip_rate(*args, **kwargs) 

3868 

3869 shear_mod = self.get_patch_attribute('shearmod') 

3870 p_length = self.get_patch_attribute('length') 

3871 p_width = self.get_patch_attribute('width') 

3872 

3873 dA = p_length * p_width 

3874 

3875 mom_rate = shear_mod[:, num.newaxis] * slip_rate * dA[:, num.newaxis] 

3876 

3877 return mom_rate, calc_times 

3878 

3879 def get_moment_rate(self, store, target=None, deltat=None): 

3880 ''' 

3881 Get seismic source moment rate for the total source (STF). 

3882 

3883 :param store: 

3884 Green's function database (needs to cover whole region of of the 

3885 source). Its ``deltat`` [s] is used as time increment for slip 

3886 difference calculation. Either ``deltat`` or ``store`` should be 

3887 given. 

3888 :type store: 

3889 :py:class:`~pyrocko.gf.store.Store` 

3890 

3891 :param target: 

3892 Target information, needed for interpolation method. 

3893 :type target: 

3894 optional, :py:class:`~pyrocko.gf.targets.Target` 

3895 

3896 :param deltat: 

3897 Time increment for slip difference calculation [s]. If not given 

3898 ``store.deltat`` is used. 

3899 :type deltat: 

3900 optional, float 

3901 

3902 :return: 

3903 Seismic moment rate [Nm/s] for each time; corner times, for which 

3904 moment rate is computed. The order of the moment rate array is: 

3905 

3906 .. math:: 

3907 

3908 &[\\\\ 

3909 &(\\Delta M / \\Delta t)_{t1},\\\\ 

3910 &(\\Delta M / \\Delta t)_{t2},\\\\ 

3911 &...]\\\\ 

3912 

3913 :rtype: 

3914 :py:class:`~numpy.ndarray`: ``(n_times, )``, 

3915 :py:class:`~numpy.ndarray`: ``(n_times, )`` 

3916 ''' 

3917 if not deltat: 

3918 deltat = store.config.deltat 

3919 return self.discretize_basesource( 

3920 store, target=target).get_moment_rate(deltat) 

3921 

3922 def get_moment(self, *args, **kwargs): 

3923 ''' 

3924 Get seismic cumulative moment. 

3925 

3926 Additional ``*args`` and ``**kwargs`` are passed to 

3927 :py:meth:`get_magnitude`. 

3928 

3929 :returns: 

3930 Cumulative seismic moment in [Nm]. 

3931 :rtype: 

3932 float 

3933 ''' 

3934 return float(pmt.magnitude_to_moment(self.get_magnitude( 

3935 *args, **kwargs))) 

3936 

3937 def rescale_slip(self, magnitude=None, moment=None, **kwargs): 

3938 ''' 

3939 Rescale source slip based on given target magnitude or seismic moment. 

3940 

3941 Rescale the maximum source slip to fit the source moment magnitude or 

3942 seismic moment to the given target values. Either ``magnitude`` or 

3943 ``moment`` need to be given. Additional ``**kwargs`` are passed to 

3944 :py:meth:`get_moment`. 

3945 

3946 :param magnitude: 

3947 Target moment magnitude :math:`M_\\mathrm{w}` as in 

3948 [Hanks and Kanamori, 1979] 

3949 :type magnitude: 

3950 optional, float 

3951 

3952 :param moment: 

3953 Target seismic moment :math:`M_0` [Nm]. 

3954 :type moment: 

3955 optional, float 

3956 ''' 

3957 if self.slip is None: 

3958 self.slip = 1. 

3959 logger.warning('No slip found for rescaling. ' 

3960 'An initial slip of 1 m is assumed.') 

3961 

3962 if magnitude is None and moment is None: 

3963 raise ValueError( 

3964 'Either target magnitude or moment need to be given.') 

3965 

3966 moment_init = self.get_moment(**kwargs) 

3967 

3968 if magnitude is not None: 

3969 moment = pmt.magnitude_to_moment(magnitude) 

3970 

3971 self.slip *= moment / moment_init 

3972 

3973 def get_centroid(self, store, *args, **kwargs): 

3974 ''' 

3975 Centroid of the pseudo dynamic rupture model. 

3976 

3977 The centroid location and time are derived from the locations and times 

3978 of the individual patches weighted with their moment contribution. 

3979 Additional ``**kwargs`` are passed to :py:meth:`pyrocko_moment_tensor`. 

3980 

3981 :param store: 

3982 Green's function database (needs to cover whole region of of the 

3983 source). Its ``deltat`` [s] is used as time increment for slip 

3984 difference calculation. Either ``deltat`` or ``store`` should be 

3985 given. 

3986 :type store: 

3987 :py:class:`~pyrocko.gf.store.Store` 

3988 

3989 :returns: 

3990 The centroid location and associated moment tensor. 

3991 :rtype: 

3992 :py:class:`pyrocko.model.Event` 

3993 ''' 

3994 _, _, _, _, time, _ = self.get_vr_time_interpolators(store) 

3995 t_max = time.values.max() 

3996 

3997 moment_rate, times = self.get_moment_rate_patches(deltat=t_max) 

3998 

3999 moment = num.sum(moment_rate * times, axis=1) 

4000 weights = moment / moment.sum() 

4001 

4002 norths = self.get_patch_attribute('north_shift') 

4003 easts = self.get_patch_attribute('east_shift') 

4004 depths = self.get_patch_attribute('depth') 

4005 

4006 centroid_n = num.sum(weights * norths) 

4007 centroid_e = num.sum(weights * easts) 

4008 centroid_d = num.sum(weights * depths) 

4009 

4010 centroid_lat, centroid_lon = ne_to_latlon( 

4011 self.lat, self.lon, centroid_n, centroid_e) 

4012 

4013 moment_rate_, times = self.get_moment_rate(store) 

4014 delta_times = num.concatenate(( 

4015 [times[1] - times[0]], 

4016 num.diff(times))) 

4017 moment_src = delta_times * moment_rate 

4018 

4019 centroid_t = num.sum( 

4020 moment_src / num.sum(moment_src) * times) + self.time 

4021 

4022 mt = self.pyrocko_moment_tensor(store, *args, **kwargs) 

4023 

4024 return model.Event( 

4025 lat=centroid_lat, 

4026 lon=centroid_lon, 

4027 depth=centroid_d, 

4028 time=centroid_t, 

4029 moment_tensor=mt, 

4030 magnitude=mt.magnitude, 

4031 duration=t_max) 

4032 

4033 def get_coulomb_failure_stress( 

4034 self, 

4035 receiver_points, 

4036 friction, 

4037 pressure, 

4038 strike, 

4039 dip, 

4040 rake, 

4041 time=None, 

4042 *args, 

4043 **kwargs): 

4044 ''' 

4045 Calculate Coulomb failure stress change CFS. 

4046 

4047 The function obtains the Coulomb failure stress change :math:`\\Delta 

4048 \\sigma_C` at arbitrary receiver points with a commonly oriented 

4049 receiver plane assuming: 

4050 

4051 .. math:: 

4052 

4053 \\Delta \\sigma_C = \\sigma_S - \\mu (\\sigma_N - \\Delta p) 

4054 

4055 with the shear stress :math:`\\sigma_S`, the coefficient of friction 

4056 :math:`\\mu`, the normal stress :math:`\\sigma_N`, and the pore fluid 

4057 pressure change :math:`\\Delta p`. Each receiver point is characterized 

4058 by its geographical coordinates, and depth. The required receiver plane 

4059 orientation is defined by ``strike``, ``dip``, and ``rake``. The 

4060 Coulomb failure stress change is calculated for a given time after 

4061 rupture origin time. 

4062 

4063 :param receiver_points: 

4064 Location of the receiver points in Northing, Easting, and depth in 

4065 [m]. 

4066 :type receiver_points: 

4067 :py:class:`~numpy.ndarray`: ``(n_receiver, 3)`` 

4068 

4069 :param friction: 

4070 Coefficient of friction. 

4071 :type friction: 

4072 float 

4073 

4074 :param pressure: 

4075 Pore pressure change in [Pa]. 

4076 :type pressure: 

4077 float 

4078 

4079 :param strike: 

4080 Strike of the receiver plane in [deg]. 

4081 :type strike: 

4082 float 

4083 

4084 :param dip: 

4085 Dip of the receiver plane in [deg]. 

4086 :type dip: 

4087 float 

4088 

4089 :param rake: 

4090 Rake of the receiver plane in [deg]. 

4091 :type rake: 

4092 float 

4093 

4094 :param time: 

4095 Time after origin [s], for which the resulting :math:`\\Delta 

4096 \\Sigma_c` is computed. If not given, :math:`\\Delta \\Sigma_c` is 

4097 derived based on the final static slip. 

4098 :type time: 

4099 optional, float > 0. 

4100 

4101 :returns: 

4102 The Coulomb failure stress change :math:`\\Delta \\Sigma_c` at each 

4103 receiver point in [Pa]. 

4104 :rtype: 

4105 :py:class:`~numpy.ndarray`: ``(n_receiver,)`` 

4106 ''' 

4107 # dislocation at given time 

4108 source_slip = self.get_slip(time=time, scale_slip=True) 

4109 

4110 # source planes 

4111 source_patches = num.array([ 

4112 src.source_patch() for src in self.patches]) 

4113 

4114 # earth model 

4115 lambda_mean = num.mean([src.lamb for src in self.patches]) 

4116 mu_mean = num.mean([src.shearmod for src in self.patches]) 

4117 

4118 # Dislocation and spatial derivatives from okada in NED 

4119 results = okada_ext.okada( 

4120 source_patches, 

4121 source_slip, 

4122 receiver_points, 

4123 lambda_mean, 

4124 mu_mean, 

4125 rotate_sdn=False, # TODO Check 

4126 stack_sources=0, # TODO Check 

4127 *args, **kwargs) 

4128 # shape (n_sources, n_receivers, n_components) 

4129 

4130 # resolve stress tensor (sum!) 

4131 diag_ind = [0, 4, 8] 

4132 kron = num.zeros(9) 

4133 kron[diag_ind] = 1. 

4134 kron = kron[num.newaxis, num.newaxis, :] 

4135 

4136 eps = 0.5 * ( 

4137 results[:, :, 3:] + 

4138 results[:, :, (3, 6, 9, 4, 7, 10, 5, 8, 11)]) 

4139 

4140 dilatation \ 

4141 = eps[:, :, diag_ind].sum(axis=-1)[:, :, num.newaxis] 

4142 

4143 stress = kron*lambda_mean*dilatation + 2.*mu_mean*eps 

4144 # stress shape (n_sources, n_receivers, n_stress_components) 

4145 

4146 # superposed stress of all sources at receiver locations 

4147 stress_sum = num.sum(stress, axis=0) 

4148 # stress_sum shape: (n_receivers, n_stress_components) 

4149 

4150 # get shear and normal stress from stress tensor 

4151 st0 = d2r * strike 

4152 di0 = d2r * dip 

4153 ra0 = d2r * rake 

4154 

4155 n_rec = receiver_points.shape[0] 

4156 stress_normal = num.zeros(n_rec) 

4157 tau = num.zeros(n_rec) 

4158 

4159 # TODO speed up and remove for loop 

4160 for irec in range(n_rec): 

4161 ns = num.zeros(3) 

4162 rst = num.zeros(3) 

4163 rdi = num.zeros(3) 

4164 

4165 ns[0] = num.sin(di0) * num.cos(st0 + 0.5 * num.pi) 

4166 ns[1] = num.sin(di0) * num.sin(st0 + 0.5 * num.pi) 

4167 ns[2] = -num.cos(di0) 

4168 

4169 rst[0] = num.cos(st0) 

4170 rst[1] = num.sin(st0) 

4171 rst[2] = 0.0 

4172 

4173 rdi[0] = num.cos(di0) * num.cos(st0 + 0.5 * num.pi) 

4174 rdi[1] = num.cos(di0) * num.sin(st0 + 0.5 * num.pi) 

4175 rdi[2] = num.sin(di0) 

4176 

4177 ts = rst * num.cos(ra0) - rdi * num.sin(ra0) 

4178 

4179 # TODO speed up and remove for loops 

4180 for j in range(3): 

4181 for i in range(3): 

4182 # TODO Check 

4183 stress_normal[irec] += \ 

4184 ns[i] * stress_sum[irec, j*3 + i] * ns[j] 

4185 tau[irec] += ts[i] * stress_sum[irec, j*3 + i] * ns[j] 

4186 

4187 # calculate cfs using formula above and return 

4188 return tau + friction * (stress_normal + pressure) 

4189 

4190 

class DoubleDCSource(SourceWithMagnitude):
    '''
    Two double-couple point sources separated in space and time.
    Moment share between the sub-sources is controlled by the
    parameter mix.
    The position of the subsources is dependent on the moment
    distribution between the two sources. Depth, east and north
    shift are given for the centroid between the two double-couples.
    The subsources will positioned according to their moment shares
    around this centroid position.
    This is done according to their delta parameters, which are
    therefore in relation to that centroid.
    Note that depth of the subsources therefore can be
    depth+/-delta_depth. For shallow earthquakes therefore
    the depth has to be chosen deeper to avoid sampling
    above surface.
    '''

    strike1 = Float.T(
        default=0.0,
        help='strike direction in [deg], measured clockwise from north')

    dip1 = Float.T(
        default=90.0,
        help='dip angle in [deg], measured downward from horizontal')

    azimuth = Float.T(
        default=0.0,
        help='azimuth to second double-couple [deg], '
             'measured at first, clockwise from north')

    rake1 = Float.T(
        default=0.0,
        help='rake angle in [deg], '
             'measured counter-clockwise from right-horizontal '
             'in on-plane view')

    strike2 = Float.T(
        default=0.0,
        help='strike direction in [deg], measured clockwise from north')

    dip2 = Float.T(
        default=90.0,
        help='dip angle in [deg], measured downward from horizontal')

    rake2 = Float.T(
        default=0.0,
        help='rake angle in [deg], '
             'measured counter-clockwise from right-horizontal '
             'in on-plane view')

    delta_time = Float.T(
        default=0.0,
        help='separation of double-couples in time (t2-t1) [s]')

    delta_depth = Float.T(
        default=0.0,
        help='difference in depth (z2-z1) [m]')

    distance = Float.T(
        default=0.0,
        help='distance between the two double-couples [m]')

    mix = Float.T(
        default=0.5,
        help='how to distribute the moment to the two doublecouples '
             'mix=0 -> m1=1 and m2=0; mix=1 -> m1=0, m2=1')

    stf1 = STF.T(
        optional=True,
        help='Source time function of subsource 1 '
             '(if given, overrides STF from attribute :py:gattr:`Source.stf`)')

    stf2 = STF.T(
        optional=True,
        help='Source time function of subsource 2 '
             '(if given, overrides STF from attribute :py:gattr:`Source.stf`)')

    # GF discretization produces a moment tensor point-source cloud.
    discretized_source_class = meta.DiscretizedMTSource

    def base_key(self):
        '''
        Cache key: includes both sub-source mechanisms, their STFs and the
        spatio-temporal separation parameters.
        '''
        return (
            self.time, self.depth, self.lat, self.north_shift,
            self.lon, self.east_shift, type(self).__name__) + \
            self.effective_stf1_pre().base_key() + \
            self.effective_stf2_pre().base_key() + (
                self.strike1, self.dip1, self.rake1,
                self.strike2, self.dip2, self.rake2,
                self.delta_time, self.delta_depth,
                self.azimuth, self.distance, self.mix)

    def get_factor(self):
        '''
        Scale factor applied to the unit-moment discretized source.
        '''
        return self.moment

    def effective_stf1_pre(self):
        '''
        STF of subsource 1: ``stf1`` overrides ``stf``; fall back to a
        unit pulse.
        '''
        return self.stf1 or self.stf or g_unit_pulse

    def effective_stf2_pre(self):
        '''
        STF of subsource 2: ``stf2`` overrides ``stf``; fall back to a
        unit pulse.
        '''
        return self.stf2 or self.stf or g_unit_pulse

    def effective_stf_post(self):
        '''
        No post-processing STF; convolution happened per sub-source.
        '''
        return g_unit_pulse

    def split(self):
        '''
        Decompose into two independent :py:class:`DCSource` objects.

        The sub-sources are placed around the centroid, offset in space
        and time proportionally to the complementary moment share, so
        that the moment-weighted mean stays at the configured location.
        '''
        a1 = 1.0 - self.mix
        a2 = self.mix
        delta_north = math.cos(self.azimuth * d2r) * self.distance
        delta_east = math.sin(self.azimuth * d2r) * self.distance

        dc1 = DCSource(
            lat=self.lat,
            lon=self.lon,
            time=self.time - self.delta_time * a2,
            north_shift=self.north_shift - delta_north * a2,
            east_shift=self.east_shift - delta_east * a2,
            depth=self.depth - self.delta_depth * a2,
            moment=self.moment * a1,
            strike=self.strike1,
            dip=self.dip1,
            rake=self.rake1,
            stf=self.stf1 or self.stf)

        dc2 = DCSource(
            lat=self.lat,
            lon=self.lon,
            time=self.time + self.delta_time * a1,
            north_shift=self.north_shift + delta_north * a1,
            east_shift=self.east_shift + delta_east * a1,
            depth=self.depth + self.delta_depth * a1,
            moment=self.moment * a2,
            strike=self.strike2,
            dip=self.dip2,
            rake=self.rake2,
            stf=self.stf2 or self.stf)

        return [dc1, dc2]

    def discretize_basesource(self, store, target=None):
        '''
        Discretize both sub-sources onto the GF store's time sampling and
        combine them into one :py:class:`~pyrocko.gf.meta.DiscretizedMTSource`.

        Moments are distributed via ``mix`` (a1 = 1 - mix, a2 = mix);
        positions/times are offset exactly as in :py:meth:`split`.
        '''
        a1 = 1.0 - self.mix
        a2 = self.mix
        mot1 = pmt.MomentTensor(strike=self.strike1, dip=self.dip1,
                                rake=self.rake1, scalar_moment=a1)
        mot2 = pmt.MomentTensor(strike=self.strike2, dip=self.dip2,
                                rake=self.rake2, scalar_moment=a2)

        delta_north = math.cos(self.azimuth * d2r) * self.distance
        delta_east = math.sin(self.azimuth * d2r) * self.distance

        times1, amplitudes1 = self.effective_stf1_pre().discretize_t(
            store.config.deltat, self.time - self.delta_time * a2)

        times2, amplitudes2 = self.effective_stf2_pre().discretize_t(
            store.config.deltat, self.time + self.delta_time * a1)

        nt1 = times1.size
        nt2 = times2.size

        ds = meta.DiscretizedMTSource(
            lat=self.lat,
            lon=self.lon,
            times=num.concatenate((times1, times2)),
            north_shifts=num.concatenate((
                num.repeat(self.north_shift - delta_north * a2, nt1),
                num.repeat(self.north_shift + delta_north * a1, nt2))),
            east_shifts=num.concatenate((
                num.repeat(self.east_shift - delta_east * a2, nt1),
                num.repeat(self.east_shift + delta_east * a1, nt2))),
            depths=num.concatenate((
                num.repeat(self.depth - self.delta_depth * a2, nt1),
                num.repeat(self.depth + self.delta_depth * a1, nt2))),
            m6s=num.vstack((
                mot1.m6()[num.newaxis, :] * amplitudes1[:, num.newaxis],
                mot2.m6()[num.newaxis, :] * amplitudes2[:, num.newaxis])))

        return ds

    def pyrocko_moment_tensor(self, store=None, target=None):
        '''
        Combined moment tensor: moment-weighted sum of both sub-source
        tensors.
        '''
        a1 = 1.0 - self.mix
        a2 = self.mix
        mot1 = pmt.MomentTensor(strike=self.strike1, dip=self.dip1,
                                rake=self.rake1,
                                scalar_moment=a1 * self.moment)
        mot2 = pmt.MomentTensor(strike=self.strike2, dip=self.dip2,
                                rake=self.rake2,
                                scalar_moment=a2 * self.moment)
        return pmt.MomentTensor(m=mot1.m() + mot2.m())

    def pyrocko_event(self, store=None, target=None, **kwargs):
        '''
        Export as :py:class:`pyrocko.model.Event` with the combined moment
        tensor attached.
        '''
        return SourceWithMagnitude.pyrocko_event(
            self, store, target,
            moment_tensor=self.pyrocko_moment_tensor(store, target),
            **kwargs)

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        '''
        Construct from a :py:class:`pyrocko.model.Event`.

        Both sub-sources get the event's mechanism; ``mix=0`` puts all
        moment into subsource 1, and subsource 2's STF is zeroed out.
        '''
        d = {}
        mt = ev.moment_tensor
        if mt:
            (strike, dip, rake), _ = mt.both_strike_dip_rake()
            d.update(
                strike1=float(strike),
                dip1=float(dip),
                rake1=float(rake),
                strike2=float(strike),
                dip2=float(dip),
                rake2=float(rake),
                mix=0.0,
                magnitude=float(mt.moment_magnitude()))

        d.update(kwargs)
        source = super(DoubleDCSource, cls).from_pyrocko_event(ev, **d)
        source.stf1 = source.stf
        source.stf2 = HalfSinusoidSTF(effective_duration=0.)
        source.stf = None
        return source

4406 

4407 

class RingfaultSource(SourceWithMagnitude):
    '''
    A ring fault with vertical doublecouples.
    '''

    diameter = Float.T(
        default=1.0,
        help='diameter of the ring in [m]')

    sign = Float.T(
        default=1.0,
        help='inside of the ring moves up (+1) or down (-1)')

    strike = Float.T(
        default=0.0,
        help='strike direction of the ring plane, clockwise from north,'
             ' in [deg]')

    dip = Float.T(
        default=0.0,
        help='dip angle of the ring plane from horizontal in [deg]')

    npointsources = Int.T(
        default=360,
        help='number of point sources to use')

    # GF discretization produces a moment tensor point-source cloud.
    discretized_source_class = meta.DiscretizedMTSource

    def base_key(self):
        '''
        Cache key: location plus ring geometry and discretization density.
        '''
        return Source.base_key(self) + (
            self.strike, self.dip, self.diameter, self.npointsources)

    def get_factor(self):
        '''
        Scale factor: moment with the motion-direction sign applied.
        '''
        return self.sign * self.moment

    def discretize_basesource(self, store=None, target=None):
        '''
        Discretize the ring into ``npointsources`` vertical double-couples
        distributed on a circle, rotated into the ring plane.
        '''
        n = self.npointsources
        phi = num.linspace(0, 2.0 * num.pi, n, endpoint=False)

        # Points on a horizontal circle of the requested diameter.
        points = num.zeros((n, 3))
        points[:, 0] = num.cos(phi) * 0.5 * self.diameter
        points[:, 1] = num.sin(phi) * 0.5 * self.diameter

        # Rotate the circle into the (dip, strike) oriented ring plane.
        rotmat = pmt.euler_to_matrix(self.dip * d2r, self.strike * d2r, 0.0)
        points = num.dot(rotmat.T, points.T).T  # !!! ?

        points[:, 0] += self.north_shift
        points[:, 1] += self.east_shift
        points[:, 2] += self.depth

        # Prototype mechanism: vertical dip-slip double-couple, sharing the
        # total unit moment equally among the n point sources.
        m = num.array(pmt.MomentTensor(strike=90., dip=90., rake=-90.,
                                       scalar_moment=1.0 / n).m())

        # Per-point rotation about the vertical axis by the ring angle phi;
        # shape (n, 3, 3).
        rotmats = num.transpose(
            [[num.cos(phi), num.sin(phi), num.zeros(n)],
             [-num.sin(phi), num.cos(phi), num.zeros(n)],
             [num.zeros(n), num.zeros(n), num.ones(n)]], (2, 0, 1))

        # Rotate the prototype tensor to each ring position, then into the
        # ring plane orientation.
        ms = num.zeros((n, 3, 3))
        for i in range(n):
            mtemp = num.dot(rotmats[i].T, num.dot(m, rotmats[i]))
            ms[i, :, :] = num.dot(rotmat.T, num.dot(mtemp, rotmat))

        # Pack tensors into 6-component (mnn, mee, mdd, mne, mnd, med) form.
        m6s = num.vstack((ms[:, 0, 0], ms[:, 1, 1], ms[:, 2, 2],
                          ms[:, 0, 1], ms[:, 0, 2], ms[:, 1, 2])).T

        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)

        nt = times.size

        # Replicate each point source for every STF sample.
        return meta.DiscretizedMTSource(
            times=num.tile(times, n),
            lat=self.lat,
            lon=self.lon,
            north_shifts=num.repeat(points[:, 0], nt),
            east_shifts=num.repeat(points[:, 1], nt),
            depths=num.repeat(points[:, 2], nt),
            m6s=num.repeat(m6s, nt, axis=0) * num.tile(
                amplitudes, n)[:, num.newaxis])

4488 

4489 

class CombiSource(Source):
    '''
    Composite source model.
    '''

    discretized_source_class = meta.DiscretizedMTSource

    subsources = List.T(Source.T())

    def __init__(self, subsources=None, **kwargs):
        '''
        Combine several sources into one composite source.

        :param subsources:
            Sources to be combined; at least one is required. Sub-sources
            with differing geographical origins are cloned and shifted to
            a common origin.
        :raises BadRequest:
            if no sub-sources are given.
        '''
        # Fix: use None instead of a mutable default argument ([]) to
        # avoid sharing state between calls.
        subsources = list(subsources) if subsources else []
        if not subsources:
            raise BadRequest(
                'Need at least one sub-source to create a CombiSource object.')

        lats = num.array(
            [subsource.lat for subsource in subsources], dtype=float)
        lons = num.array(
            [subsource.lon for subsource in subsources], dtype=float)

        lat, lon = lats[0], lons[0]

        # Fix: the original condition ``not a and b`` bound ``not`` only to
        # the first term due to operator precedence, so relocation to a
        # common origin was skipped whenever the longitudes differed.
        # Intended: relocate unless all origins are already identical.
        if not (num.all(lats == lat) and num.all(lons == lon)):
            subsources = [s.clone() for s in subsources]
            for subsource in subsources[1:]:
                subsource.set_origin(lat, lon)

        # The composite's reference location/time is the mean of its parts.
        depth = float(num.mean([p.depth for p in subsources]))
        time = float(num.mean([p.time for p in subsources]))
        north_shift = float(num.mean([p.north_shift for p in subsources]))
        east_shift = float(num.mean([p.east_shift for p in subsources]))
        kwargs.update(
            time=time,
            lat=float(lat),
            lon=float(lon),
            north_shift=north_shift,
            east_shift=east_shift,
            depth=depth)

        Source.__init__(self, subsources=subsources, **kwargs)

    def get_factor(self):
        '''
        Sub-sources carry their own factors; the composite is unscaled.
        '''
        return 1.0

    def discretize_basesource(self, store, target=None):
        '''
        Discretize all sub-sources, apply their individual scale factors
        and merge them into one discretized source.
        '''
        dsources = []
        for sf in self.subsources:
            ds = sf.discretize_basesource(store, target)
            ds.m6s *= sf.get_factor()
            dsources.append(ds)

        return meta.DiscretizedMTSource.combine(dsources)

4540 

4541 

class SFSource(Source):
    '''
    A single force point source.

    Supported GF schemes: `'elastic5'`.
    '''

    discretized_source_class = meta.DiscretizedSFSource

    fn = Float.T(
        default=0.,
        help='northward component of single force [N]')

    fe = Float.T(
        default=0.,
        help='eastward component of single force [N]')

    fd = Float.T(
        default=0.,
        help='downward component of single force [N]')

    def __init__(self, **kwargs):
        Source.__init__(self, **kwargs)

    def base_key(self):
        # Distinguish cached discretizations by force orientation/size.
        force = (self.fn, self.fe, self.fd)
        return Source.base_key(self) + force

    def get_factor(self):
        # Force components are stored in absolute units already.
        return 1.0

    def discretize_basesource(self, store, target=None):
        # Sample the STF, then scale the constant NED force vector with
        # the resulting amplitudes.
        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)
        force_ned = num.array(
            [[self.fn, self.fe, self.fd]], dtype=float)
        forces = amplitudes[:, num.newaxis] * force_ned

        return meta.DiscretizedSFSource(
            forces=forces,
            **self._dparams_base_repeated(times))

    def pyrocko_event(self, store=None, target=None, **kwargs):
        return Source.pyrocko_event(
            self, store, target,
            **kwargs)

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        d = dict(kwargs)
        return super(SFSource, cls).from_pyrocko_event(ev, **d)

4591 

4592 

class PorePressurePointSource(Source):
    '''
    Excess pore pressure point source.

    For poro-elastic initial value problem where an excess pore pressure is
    brought into a small source volume.
    '''

    discretized_source_class = meta.DiscretizedPorePressureSource

    pp = Float.T(
        default=1.0,
        help='initial excess pore pressure in [Pa]')

    def base_key(self):
        # Pressure only scales the result, so the base key is the plain
        # source location key.
        return Source.base_key(self)

    def get_factor(self):
        return self.pp

    def discretize_basesource(self, store, target=None):
        # Unit pore pressure; the actual value is applied via get_factor().
        params = self._dparams_base()
        return meta.DiscretizedPorePressureSource(pp=arr(1.0), **params)

4616 

4617 

class PorePressureLineSource(Source):
    '''
    Excess pore pressure line source.

    The line source is centered at (north_shift, east_shift, depth).
    '''

    discretized_source_class = meta.DiscretizedPorePressureSource

    pp = Float.T(
        default=1.0,
        help='initial excess pore pressure in [Pa]')

    length = Float.T(
        default=0.0,
        help='length of the line source [m]')

    azimuth = Float.T(
        default=0.0,
        help='azimuth direction, clockwise from north [deg]')

    dip = Float.T(
        default=90.,
        help='dip direction, downward from horizontal [deg]')

    def base_key(self):
        # Geometry parameters change the discretization, so they are part
        # of the cache key.
        return Source.base_key(self) + (self.azimuth, self.dip, self.length)

    def get_factor(self):
        return self.pp

    def discretize_basesource(self, store, target=None):
        # Pick an odd number of points so that spacing stays below the
        # smallest GF store grid spacing and the center point is included.
        n = 2 * int(math.ceil(self.length / num.min(store.config.deltas))) + 1

        offsets = num.linspace(-0.5 * self.length, 0.5 * self.length, n)

        sin_azi = math.sin(self.azimuth * d2r)
        cos_azi = math.cos(self.azimuth * d2r)
        sin_dip = math.sin(self.dip * d2r)
        cos_dip = math.cos(self.dip * d2r)

        # Distribute the points along the dipping line through the center.
        points = num.zeros((n, 3))
        points[:, 0] = self.north_shift + offsets * cos_azi * cos_dip
        points[:, 1] = self.east_shift + offsets * sin_azi * cos_dip
        points[:, 2] = self.depth + offsets * sin_dip

        # Unit total pressure, shared equally among the points.
        return meta.DiscretizedPorePressureSource(
            times=util.num_full(n, self.time),
            lat=self.lat,
            lon=self.lon,
            north_shifts=points[:, 0],
            east_shifts=points[:, 1],
            depths=points[:, 2],
            pp=num.ones(n) / n)

4673 

4674 

class Request(Object):
    '''
    Synthetic seismogram computation request.

    ::

        Request(**kwargs)
        Request(sources, targets, **kwargs)
    '''

    sources = List.T(
        Source.T(),
        help='list of sources for which to produce synthetics.')

    targets = List.T(
        Target.T(),
        help='list of targets for which to produce synthetics.')

    @classmethod
    def args2kwargs(cls, args):
        '''
        Translate positional constructor arguments into keyword arguments.

        :raises BadRequest:
            if the number of positional arguments is not 0, 2 or 3.
        '''
        if len(args) not in (0, 2, 3):
            raise BadRequest('Invalid arguments.')

        if len(args) == 2:
            return dict(sources=args[0], targets=args[1])
        else:
            # NOTE(review): 3 positional arguments are accepted but then
            # ignored here — confirm whether this is intended.
            return {}

    def __init__(self, *args, **kwargs):
        kwargs.update(self.args2kwargs(args))
        sources = kwargs.pop('sources', [])
        targets = kwargs.pop('targets', [])

        # Allow passing a single source/target instead of a list.
        if isinstance(sources, Source):
            sources = [sources]

        if isinstance(targets, (Target, StaticTarget)):
            targets = [targets]

        Object.__init__(self, sources=sources, targets=targets, **kwargs)

    @property
    def targets_dynamic(self):
        '''
        Targets producing time series (waveform) results.
        '''
        return [t for t in self.targets if isinstance(t, Target)]

    @property
    def targets_static(self):
        '''
        Targets producing static (deformation) results.
        '''
        return [t for t in self.targets if isinstance(t, StaticTarget)]

    @property
    def has_dynamic(self):
        '''
        ``True`` if any dynamic (waveform) targets are present.
        '''
        return len(self.targets_dynamic) > 0

    @property
    def has_statics(self):
        '''
        ``True`` if any static (deformation) targets are present.
        '''
        return len(self.targets_static) > 0

    def subsources_map(self):
        '''
        Group sources by their cache base key.
        '''
        m = defaultdict(list)
        for source in self.sources:
            m[source.base_key()].append(source)

        return m

    def subtargets_map(self):
        '''
        Group targets by their cache base key.
        '''
        m = defaultdict(list)
        for target in self.targets:
            m[target.base_key()].append(target)

        return m

    def subrequest_map(self):
        '''
        Cartesian product of source groups and target groups, keyed by the
        pair of base keys.
        '''
        ms = self.subsources_map()
        mt = self.subtargets_map()
        m = {}
        for (ks, ls) in ms.items():
            for (kt, lt) in mt.items():
                m[ks, kt] = (ls, lt)

        return m

4755 

4756 

class ProcessingStats(Object):
    '''
    Timing and counting statistics collected while processing a request.

    The ``t_perc_*`` attributes hold the share of processing time spent in
    the respective stage; ``t_wallclock`` and ``t_cpu`` are totals in [s].
    '''
    t_perc_get_store_and_receiver = Float.T(default=0.)
    t_perc_discretize_source = Float.T(default=0.)
    t_perc_make_base_seismogram = Float.T(default=0.)
    t_perc_make_same_span = Float.T(default=0.)
    t_perc_post_process = Float.T(default=0.)
    t_perc_optimize = Float.T(default=0.)
    t_perc_stack = Float.T(default=0.)
    t_perc_static_get_store = Float.T(default=0.)
    t_perc_static_discretize_basesource = Float.T(default=0.)
    t_perc_static_sum_statics = Float.T(default=0.)
    t_perc_static_post_process = Float.T(default=0.)
    t_wallclock = Float.T(default=0.)
    t_cpu = Float.T(default=0.)
    n_read_blocks = Int.T(default=0)
    n_results = Int.T(default=0)
    n_subrequests = Int.T(default=0)
    n_stores = Int.T(default=0)
    n_records_stacked = Int.T(default=0)

4776 

4777 

class Response(Object):
    '''
    Response object to a synthetic seismogram computation request.
    '''

    request = Request.T()
    results_list = List.T(List.T(meta.SeismosizerResult.T()))
    stats = ProcessingStats.T()

    def pyrocko_traces(self):
        '''
        Return a list of requested
        :class:`~pyrocko.trace.Trace` instances.
        '''
        return [
            result.trace.pyrocko_trace()
            for results in self.results_list
            for result in results
            if isinstance(result, meta.Result)]

    def kite_scenes(self):
        '''
        Return a list of requested
        :class:`~kite.scenes` instances.
        '''
        return [
            result.get_scene()
            for results in self.results_list
            for result in results
            if isinstance(result, meta.KiteSceneResult)]

    def static_results(self):
        '''
        Return a list of requested
        :class:`~pyrocko.gf.meta.StaticResult` instances.
        '''
        return [
            result
            for results in self.results_list
            for result in results
            if isinstance(result, meta.StaticResult)]

    def iter_results(self, get='pyrocko_traces'):
        '''
        Generator function to iterate over results of request.

        Yields associated :py:class:`Source`,
        :class:`~pyrocko.gf.targets.Target`,
        :class:`~pyrocko.trace.Trace` instances in each iteration.
        '''
        for isource, source in enumerate(self.request.sources):
            for itarget, target in enumerate(self.request.targets):
                result = self.results_list[isource][itarget]
                if get == 'pyrocko_traces':
                    yield source, target, result.trace.pyrocko_trace()
                elif get == 'results':
                    yield source, target, result

    def snuffle(self, **kwargs):
        '''
        Open *snuffler* with requested traces.
        '''
        trace.snuffle(self.pyrocko_traces(), **kwargs)

4853 

4854 

class Engine(Object):
    '''
    Base class for synthetic seismogram calculators.
    '''

    def get_store_ids(self):
        '''
        Get list of available GF store IDs
        '''

        # The base engine knows no stores; subclasses override this.
        return []

4866 

4867 

class Rule(object):
    '''
    Base class for rules mapping GF components to output channels.
    '''
    pass

4870 

4871 

class VectorRule(Rule):
    '''
    Rule assembling a vector quantity channel from its north, east and
    down GF components, with optional time differentiation.
    '''

    def __init__(self, quantity, differentiate=0, integrate=0):
        self.components = ['%s.%s' % (quantity, c) for c in 'ned']
        self.differentiate = differentiate
        self.integrate = integrate

    def required_components(self, target):
        n, e, d = self.components
        sa, ca, sd, cd = target.get_sin_cos_factors()

        # Only components with a non-vanishing projection factor are
        # actually needed.
        needed = [
            comp for comp, factor in (
                (n, ca * cd), (e, sa * cd), (d, sd))
            if nonzero(factor)]

        return tuple(needed)

    def apply_(self, target, base_seismogram):
        n, e, d = self.components
        sa, ca, sd, cd = target.get_sin_cos_factors()

        # Project the NED components onto the target's measurement
        # direction, skipping components with vanishing factors.
        data = 0.0
        if nonzero(ca * cd):
            data = base_seismogram[n].data * (ca * cd)
            deltat = base_seismogram[n].deltat

        if nonzero(sa * cd):
            data = data + base_seismogram[e].data * (sa * cd)
            deltat = base_seismogram[e].deltat

        if nonzero(sd):
            data = data + base_seismogram[d].data * sd
            deltat = base_seismogram[d].deltat

        if self.differentiate:
            data = util.diff_fd(self.differentiate, 4, deltat, data)

        if self.integrate:
            raise NotImplementedError('Integration is not implemented yet.')

        return data

4920 

4921 

class HorizontalVectorRule(Rule):
    '''
    Rule assembling a horizontal vector quantity channel from its north
    and east GF components, with optional time differentiation.
    '''

    def __init__(self, quantity, differentiate=0, integrate=0):
        self.components = ['%s.%s' % (quantity, c) for c in 'ne']
        self.differentiate = differentiate
        self.integrate = integrate

    def required_components(self, target):
        n, e = self.components
        sa, ca, _, _ = target.get_sin_cos_factors()

        # Only components with a non-vanishing projection factor are
        # actually needed.
        needed = [
            comp for comp, factor in ((n, ca), (e, sa))
            if nonzero(factor)]

        return tuple(needed)

    def apply_(self, target, base_seismogram):
        n, e = self.components
        sa, ca, _, _ = target.get_sin_cos_factors()

        # Project the horizontal components onto the target's measurement
        # azimuth.
        data = 0.0
        if nonzero(ca):
            data = base_seismogram[n].data * ca

        if nonzero(sa):
            data = data + base_seismogram[e].data * sa

        if self.differentiate:
            deltat = base_seismogram[e].deltat
            data = util.diff_fd(self.differentiate, 4, deltat, data)

        if self.integrate:
            raise NotImplementedError('Integration is not implemented yet.')

        return data

4962 

4963 

class ScalarRule(Rule):
    '''
    Rule delivering a scalar quantity channel from its single GF
    component, with optional time differentiation.
    '''

    def __init__(self, quantity, differentiate=0):
        self.c = quantity
        # Fix: ``differentiate`` was accepted but never stored, so
        # ``apply_`` raised AttributeError when accessing
        # ``self.differentiate``.
        self.differentiate = differentiate

    def required_components(self, target):
        '''
        A scalar channel needs exactly its single GF component.
        '''
        return (self.c, )

    def apply_(self, target, base_seismogram):
        '''
        Copy the component's data and differentiate it the requested
        number of times.
        '''
        data = base_seismogram[self.c].data.copy()
        deltat = base_seismogram[self.c].deltat
        if self.differentiate:
            data = util.diff_fd(self.differentiate, 4, deltat, data)

        return data

4980 

class StaticDisplacement(Rule):
    '''
    Rule for static displacement targets; adds a line-of-sight channel
    for satellite targets.
    '''

    def required_components(self, target):
        return tuple('displacement.%s' % c for c in 'ned')

    def apply_(self, target, base_statics):
        if isinstance(target, SatelliteTarget):
            # Project NED displacements onto the satellite line of sight.
            los_fac = target.get_los_factors()
            down = base_statics['displacement.d']
            east = base_statics['displacement.e']
            north = base_statics['displacement.n']
            base_statics['displacement.los'] = (
                los_fac[:, 0] * -down +
                los_fac[:, 1] * east +
                los_fac[:, 2] * north)
        return base_statics

4995 

# Mapping of dynamic output quantity name to the list of rules able to
# produce it. Rules are tried in order, e.g. 'velocity' may come from
# native velocity GFs or from differentiating displacement once.
channel_rules = {
    'displacement': [VectorRule('displacement')],
    'rotation': [VectorRule('rotation')],
    'velocity': [
        VectorRule('velocity'),
        VectorRule('displacement', differentiate=1)],
    'acceleration': [
        VectorRule('acceleration'),
        VectorRule('velocity', differentiate=1),
        VectorRule('displacement', differentiate=2)],
    'pore_pressure': [ScalarRule('pore_pressure')],
    'vertical_tilt': [HorizontalVectorRule('vertical_tilt')],
    'darcy_velocity': [VectorRule('darcy_velocity')],
}

# Rules for static (deformation) targets.
static_rules = {
    'displacement': [StaticDisplacement()]
}

5014 

5015 

class OutOfBoundsContext(Object):
    '''
    Context information attached to out-of-bounds errors during seismogram
    computation, for use in error messages.
    '''
    source = Source.T()
    target = Target.T()
    distance = Float.T()
    components = List.T(String.T())

5021 

5022 

def process_dynamic_timeseries(work, psources, ptargets, engine, nthreads=0):
    '''
    Process dynamic (waveform) work items and yield per source-target
    results.

    For each source, targets are grouped by GF store, sample rate and
    interpolation scheme so the engine can compute the base seismograms of
    each group in one batch. Yields ``((isource, itarget, result),
    tcounters)`` tuples, where ``result`` is either a processed seismogram
    or a caught :py:class:`SeismosizerError`.
    '''
    # Cache for discretized sources, shared across all store/target groups.
    dsource_cache = {}
    tcounters = list(range(6))

    store_ids = set()
    sources = set()
    targets = set()

    # Attach the position in ptargets to each target, so results can be
    # mapped back after the set-based regrouping below.
    for itarget, target in enumerate(ptargets):
        target._id = itarget

    for w in work:
        _, _, isources, itargets = w

        sources.update([psources[isource] for isource in isources])
        targets.update([ptargets[itarget] for itarget in itargets])

    store_ids = set([t.store_id for t in targets])

    for isource, source in enumerate(psources):

        # Union of GF components required by any target for this source.
        components = set()
        for itarget, target in enumerate(targets):
            rule = engine.get_rule(source, target)
            components.update(rule.required_components(target))

        for store_id in store_ids:
            store_targets = [t for t in targets if t.store_id == store_id]

            sample_rates = set([t.sample_rate for t in store_targets])
            interpolations = set([t.interpolation for t in store_targets])

            base_seismograms = []
            store_targets_out = []

            # Batch targets with identical sample rate and interpolation;
            # base_seismograms and store_targets_out stay in lockstep.
            for samp_rate in sample_rates:
                for interp in interpolations:
                    engine_targets = [
                        t for t in store_targets if t.sample_rate == samp_rate
                        and t.interpolation == interp]

                    if not engine_targets:
                        continue

                    store_targets_out += engine_targets

                    base_seismograms += engine.base_seismograms(
                        source,
                        engine_targets,
                        components,
                        dsource_cache,
                        nthreads)

            # Fail fast on any per-trace error reported by the engine.
            for iseis, seismogram in enumerate(base_seismograms):
                for tr in seismogram.values():
                    if tr.err != store.SeismosizerErrorEnum.SUCCESS:
                        # NOTE(review): base_seismograms is ordered like
                        # store_targets_out, but the error context indexes
                        # store_targets here — the reported target may be
                        # wrong when the two orderings differ; confirm.
                        e = SeismosizerError(
                            'Seismosizer failed with return code %i\n%s' % (
                                tr.err, str(
                                    OutOfBoundsContext(
                                        source=source,
                                        target=store_targets[iseis],
                                        distance=source.distance_to(
                                            store_targets[iseis]),
                                        components=components))))
                        raise e

            for seismogram, target in zip(base_seismograms, store_targets_out):

                # Post-processing errors are yielded as results rather than
                # aborting the whole request.
                try:
                    result = engine._post_process_dynamic(
                        seismogram, source, target)
                except SeismosizerError as e:
                    result = e

                yield (isource, target._id, result), tcounters

5099 

5100 

def process_dynamic(work, psources, ptargets, engine, nthreads=0):
    '''
    Worker generator processing dynamic targets one pair at a time.

    Unlike :py:func:`process_dynamic_timeseries`, each source/target pair
    is computed individually via :py:meth:`LocalEngine.base_seismogram`.

    :yields: ``((isource, itarget, result), tcounters)`` where ``tcounters``
        holds timestamps taken between the processing phases and ``result``
        is the processed result or a caught :py:exc:`SeismosizerError`.
    '''
    dsource_cache = {}

    for w in work:
        _, _, isources, itargets = w

        sources = [psources[isource] for isource in isources]
        targets = [ptargets[itarget] for itarget in itargets]

        # Union of GF components needed by any target in this subrequest.
        components = set()
        for target in targets:
            rule = engine.get_rule(sources[0], target)
            components.update(rule.required_components(target))

        for isource, source in zip(isources, sources):
            for itarget, target in zip(itargets, targets):

                try:
                    base_seismogram, tcounters = engine.base_seismogram(
                        source, target, components, dsource_cache, nthreads)
                except meta.OutOfBounds as e:
                    # Attach the actual failing source/target pair to the
                    # error context. Previously the first pair of the
                    # subrequest was reported, which could be misleading.
                    e.context = OutOfBoundsContext(
                        source=source,
                        target=target,
                        distance=source.distance_to(target),
                        components=components)
                    raise

                n_records_stacked = 0
                t_optimize = 0.0
                t_stack = 0.0

                # Aggregate stacking statistics over all GF components.
                for _, tr in base_seismogram.items():
                    n_records_stacked += tr.n_records_stacked
                    t_optimize += tr.t_optimize
                    t_stack += tr.t_stack

                try:
                    result = engine._post_process_dynamic(
                        base_seismogram, source, target)
                    result.n_records_stacked = n_records_stacked
                    # Stacking cost is shared by all pairs of the
                    # subrequest; record the divisor for later averaging.
                    result.n_shared_stacking = len(sources) *\
                        len(targets)
                    result.t_optimize = t_optimize
                    result.t_stack = t_stack
                except SeismosizerError as e:
                    result = e

                tcounters.append(xtime())
                yield (isource, itarget, result), tcounters

5151 

5152 

def process_static(work, psources, ptargets, engine, nthreads=0):
    '''
    Worker generator processing static (displacement) targets.

    :yields: ``((isource, itarget, result), tcounters)`` where ``tcounters``
        holds timestamps taken between the processing phases.
    '''
    for w in work:
        _, _, isources, itargets = w

        sources = [psources[isource] for isource in isources]
        targets = [ptargets[itarget] for itarget in itargets]

        for isource, source in zip(isources, sources):
            for itarget, target in zip(itargets, targets):
                components = engine.get_rule(source, target)\
                    .required_components(target)

                try:
                    base_statics, tcounters = engine.base_statics(
                        source, target, components, nthreads)
                except meta.OutOfBounds as e:
                    # Attach the actual failing source/target pair to the
                    # error context. Previously the first pair of the
                    # subrequest was reported, which could be misleading.
                    e.context = OutOfBoundsContext(
                        source=source,
                        target=target,
                        distance=float('nan'),
                        components=components)
                    raise

                result = engine._post_process_statics(
                    base_statics, source, target)
                tcounters.append(xtime())

                yield (isource, itarget, result), tcounters

5180 

5181 

class LocalEngine(Engine):
    '''
    Offline synthetic seismogram calculator.

    :param use_env: if ``True``, fill :py:attr:`store_superdirs` and
        :py:attr:`store_dirs` with paths set in environment variables
        GF_STORE_SUPERDIRS and GF_STORE_DIRS.
    :param use_config: if ``True``, fill :py:attr:`store_superdirs` and
        :py:attr:`store_dirs` with paths set in the user's config file.

        The config file can be found at :file:`~/.pyrocko/config.pf`

        .. code-block :: python

            gf_store_dirs: ['/home/pyrocko/gf_stores/ak135/']
            gf_store_superdirs: ['/home/pyrocko/gf_stores/']
    '''

    store_superdirs = List.T(
        String.T(),
        help="directories which are searched for Green's function stores")

    store_dirs = List.T(
        String.T(),
        help="additional individual Green's function store directories")

    default_store_id = String.T(
        optional=True,
        help='default store ID to be used when a request does not provide '
             'one')

    def __init__(self, **kwargs):
        use_env = kwargs.pop('use_env', False)
        use_config = kwargs.pop('use_config', False)
        Engine.__init__(self, **kwargs)
        if use_env:
            env_store_superdirs = os.environ.get('GF_STORE_SUPERDIRS', '')
            env_store_dirs = os.environ.get('GF_STORE_DIRS', '')
            if env_store_superdirs:
                self.store_superdirs.extend(env_store_superdirs.split(':'))

            if env_store_dirs:
                self.store_dirs.extend(env_store_dirs.split(':'))

        if use_config:
            c = config.config()
            self.store_superdirs.extend(c.gf_store_superdirs)
            self.store_dirs.extend(c.gf_store_dirs)

        self._check_store_dirs_type()
        self._id_to_store_dir = {}   # store ID -> directory, filled lazily
        self._open_stores = {}       # store ID -> open Store object (cache)
        self._effective_default_store_id = None

    def _check_store_dirs_type(self):
        # Guard against the common misconfiguration of passing a plain
        # string instead of a list of strings.
        for sdir in ['store_dirs', 'store_superdirs']:
            if not isinstance(self.__getattribute__(sdir), list):
                raise TypeError('{} of {} is not of type list'.format(
                    sdir, self.__class__.__name__))

    def _get_store_id(self, store_dir):
        # Open the store only briefly to read its ID from its config.
        store_ = store.Store(store_dir)
        store_id = store_.config.id
        store_.close()
        return store_id

    def _looks_like_store_dir(self, store_dir):
        # A GF store directory contains at least these three files.
        return os.path.isdir(store_dir) and \
            all(os.path.isfile(pjoin(store_dir, x)) for x in
                ('index', 'traces', 'config'))

    def iter_store_dirs(self):
        '''
        Get the set of directories which look like GF stores.

        Searches one level deep in each of :py:attr:`store_superdirs` and
        adds all of :py:attr:`store_dirs`, resolving symlinks.
        '''
        store_dirs = set()
        for d in self.store_superdirs:
            if not os.path.exists(d):
                logger.warning('store_superdir not available: %s' % d)
                continue

            for entry in os.listdir(d):
                store_dir = os.path.realpath(pjoin(d, entry))
                if self._looks_like_store_dir(store_dir):
                    store_dirs.add(store_dir)

        for store_dir in self.store_dirs:
            store_dirs.add(os.path.realpath(store_dir))

        return store_dirs

    def _scan_stores(self):
        # Build the store-ID -> directory mapping, rejecting duplicate IDs
        # pointing to different locations.
        for store_dir in self.iter_store_dirs():
            store_id = self._get_store_id(store_dir)
            if store_id not in self._id_to_store_dir:
                self._id_to_store_dir[store_id] = store_dir
            else:
                if store_dir != self._id_to_store_dir[store_id]:
                    raise DuplicateStoreId(
                        'GF store ID %s is used in (at least) two '
                        'different stores. Locations are: %s and %s' %
                        (store_id, self._id_to_store_dir[store_id], store_dir))

    def get_store_dir(self, store_id):
        '''
        Lookup directory given a GF store ID.
        '''

        if store_id not in self._id_to_store_dir:
            self._scan_stores()

        if store_id not in self._id_to_store_dir:
            raise NoSuchStore(store_id, self.iter_store_dirs())

        return self._id_to_store_dir[store_id]

    def get_store_ids(self):
        '''
        Get list of available store IDs.
        '''

        self._scan_stores()
        return sorted(self._id_to_store_dir.keys())

    def effective_default_store_id(self):
        '''
        Get the store ID used when a request does not provide one.

        Falls back to the only available store if exactly one exists;
        raises :py:exc:`NoDefaultStoreSet` otherwise.
        '''
        if self._effective_default_store_id is None:
            if self.default_store_id is None:
                store_ids = self.get_store_ids()
                if len(store_ids) == 1:
                    # Reuse the already computed list instead of
                    # rescanning all stores a second time.
                    self._effective_default_store_id = store_ids[0]
                else:
                    raise NoDefaultStoreSet()
            else:
                self._effective_default_store_id = self.default_store_id

        return self._effective_default_store_id

    def get_store(self, store_id=None):
        '''
        Get a store from the engine.

        :param store_id: identifier of the store (optional)
        :returns: :py:class:`~pyrocko.gf.store.Store` object

        If no ``store_id`` is provided the store
        associated with the :py:gattr:`default_store_id` is returned.
        Raises :py:exc:`NoDefaultStoreSet` if :py:gattr:`default_store_id` is
        undefined.
        '''

        if store_id is None:
            store_id = self.effective_default_store_id()

        if store_id not in self._open_stores:
            store_dir = self.get_store_dir(store_id)
            self._open_stores[store_id] = store.Store(store_dir)

        return self._open_stores[store_id]

    def get_store_config(self, store_id):
        '''
        Get the configuration object of the store with given ID.
        '''
        # Named store_ to avoid shadowing the module-level `store` import.
        store_ = self.get_store(store_id)
        return store_.config

    def get_store_extra(self, store_id, key):
        '''
        Get extra information stored under ``key`` in the given store.
        '''
        store_ = self.get_store(store_id)
        return store_.get_extra(key)

    def close_cashed_stores(self):
        '''
        Close and remove ids from cashed stores.
        '''
        # NOTE(review): method name keeps the historical "cashed" spelling
        # (for "cached") because it is public API.
        for store_ in self._open_stores.values():
            store_.close()

        self._open_stores.clear()

    def get_rule(self, source, target):
        '''
        Get the first conversion rule able to serve ``target``'s quantity
        with the components provided by its GF store.

        :raises: :py:exc:`BadRequest` if no rule applies.
        '''
        cprovided = self.get_store(target.store_id).get_provided_components()

        if isinstance(target, StaticTarget):
            quantity = target.quantity
            available_rules = static_rules
        elif isinstance(target, Target):
            quantity = target.effective_quantity()
            available_rules = channel_rules
        else:
            # Previously an unsupported target type fell through to an
            # UnboundLocalError on `available_rules` below.
            raise BadRequest(
                'Unsupported target type: %s' % target.__class__.__name__)

        try:
            for rule in available_rules[quantity]:
                cneeded = rule.required_components(target)
                if all(c in cprovided for c in cneeded):
                    return rule

        except KeyError:
            pass

        # Use the quantity determined above: for dynamic targets it equals
        # target.effective_quantity(); for static targets, calling that
        # method here could itself fail.
        raise BadRequest(
            'No rule to calculate "%s" with GFs from store "%s" '
            'for source model "%s".' % (
                quantity,
                target.store_id,
                source.__class__.__name__))

    def _cached_discretize_basesource(self, source, store, cache, target):
        # Discretization is expensive; reuse results per (source, store).
        if (source, store) not in cache:
            cache[source, store] = source.discretize_basesource(store, target)

        return cache[source, store]

    def base_seismograms(self, source, targets, components, dsource_cache,
                         nthreads=0):
        '''
        Compute raw (pre-post-processing) seismograms for a batch of
        targets sharing store, interpolation scheme and sample rate.
        '''

        target = targets[0]

        interp = set([t.interpolation for t in targets])
        if len(interp) > 1:
            raise BadRequest('Targets have different interpolation schemes.')

        rates = set([t.sample_rate for t in targets])
        if len(rates) > 1:
            raise BadRequest('Targets have different sample rates.')

        store_ = self.get_store(target.store_id)
        receivers = [t.receiver(store_) for t in targets]

        if target.sample_rate is not None:
            deltat = 1. / target.sample_rate
            rate = target.sample_rate
        else:
            deltat = None
            rate = store_.config.sample_rate

        tmin = num.fromiter(
            (t.tmin for t in targets), dtype=float, count=len(targets))
        tmax = num.fromiter(
            (t.tmax for t in targets), dtype=float, count=len(targets))

        # Targets without a finite time window get nsamples = -1 below.
        mask = num.logical_and(num.isfinite(tmin), num.isfinite(tmax))

        itmin = num.zeros_like(tmin, dtype=num.int64)
        itmax = num.zeros_like(tmin, dtype=num.int64)

        itmin[mask] = num.floor(tmin[mask] * rate).astype(num.int64)
        itmax[mask] = num.ceil(tmax[mask] * rate).astype(num.int64)
        nsamples = itmax - itmin + 1
        nsamples[num.logical_not(mask)] = -1

        base_source = self._cached_discretize_basesource(
            source, store_, dsource_cache, target)

        base_seismograms = store_.calc_seismograms(
            base_source, receivers, components,
            deltat=deltat,
            itmin=itmin, nsamples=nsamples,
            interpolation=target.interpolation,
            optimization=target.optimization,
            nthreads=nthreads)

        for i, base_seismogram in enumerate(base_seismograms):
            base_seismograms[i] = store.make_same_span(base_seismogram)

        return base_seismograms

    def base_seismogram(self, source, target, components, dsource_cache,
                        nthreads):
        '''
        Compute the raw seismogram for a single source/target pair.

        :returns: ``(base_seismogram, tcounters)`` with timestamps taken
            between the processing phases.
        '''

        tcounters = [xtime()]

        store_ = self.get_store(target.store_id)
        receiver = target.receiver(store_)

        # Compare both bounds against None explicitly: a tmin of exactly
        # 0.0 is falsy, so `if target.tmin and ...` would silently ignore
        # a requested window starting at the origin time.
        if target.tmin is not None and target.tmax is not None:
            rate = store_.config.sample_rate
            itmin = int(num.floor(target.tmin * rate))
            itmax = int(num.ceil(target.tmax * rate))
            nsamples = itmax - itmin + 1
        else:
            itmin = None
            nsamples = None

        tcounters.append(xtime())
        base_source = self._cached_discretize_basesource(
            source, store_, dsource_cache, target)

        tcounters.append(xtime())

        if target.sample_rate is not None:
            deltat = 1. / target.sample_rate
        else:
            deltat = None

        base_seismogram = store_.seismogram(
            base_source, receiver, components,
            deltat=deltat,
            itmin=itmin, nsamples=nsamples,
            interpolation=target.interpolation,
            optimization=target.optimization,
            nthreads=nthreads)

        tcounters.append(xtime())

        base_seismogram = store.make_same_span(base_seismogram)

        tcounters.append(xtime())

        return base_seismogram, tcounters

    def base_statics(self, source, target, components, nthreads):
        '''
        Compute raw static displacements for a single source/target pair.

        :returns: ``(base_statics, tcounters)`` with timestamps taken
            between the processing phases.
        '''
        tcounters = [xtime()]
        store_ = self.get_store(target.store_id)

        if target.tsnapshot is not None:
            rate = store_.config.sample_rate
            itsnapshot = int(num.floor(target.tsnapshot * rate))
        else:
            itsnapshot = None
        tcounters.append(xtime())

        base_source = source.discretize_basesource(store_, target=target)

        tcounters.append(xtime())

        base_statics = store_.statics(
            base_source,
            target,
            itsnapshot,
            components,
            target.interpolation,
            nthreads)

        tcounters.append(xtime())

        return base_statics, tcounters

    def _post_process_dynamic(self, base_seismogram, source, target):
        # Combine GF components per rule, apply amplitude factors and
        # convolve with the source time function.
        base_any = next(iter(base_seismogram.values()))
        deltat = base_any.deltat
        itmin = base_any.itmin

        rule = self.get_rule(source, target)
        data = rule.apply_(target, base_seismogram)

        factor = source.get_factor() * target.get_factor()
        if factor != 1.0:
            data = data * factor

        stf = source.effective_stf_post()

        times, amplitudes = stf.discretize_t(
            deltat, 0.0)

        # repeat end point to prevent boundary effects
        padded_data = num.empty(data.size + amplitudes.size, dtype=float)
        padded_data[:data.size] = data
        padded_data[data.size:] = data[-1]
        data = num.convolve(amplitudes, padded_data)

        tmin = itmin * deltat + times[0]

        tr = meta.SeismosizerTrace(
            codes=target.codes,
            # Trim the trailing convolution/padding tail again.
            data=data[:-amplitudes.size],
            deltat=deltat,
            tmin=tmin)

        return target.post_process(self, source, tr)

    def _post_process_statics(self, base_statics, source, starget):
        rule = self.get_rule(source, starget)
        data = rule.apply_(starget, base_statics)

        # NOTE(review): the in-place scaling below appears to rely on
        # `data` holding views into `base_statics`, which is what is
        # passed on -- confirm against StaticRule.apply_.
        factor = source.get_factor()
        if factor != 1.0:
            for v in data.values():
                v *= factor

        return starget.post_process(self, source, base_statics)

    def process(self, *args, **kwargs):
        '''
        Process a request.

        ::

            process(**kwargs)
            process(request, **kwargs)
            process(sources, targets, **kwargs)

        The request can be given a a :py:class:`Request` object, or such an
        object is created using ``Request(**kwargs)`` for convenience.

        :returns: :py:class:`Response` object
        '''

        if len(args) not in (0, 1, 2):
            raise BadRequest('Invalid arguments.')

        if len(args) == 1:
            kwargs['request'] = args[0]

        elif len(args) == 2:
            kwargs.update(Request.args2kwargs(args))

        request = kwargs.pop('request', None)
        status_callback = kwargs.pop('status_callback', None)
        calc_timeseries = kwargs.pop('calc_timeseries', True)

        # `nprocs` is a legacy alias for `nthreads` and takes precedence
        # when given.
        nprocs = kwargs.pop('nprocs', None)
        nthreads = kwargs.pop('nthreads', 1)
        if nprocs is not None:
            nthreads = nprocs

        if request is None:
            request = Request(**kwargs)

        if resource:
            rs0 = resource.getrusage(resource.RUSAGE_SELF)
            rc0 = resource.getrusage(resource.RUSAGE_CHILDREN)
        tt0 = xtime()

        # make sure stores are open before fork()
        store_ids = set(target.store_id for target in request.targets)
        for store_id in store_ids:
            self.get_store(store_id)

        source_index = dict((x, i) for (i, x) in
                            enumerate(request.sources))
        target_index = dict((x, i) for (i, x) in
                            enumerate(request.targets))

        m = request.subrequest_map()

        skeys = sorted(m.keys(), key=cmp_to_key(cmp_none_aware))
        results_list = []

        for i in range(len(request.sources)):
            results_list.append([None] * len(request.targets))

        tcounters_dyn_list = []
        tcounters_static_list = []
        nsub = len(skeys)
        isub = 0

        # Processing dynamic targets through
        # parimap(process_subrequest_dynamic)

        if calc_timeseries:
            _process_dynamic = process_dynamic_timeseries
        else:
            _process_dynamic = process_dynamic

        if request.has_dynamic:
            work_dynamic = [
                (i, nsub,
                 [source_index[source] for source in m[k][0]],
                 [target_index[target] for target in m[k][1]
                  if not isinstance(target, StaticTarget)])
                for (i, k) in enumerate(skeys)]

            for ii_results, tcounters_dyn in _process_dynamic(
                    work_dynamic, request.sources, request.targets, self,
                    nthreads):

                tcounters_dyn_list.append(num.diff(tcounters_dyn))
                isource, itarget, result = ii_results
                results_list[isource][itarget] = result

                if status_callback:
                    status_callback(isub, nsub)

                isub += 1

        # Processing static targets through process_static
        if request.has_statics:
            work_static = [
                (i, nsub,
                 [source_index[source] for source in m[k][0]],
                 [target_index[target] for target in m[k][1]
                  if isinstance(target, StaticTarget)])
                for (i, k) in enumerate(skeys)]

            for ii_results, tcounters_static in process_static(
                    work_static, request.sources, request.targets, self,
                    nthreads=nthreads):

                tcounters_static_list.append(num.diff(tcounters_static))
                isource, itarget, result = ii_results
                results_list[isource][itarget] = result

                if status_callback:
                    status_callback(isub, nsub)

                isub += 1

        if status_callback:
            status_callback(nsub, nsub)

        # Use xtime() for both ends of the wallclock measurement (tt0
        # above is also taken with xtime()).
        tt1 = xtime()
        if resource:
            rs1 = resource.getrusage(resource.RUSAGE_SELF)
            rc1 = resource.getrusage(resource.RUSAGE_CHILDREN)

        s = ProcessingStats()

        if request.has_dynamic:
            tcumu_dyn = num.sum(num.vstack(tcounters_dyn_list), axis=0)
            t_dyn = float(num.sum(tcumu_dyn))
            perc_dyn = map(float, tcumu_dyn / t_dyn * 100.)
            (s.t_perc_get_store_and_receiver,
             s.t_perc_discretize_source,
             s.t_perc_make_base_seismogram,
             s.t_perc_make_same_span,
             s.t_perc_post_process) = perc_dyn
        else:
            t_dyn = 0.

        if request.has_statics:
            tcumu_static = num.sum(num.vstack(tcounters_static_list), axis=0)
            t_static = num.sum(tcumu_static)
            perc_static = map(float, tcumu_static / t_static * 100.)
            (s.t_perc_static_get_store,
             s.t_perc_static_discretize_basesource,
             s.t_perc_static_sum_statics,
             s.t_perc_static_post_process) = perc_static

        s.t_wallclock = tt1 - tt0
        if resource:
            s.t_cpu = (
                (rs1.ru_utime + rs1.ru_stime + rc1.ru_utime + rc1.ru_stime) -
                (rs0.ru_utime + rs0.ru_stime + rc0.ru_utime + rc0.ru_stime))
            s.n_read_blocks = (
                (rs1.ru_inblock + rc1.ru_inblock) -
                (rs0.ru_inblock + rc0.ru_inblock))

        n_records_stacked = 0.
        for results in results_list:
            for result in results:
                if not isinstance(result, meta.Result):
                    continue
                shr = float(result.n_shared_stacking)
                n_records_stacked += result.n_records_stacked / shr
                s.t_perc_optimize += result.t_optimize / shr
                s.t_perc_stack += result.t_stack / shr
        s.n_records_stacked = int(n_records_stacked)
        if t_dyn != 0.:
            # Convert the accumulated times to percentages of the total
            # dynamic processing time (matching the `* 100.` conversions
            # above). The previous `/= t_dyn * 100` divided by 100*t_dyn,
            # off by a factor of 10000.
            s.t_perc_optimize *= 100. / t_dyn
            s.t_perc_stack *= 100. / t_dyn

        return Response(
            request=request,
            results_list=results_list,
            stats=s)

5734 

5735 

class RemoteEngine(Engine):
    '''
    Client for remote synthetic seismogram calculator.
    '''

    site = String.T(default=ws.g_default_site, optional=True)
    url = String.T(default=ws.g_url, optional=True)

    def process(self, request=None, status_callback=None, **kwargs):
        '''
        Process a request on the remote service.

        Extra keyword arguments are used to build a ``Request`` when none
        is given.
        '''
        # NOTE(review): status_callback is accepted for interface parity
        # with LocalEngine.process but is not forwarded to the remote
        # call -- presumably no progress reporting is available; confirm.
        effective_request = Request(**kwargs) if request is None else request

        return ws.seismosizer(
            url=self.url, site=self.site, request=effective_request)

5750 

5751 

# Module-level singleton, lazily created by get_engine().
g_engine = None


def get_engine(store_superdirs=None):
    '''
    Get the shared global :py:class:`LocalEngine` instance.

    The engine is created lazily on first use with ``use_env`` and
    ``use_config`` enabled.

    :param store_superdirs: optional iterable of additional store
        superdirectories to register with the engine
    '''
    global g_engine
    if g_engine is None:
        g_engine = LocalEngine(use_env=True, use_config=True)

    # Default is None instead of a mutable [] default argument; behavior
    # for callers passing a list is unchanged.
    for d in store_superdirs or []:
        if d not in g_engine.store_superdirs:
            g_engine.store_superdirs.append(d)

    return g_engine

5765 

5766 

class SourceGroup(Object):
    '''
    Abstract base class for collections of sources.

    Attribute access is vectorized: e.g. ``group.lat`` gathers the ``lat``
    attribute of every contained source into a float array.
    '''

    def __getattr__(self, k):
        per_source = (getattr(source, k) for source in self)
        return num.fromiter(per_source, dtype=float)

    def __iter__(self):
        raise NotImplementedError(
            'This method should be implemented in subclass.')

    def __len__(self):
        raise NotImplementedError(
            'This method should be implemented in subclass.')

5780 

5781 

class SourceList(SourceGroup):
    '''
    Group of sources backed by a plain list.
    '''

    sources = List.T(Source.T())

    def append(self, s):
        # Delegate straight to the underlying list.
        self.sources.append(s)

    def __iter__(self):
        yield from self.sources

    def __len__(self):
        return len(self.sources)

5793 

5794 

class SourceGrid(SourceGroup):
    '''
    Group of sources spanning a parameter grid around a base source.

    Each entry in ``variables`` defines a range of values for one source
    parameter; iterating the grid yields one cloned source per combination.
    '''

    base = Source.T()
    variables = Dict.T(String.T(), Range.T())
    order = List.T(String.T())

    def __len__(self):
        count = 1
        for _, values in self.make_coords(self.base):
            count *= len(list(values))

        return count

    def __iter__(self):
        for assignment in permudef(self.make_coords(self.base)):
            source = self.base.clone(**dict(assignment))
            source.regularize()
            yield source

    def ordered_params(self):
        # Yield varied parameter names, honoring explicit `order` first,
        # then the base source's own parameter order.
        remaining = list(self.variables.keys())
        for name in self.order + list(self.base.keys()):
            if name in remaining:
                yield name
                remaining.remove(name)

        if remaining:
            raise Exception('Invalid parameter "%s" for source type "%s".' %
                            (remaining[0], self.base.__class__.__name__))

    def make_coords(self, base):
        coords = []
        for param in self.ordered_params():
            coords.append(
                (param, self.variables[param].make(base=base[param])))

        return coords

5827 

5828 

# All source model classes exported by this module; also used to build
# __all__ below.
source_classes = [
    Source,
    SourceWithMagnitude,
    SourceWithDerivedMagnitude,
    ExplosionSource,
    RectangularExplosionSource,
    DCSource,
    CLVDSource,
    VLVDSource,
    MTSource,
    RectangularSource,
    PseudoDynamicRupture,
    DoubleDCSource,
    RingfaultSource,
    CombiSource,
    SFSource,
    PorePressurePointSource,
    PorePressureLineSource,
]

5848 

# All source-time-function classes exported by this module; also used to
# build __all__ below.
stf_classes = [
    STF,
    BoxcarSTF,
    TriangularSTF,
    HalfSinusoidSTF,
    ResonatorSTF,
]

5856 

# Public API: exception types, all source and STF classes, and the
# engine/request machinery.
__all__ = '''
SeismosizerError
BadRequest
NoSuchStore
DerivedMagnitudeError
STFMode
'''.split() + [S.__name__ for S in source_classes + stf_classes] + '''
Request
ProcessingStats
Response
Engine
LocalEngine
RemoteEngine
source_classes
get_engine
Range
SourceGroup
SourceList
SourceGrid
map_anchor
'''.split()