1# http://pyrocko.org - GPLv3 

2# 

3# The Pyrocko Developers, 21st Century 

4# ---|P------/S----------~Lg---------- 

5 

6''' 

7 

8 

9 

10.. _coordinate-system-names: 

11 

12Coordinate systems 

13.................. 

14 

15Coordinate system names commonly used in source models. 

16 

17================= ============================================ 

18Name Description 

19================= ============================================ 

20``'xyz'`` northing, easting, depth in [m] 

21``'xy'`` northing, easting in [m] 

22``'latlon'`` latitude, longitude in [deg] 

23``'lonlat'`` longitude, latitude in [deg] 

24``'latlondepth'`` latitude, longitude in [deg], depth in [m] 

25================= ============================================ 

26''' 

27 

28 

29from __future__ import absolute_import, division, print_function 

30 

31from collections import defaultdict 

32from functools import cmp_to_key 

33import time 

34import math 

35import os 

36import re 

37import logging 

38try: 

39 import resource 

40except ImportError: 

41 resource = None 

42from hashlib import sha1 

43 

44import numpy as num 

45from scipy.interpolate import RegularGridInterpolator 

46 

47from pyrocko.guts import (Object, Float, String, StringChoice, List, 

48 Timestamp, Int, SObject, ArgumentError, Dict, 

49 ValidationError, Bool) 

50from pyrocko.guts_array import Array 

51 

52from pyrocko import moment_tensor as pmt 

53from pyrocko import trace, util, config, model, eikonal_ext 

54from pyrocko.orthodrome import ne_to_latlon 

55from pyrocko.model import Location 

56from pyrocko.modelling import OkadaSource, make_okada_coefficient_matrix, \ 

57 okada_ext, invert_fault_dislocations_bem 

58 

59from . import meta, store, ws 

60from .tractions import TractionField, DirectedTractions 

61from .targets import Target, StaticTarget, SatelliteTarget 

62 

63pjoin = os.path.join 

64 

65guts_prefix = 'pf' 

66 

67d2r = math.pi / 180. 

68r2d = 180. / math.pi 

69km = 1e3 

70 

71logger = logging.getLogger('pyrocko.gf.seismosizer') 

72 

73 

def cmp_none_aware(a, b):
    '''
    Three-way compare two values, ordering ``None`` before anything else.

    Tuples are compared element-wise, recursively. Returns -1, 0 or 1
    in the style of the classic ``cmp``.
    '''
    if isinstance(a, tuple) and isinstance(b, tuple):
        for ea, eb in zip(a, b):
            result = cmp_none_aware(ea, eb)
            if result != 0:
                return result

        return 0

    if a is None and b is None:
        return 0

    if a is None:
        return -1

    if b is None:
        return 1

    # Standard cmp-style comparison for two non-None values.
    return bool(a > b) - bool(a < b)

96 

97 

def xtime():
    '''
    Get the current system time in seconds since the epoch.
    '''
    return time.time()

100 

101 

class SeismosizerError(Exception):
    '''
    Base class for errors raised by the seismosizer machinery.
    '''
    pass

104 

105 

class BadRequest(SeismosizerError):
    '''
    Raised when an invalid request is made to the seismosizer.
    '''
    pass

108 

109 

class DuplicateStoreId(Exception):
    '''
    Raised when two GF stores with the same id are encountered.
    '''
    pass

112 

113 

class NoDefaultStoreSet(Exception):
    '''
    Raised when a default GF store is required but none is configured.
    '''
    pass

116 

117 

class ConversionError(Exception):
    '''
    Raised when a source model conversion fails.
    '''
    pass

120 

121 

class NoSuchStore(BadRequest):
    '''
    Raised when a GF store cannot be found.

    Carries the requested store id and, optionally, the list of folders
    that were searched.
    '''

    def __init__(self, store_id=None, dirs=None):
        BadRequest.__init__(self)
        self.store_id = store_id
        self.dirs = dirs

    def __str__(self):
        if self.store_id is None:
            message = 'GF store not found.'
        else:
            message = 'no GF store with id "%s" found.' % self.store_id

        if self.dirs is not None:
            message += ' Searched folders:\n %s' % '\n '.join(
                sorted(self.dirs))

        return message

138 

139 

def ufloat(s):
    '''
    Convert a string to ``float``, honoring SI-style unit suffixes.

    Supported suffixes: ``'k'`` (1e3) and ``'M'`` (1e6), e.g.
    ``'10k'`` -> ``10000.0``.

    :raises ValueError: if the string consists of a unit suffix only, or
        is otherwise not a valid number.
    '''
    units = {
        'k': 1e3,
        'M': 1e6,
    }

    factor = 1.0
    rest = s
    if s and s[-1] in units:
        factor = units[s[-1]]
        rest = s[:-1]
        if not rest:
            # Report the original input; the previous code interpolated
            # the already-stripped (empty) remainder into the message.
            raise ValueError('unit without a number: \'%s\'' % s)

    return float(rest) * factor

154 

155 

def ufloat_or_none(s):
    '''
    Like :py:func:`ufloat`, but map empty/falsy input to ``None``.
    '''
    return ufloat(s) if s else None

161 

162 

def int_or_none(s):
    '''
    Convert to ``int``, mapping empty/falsy input to ``None``.
    '''
    return int(s) if s else None

168 

169 

def nonzero(x, eps=1e-15):
    '''
    Check whether ``x`` differs from zero by more than ``eps``.
    '''
    return abs(x) > eps

172 

173 

def permudef(ln, j=0):
    '''
    Generate all combinations of the value choices in ``ln``.

    ``ln`` is a list of ``(key, values)`` pairs. The list is modified
    in place while iterating and restored to its original content when
    the generator is exhausted; every yielded item is the same list
    object with single values substituted for the choice lists.
    '''
    if j >= len(ln):
        yield ln
        return

    key, choices = ln[j]
    for choice in choices:
        ln[j] = key, choice
        # Recurse to expand the remaining entries (no ``yield from``
        # here to stay compatible with the file's py2/3 style).
        for combination in permudef(ln, j + 1):
            yield combination

    # Restore the original (key, values) entry for the caller.
    ln[j] = key, choices

186 

187 

def arr(x):
    '''
    Coerce ``x`` to a base-class numpy array with at least 1 dimension.
    '''
    # asarray first: also converts ndarray subclasses to plain ndarray.
    return num.atleast_1d(num.asarray(x))

190 

191 

def discretize_rect_source(deltas, deltat, time, north, east, depth,
                           strike, dip, length, width,
                           anchor, velocity=None, stf=None,
                           nucleation_x=None, nucleation_y=None,
                           decimation_factor=1, pointsonly=False,
                           plane_coords=False,
                           aggressive_oversampling=False):
    '''
    Discretize a rectangular fault plane into point sources.

    Sub-source spacing is chosen from the GF store spacings ``deltas``
    and (in time mode) ``deltat * velocity``. Rupture onset times grow
    with distance from the nucleation point at constant ``velocity``.

    Returns ``(points, dl, dw, nl, nw)`` if ``pointsonly`` or
    ``plane_coords`` is set, otherwise
    ``(points2, times2, amplitudes2, dl, dw, nl, nw)`` where the STF
    ``stf`` has been convolved onto each sub-source.
    '''

    if stf is None:
        stf = STF()

    if not velocity and not pointsonly:
        raise AttributeError('velocity is required in time mode')

    mindeltagf = float(num.min(deltas))
    if velocity:
        mindeltagf = min(mindeltagf, deltat * velocity)

    ln = length
    wd = width

    if aggressive_oversampling:
        nl = int((2. / decimation_factor) * num.ceil(ln / mindeltagf)) + 1
        nw = int((2. / decimation_factor) * num.ceil(wd / mindeltagf)) + 1
    else:
        nl = int((1. / decimation_factor) * num.ceil(ln / mindeltagf)) + 1
        nw = int((1. / decimation_factor) * num.ceil(wd / mindeltagf)) + 1

    n = int(nl * nw)

    dl = ln / nl
    dw = wd / nw

    xl = num.linspace(-0.5 * (ln - dl), 0.5 * (ln - dl), nl)
    xw = num.linspace(-0.5 * (wd - dw), 0.5 * (wd - dw), nw)

    # ``num.float`` was removed in numpy >= 1.24; use the builtin.
    points = num.zeros((n, 3), dtype=float)
    points[:, 0] = num.tile(xl, nw)
    points[:, 1] = num.repeat(xw, nl)

    if nucleation_x is not None:
        dist_x = num.abs(nucleation_x - points[:, 0])
    else:
        dist_x = num.zeros(n)

    if nucleation_y is not None:
        dist_y = num.abs(nucleation_y - points[:, 1])
    else:
        dist_y = num.zeros(n)

    times = None
    if velocity:
        # Rupture onset times; only computed when a velocity is given,
        # so that ``pointsonly=True`` without velocity does not divide
        # by ``None`` (previously a TypeError).
        times = num.sqrt(dist_x**2 + dist_y**2) / velocity

    anch_x, anch_y = map_anchor[anchor]

    points[:, 0] -= anch_x * 0.5 * length
    points[:, 1] -= anch_y * 0.5 * width

    if plane_coords:
        return points, dl, dw, nl, nw

    rotmat = num.asarray(
        pmt.euler_to_matrix(dip * d2r, strike * d2r, 0.0))
    points = num.dot(rotmat.T, points.T).T

    points[:, 0] += north
    points[:, 1] += east
    points[:, 2] += depth

    if pointsonly:
        return points, dl, dw, nl, nw

    xtau, amplitudes = stf.discretize_t(deltat, time)
    nt = xtau.size

    points2 = num.repeat(points, nt, axis=0)
    times2 = (times[:, num.newaxis] + xtau[num.newaxis, :]).ravel()
    amplitudes2 = num.tile(amplitudes, n)

    return points2, times2, amplitudes2, dl, dw, nl, nw

272 

273 

def check_rect_source_discretisation(points2, nl, nw, store):
    '''
    Check sub-source spacing against the store's resolvable wavelength.

    Returns ``True`` if the discretisation is considered acceptable,
    ``False`` otherwise.
    '''
    # We assume a non-rotated fault plane
    N_CRITICAL = 8
    points = points2.T.reshape((3, nl, nw))
    if points.size <= N_CRITICAL:
        # NOTE(review): points.size is 3 * nl * nw (coordinates, not
        # sub-sources) -- presumably the intent was nl * nw; verify.
        logger.warning('RectangularSource is defined by only %d sub-sources!'
                       % points.size)
        return True

    # Euclidean distances between neighbouring sub-sources (first two
    # rows along the length axis).
    distances = num.sqrt(
        (points[0, 0, :] - points[0, 1, :])**2 +
        (points[1, 0, :] - points[1, 1, :])**2 +
        (points[2, 0, :] - points[2, 1, :])**2)

    depths = points[2, 0, :]
    vs_profile = store.config.get_vs(
        lat=0., lon=0.,
        points=num.repeat(depths[:, num.newaxis], 3, axis=1),
        interpolation='multilinear')

    # Nyquist-style criterion: the shortest S wavelength the store can
    # resolve must exceed half the sub-source spacing everywhere.
    min_wavelength = vs_profile * (store.config.deltat * 2)
    if not num.all(min_wavelength > distances / 2):
        return False
    return True

298 

299 

def outline_rect_source(strike, dip, length, width, anchor):
    '''
    Get the corner coordinates of a rectangular source outline.

    Returns a (5, 3) array of north/east/down coordinates; the first
    corner is repeated to close the polygon.
    '''
    hl = 0.5 * length
    hw = 0.5 * width
    corners = num.array(
        [[-hl, -hw, 0.],
         [hl, -hw, 0.],
         [hl, hw, 0.],
         [-hl, hw, 0.],
         [-hl, -hw, 0.]])

    # Shift so the chosen anchor point lies at the origin.
    anch_x, anch_y = map_anchor[anchor]
    corners[:, 0] -= anch_x * hl
    corners[:, 1] -= anch_y * hw

    rotmat = num.asarray(
        pmt.euler_to_matrix(dip * d2r, strike * d2r, 0.0))

    return num.dot(rotmat.T, corners.T).T

318 

319 

def from_plane_coords(
        strike, dip, length, width, depth, x_plane_coords, y_plane_coords,
        lat=0., lon=0.,
        north_shift=0, east_shift=0,
        anchor='top', cs='xy'):
    '''
    Convert fault-plane-relative coordinates to absolute coordinates.

    ``x_plane_coords``/``y_plane_coords`` scale the rectangle's half
    length/width (scalars or lists). Output coordinate system is chosen
    with ``cs`` (see :ref:`coordinate-system-names`); returns a pair of
    lists ``(x_abs, y_abs)`` (swapped for ``cs='lonlat'``).
    '''

    ln = length
    wd = width
    x_abs = []
    y_abs = []
    # Accept scalars by wrapping both inputs in single-element lists.
    if not isinstance(x_plane_coords, list):
        x_plane_coords = [x_plane_coords]
        y_plane_coords = [y_plane_coords]

    for x_plane, y_plane in zip(x_plane_coords, y_plane_coords):
        # Corner points of the scaled rectangle in plane coordinates.
        points = num.array(
            [[-0.5 * ln * x_plane, -0.5 * wd * y_plane, 0.],
             [0.5 * ln * x_plane, -0.5 * wd * y_plane, 0.],
             [0.5 * ln * x_plane, 0.5 * wd * y_plane, 0.],
             [-0.5 * ln * x_plane, 0.5 * wd * y_plane, 0.],
             [-0.5 * ln * x_plane, -0.5 * wd * y_plane, 0.]])

        anch_x, anch_y = map_anchor[anchor]
        points[:, 0] -= anch_x * 0.5 * length
        points[:, 1] -= anch_y * 0.5 * width

        # Rotate from plane frame into north/east/down.
        rotmat = num.asarray(
            pmt.euler_to_matrix(dip * d2r, strike * d2r, 0.0))

        points = num.dot(rotmat.T, points.T).T
        points[:, 0] += north_shift
        points[:, 1] += east_shift
        points[:, 2] += depth
        if cs in ('latlon', 'lonlat'):
            latlon = ne_to_latlon(lat, lon,
                                  points[:, 0], points[:, 1])
            latlon = num.array(latlon).T
            # NOTE(review): picks single corners via slicing
            # (rows 1 and 2 of the corner list) -- presumably
            # intentional selection of representative corners; verify.
            x_abs.append(latlon[1:2, 1])
            y_abs.append(latlon[2:3, 0])
        if cs == 'xy':
            x_abs.append(points[1:2, 1])
            y_abs.append(points[2:3, 0])

    if cs == 'lonlat':
        return y_abs, x_abs
    else:
        return x_abs, y_abs

367 

368 

def points_on_rect_source(
        strike, dip, length, width, anchor,
        discretized_basesource=None, points_x=None, points_y=None):
    '''
    Compute absolute coordinates of points on a rectangular source.

    Either pass a ``discretized_basesource`` (patch corner grid is
    derived from its ``nl``/``nw``) or normalized plane coordinates
    ``points_x``/``points_y`` in [-1, 1].

    NOTE(review): if neither a basesource nor points are given,
    ``points_x.any()`` raises ``AttributeError`` on ``None`` -- callers
    apparently always supply one of the two; verify.
    '''

    ln = length
    wd = width

    # Normalize scalar/list input to numpy arrays.
    if isinstance(points_x, list) or isinstance(points_x, float):
        points_x = num.array([points_x])
    if isinstance(points_y, list) or isinstance(points_y, float):
        points_y = num.array([points_y])

    if discretized_basesource:
        ds = discretized_basesource

        # Patch corners: one more node than patches in each direction.
        nl_patches = ds.nl + 1
        nw_patches = ds.nw + 1

        npoints = nl_patches * nw_patches
        points = num.zeros((npoints, 3))
        ln_patches = num.array([il for il in range(nl_patches)])
        wd_patches = num.array([iw for iw in range(nw_patches)])

        # Rescale node indices to the normalized range [-1, 1].
        points_ln =\
            2 * ((ln_patches - num.min(ln_patches)) / num.ptp(ln_patches)) - 1
        points_wd =\
            2 * ((wd_patches - num.min(wd_patches)) / num.ptp(wd_patches)) - 1

        for il in range(nl_patches):
            for iw in range(nw_patches):
                points[il * nw_patches + iw, :] = num.array([
                    points_ln[il] * ln * 0.5,
                    points_wd[iw] * wd * 0.5, 0.0])

    elif points_x.any() and points_y.any():
        points = num.zeros(shape=((len(points_x), 3)))
        for i, (x, y) in enumerate(zip(points_x, points_y)):
            points[i, :] = num.array(
                [x * 0.5 * ln, y * 0.5 * wd, 0.0])

    # Shift so the anchor point lies at the origin, then rotate into
    # the north/east/down frame.
    anch_x, anch_y = map_anchor[anchor]

    points[:, 0] -= anch_x * 0.5 * ln
    points[:, 1] -= anch_y * 0.5 * wd

    rotmat = num.asarray(
        pmt.euler_to_matrix(dip * d2r, strike * d2r, 0.0))

    return num.dot(rotmat.T, points.T).T

418 

419 

class InvalidGridDef(Exception):
    '''
    Raised when a grid or range specification cannot be parsed or used.
    '''
    pass

422 

423 

class Range(SObject):
    '''
    Convenient range specification.

    Equivalent ways to specify the range [ 0., 1000., ... 10000. ]::

      Range('0 .. 10k : 1k')
      Range(start=0., stop=10e3, step=1e3)
      Range(0, 10e3, 1e3)
      Range('0 .. 10k @ 11')
      Range(start=0., stop=10*km, n=11)

      Range(0, 10e3, n=11)
      Range(values=[x*1e3 for x in range(11)])

    Depending on the use context, it can be possible to omit any part of the
    specification. E.g. in the context of extracting a subset of an already
    existing range, the existing range's specification values would be filled
    in where missing.

    The values are distributed with equal spacing, unless the ``spacing``
    argument is modified. The values can be created offset or relative to an
    external base value with the ``relative`` argument if the use context
    supports this.

    The range specification can be expressed with a short string
    representation::

        'start .. stop @ num | spacing, relative'
        'start .. stop : step | spacing, relative'

    most parts of the expression can be omitted if not needed. Whitespace is
    allowed for readability but can also be omitted.
    '''

    start = Float.T(optional=True)
    stop = Float.T(optional=True)
    step = Float.T(optional=True)
    n = Int.T(optional=True)
    values = Array.T(optional=True, dtype=float, shape=(None,))

    spacing = StringChoice.T(
        choices=['lin', 'log', 'symlog'],
        default='lin',
        optional=True)

    relative = StringChoice.T(
        choices=['', 'add', 'mult'],
        default='',
        optional=True)

    # Matches 'start .. stop', optional '@n' or ':step', optional
    # '|spacing,relative' suffix ("stuff").
    pattern = re.compile(r'^((?P<start>.*)\.\.(?P<stop>[^@|:]*))?'
                         r'(@(?P<n>[^|]+)|:(?P<step>[^|]+))?'
                         r'(\|(?P<stuff>.+))?$')

    def __init__(self, *args, **kwargs):
        # Positional forms: Range('spec'), Range(start, stop) or
        # Range(start, stop, step); keyword args may fill in the rest.
        d = {}
        if len(args) == 1:
            d = self.parse(args[0])
        elif len(args) in (2, 3):
            d['start'], d['stop'] = [float(x) for x in args[:2]]
            if len(args) == 3:
                d['step'] = float(args[2])

        for k, v in kwargs.items():
            if k in d:
                raise ArgumentError('%s specified more than once' % k)

            d[k] = v

        SObject.__init__(self, **d)

    def __str__(self):
        def sfloat(x):
            if x is not None:
                return '%g' % x
            else:
                return ''

        # NOTE(review): truth-testing ``self.values`` (a numpy array)
        # raises ValueError for arrays with more than one element --
        # verify intended usage.
        if self.values:
            return ','.join('%g' % x for x in self.values)

        if self.start is None and self.stop is None:
            s0 = ''
        else:
            s0 = '%s .. %s' % (sfloat(self.start), sfloat(self.stop))

        s1 = ''
        if self.step is not None:
            s1 = [' : %g', ':%g'][s0 == ''] % self.step
        elif self.n is not None:
            s1 = [' @ %i', '@%i'][s0 == ''] % self.n

        if self.spacing == 'lin' and self.relative == '':
            s2 = ''
        else:
            x = []
            if self.spacing != 'lin':
                x.append(self.spacing)

            if self.relative != '':
                x.append(self.relative)

            s2 = ' | %s' % ','.join(x)

        return s0 + s1 + s2

    @classmethod
    def parse(cls, s):
        '''
        Parse a short string range specification into a keyword dict.

        :raises InvalidGridDef: if the string cannot be parsed.
        '''
        s = re.sub(r'\s+', '', s)
        m = cls.pattern.match(s)
        if not m:
            # Fall back to a plain comma-separated list of values.
            try:
                vals = [ufloat(x) for x in s.split(',')]
            except Exception:
                raise InvalidGridDef(
                    '"%s" is not a valid range specification' % s)

            return dict(values=num.array(vals, dtype=float))

        d = m.groupdict()
        try:
            start = ufloat_or_none(d['start'])
            stop = ufloat_or_none(d['stop'])
            step = ufloat_or_none(d['step'])
            n = int_or_none(d['n'])
        except Exception:
            raise InvalidGridDef(
                '"%s" is not a valid range specification' % s)

        spacing = 'lin'
        relative = ''

        if d['stuff'] is not None:
            t = d['stuff'].split(',')
            for x in t:
                if x in cls.spacing.choices:
                    spacing = x
                elif x and x in cls.relative.choices:
                    relative = x
                else:
                    raise InvalidGridDef(
                        '"%s" is not a valid range specification' % s)

        return dict(start=start, stop=stop, step=step, n=n, spacing=spacing,
                    relative=relative)

    def make(self, mi=None, ma=None, inc=None, base=None, eps=1e-5):
        '''
        Create the list of values described by this range.

        Missing parts of the specification are filled in from ``mi``,
        ``ma`` and ``inc``; ``base`` is required for relative ranges.

        :raises InvalidGridDef: if the specification is incomplete or
            inconsistent in this context.
        '''
        if self.values:
            return self.values

        start = self.start
        stop = self.stop
        step = self.step
        n = self.n

        # A negative step means the range is specified from high to low.
        swap = step is not None and step < 0.
        if start is None:
            start = [mi, ma][swap]
        if stop is None:
            stop = [ma, mi][swap]
        if step is None and inc is not None:
            step = [inc, -inc][ma < mi]

        if start is None or stop is None:
            raise InvalidGridDef(
                'Cannot use range specification "%s" without start '
                'and stop in this context' % self)

        if step is None and n is None:
            step = stop - start

        if n is None:
            if (step < 0) != (stop - start < 0):
                raise InvalidGridDef(
                    'Range specification "%s" has inconsistent ordering '
                    '(step < 0 => stop > start)' % self)

            # Snap n to the nearest integer count; if the end does not
            # line up within eps, truncate to fit inside [start, stop].
            n = int(round((stop - start) / step)) + 1
            stop2 = start + (n - 1) * step
            if abs(stop - stop2) > eps:
                n = int(math.floor((stop - start) / step)) + 1
                stop = start + (n - 1) * step
            else:
                stop = stop2

        if start == stop:
            n = 1

        if self.spacing == 'lin':
            vals = num.linspace(start, stop, n)

        elif self.spacing in ('log', 'symlog'):
            if start > 0. and stop > 0.:
                vals = num.exp(num.linspace(num.log(start),
                                            num.log(stop), n))
            elif start < 0. and stop < 0.:
                vals = -num.exp(num.linspace(num.log(-start),
                                             num.log(-stop), n))
            else:
                raise InvalidGridDef(
                    'Log ranges should not include or cross zero '
                    '(in range specification "%s").' % self)

            if self.spacing == 'symlog':
                # Mirror the log-spaced values about zero.
                nvals = - vals
                vals = num.concatenate((nvals[::-1], vals))

        if self.relative in ('add', 'mult') and base is None:
            raise InvalidGridDef(
                'Cannot use relative range specification in this context.')

        vals = self.make_relative(base, vals)

        return list(map(float, vals))

    def make_relative(self, base, vals):
        '''
        Apply the ``relative`` mode ('add' or 'mult') using ``base``.
        '''
        if self.relative == 'add':
            vals += base

        if self.relative == 'mult':
            vals *= base

        return vals

648 

649 

class GridDefElement(Object):
    '''
    Single ``param = range`` entry of a grid specification.
    '''

    param = meta.StringID.T()
    rs = Range.T()

    def __init__(self, shorthand=None, **kwargs):
        if shorthand is not None:
            parts = shorthand.split('=')
            if len(parts) != 2:
                raise InvalidGridDef(
                    'Invalid grid specification element: %s' % shorthand)

            kwargs['param'] = parts[0].strip()
            kwargs['rs'] = Range(parts[1].strip())

        Object.__init__(self, **kwargs)

    def shorthand(self):
        '''
        Get the short string representation of this element.
        '''
        return self.param + ' = ' + str(self.rs)

671 

672 

class GridDef(Object):
    '''
    Grid specification: a collection of parameter/range definitions.

    Can be built from a shorthand string where elements are separated
    by newlines and/or semicolons, e.g.
    ``'depth = 0 .. 10k : 1k; strike = 0 .. 90 @ 4'``.
    '''

    elements = List.T(GridDefElement.T())

    def __init__(self, shorthand=None, **kwargs):
        if shorthand is not None:
            t = shorthand.splitlines()
            tt = []
            for x in t:
                x = x.strip()
                if x:
                    tt.extend(x.split(';'))

            elements = []
            for se in tt:
                # Each entry is one parameter definition. This must be
                # a GridDefElement -- the previous code instantiated
                # GridDef here, which cannot validate against
                # ``elements = List.T(GridDefElement.T())``.
                elements.append(GridDefElement(se))

            kwargs['elements'] = elements

        Object.__init__(self, **kwargs)

    def shorthand(self):
        '''
        Get the short string representation of this grid definition.
        '''
        return '; '.join(str(x) for x in self.elements)

696 

697 

class Cloneable(object):
    '''
    Mix-in providing dict-like parameter access and cloning.
    '''

    def __iter__(self):
        return iter(self.T.propnames)

    def __getitem__(self, k):
        if k not in self.keys():
            raise KeyError(k)

        return getattr(self, k)

    def __setitem__(self, k, v):
        if k not in self.keys():
            raise KeyError(k)

        return setattr(self, k, v)

    def clone(self, **kwargs):
        '''
        Make a copy of the object.

        A new object of the same class is created and initialized with
        the parameters of the object on which this method is called on.
        If ``kwargs`` are given, these are used to override any of the
        initialization parameters.
        '''
        params = dict(self)
        # Deep-copy nested cloneable values so the copy is independent.
        for name, value in params.items():
            if isinstance(value, Cloneable):
                params[name] = value.clone()

        params.update(kwargs)
        return self.__class__(**params)

    @classmethod
    def keys(cls):
        '''
        Get list of the source model's parameter names.
        '''
        return cls.T.propnames

741 

742 

class STF(Object, Cloneable):

    '''
    Base class for source time functions.

    The base class itself acts as a unit pulse released at the
    reference time.
    '''

    def __init__(self, effective_duration=None, **kwargs):
        if effective_duration is not None:
            kwargs['duration'] = (
                effective_duration / self.factor_duration_to_effective())

        Object.__init__(self, **kwargs)

    @classmethod
    def factor_duration_to_effective(cls):
        '''
        Ratio of effective to nominal duration (1.0 for the base class).
        '''
        return 1.0

    def centroid_time(self, tref):
        '''
        Centroid time of the STF for reference time ``tref``.
        '''
        return tref

    @property
    def effective_duration(self):
        return self.duration * self.factor_duration_to_effective()

    def discretize_t(self, deltat, tref):
        '''
        Discretize the STF on a time grid with spacing ``deltat``.

        Returns ``(times, amplitudes)``: a unit pulse at ``tref``,
        split linearly onto the two neighbouring grid samples.
        '''
        tl = math.floor(tref / deltat) * deltat
        th = math.ceil(tref / deltat) * deltat
        if tl == th:
            return num.array([tl], dtype=float), num.ones(1)

        return (
            num.array([tl, th], dtype=float),
            num.array([th - tref, tref - tl], dtype=float) / deltat)

    def base_key(self):
        return (type(self).__name__,)

779 

780 

# Shared unit-pulse STF: releases all moment at the reference time.
g_unit_pulse = STF()

782 

783 

def sshift(times, amplitudes, tshift, deltat):
    '''
    Shift a regularly sampled series by a possibly sub-sample amount.

    The amplitudes are redistributed onto the two neighbouring sample
    offsets by linear interpolation, so the output has one extra
    sample. If ``tshift`` is an exact multiple of ``deltat``, the
    inputs are returned unchanged.
    '''
    t0 = math.floor(tshift / deltat) * deltat
    t1 = math.ceil(tshift / deltat) * deltat
    if t0 == t1:
        return times, amplitudes

    # Linear interpolation weights for the two bracketing offsets.
    w_lo = (t1 - tshift) / deltat
    w_hi = (tshift - t0) / deltat

    amplitudes2 = num.zeros(amplitudes.size + 1, dtype=float)
    amplitudes2[:-1] += w_lo * amplitudes
    amplitudes2[1:] += w_hi * amplitudes

    times2 = (
        num.arange(times.size + 1, dtype=float) * deltat + times[0] + t0)

    return times2, amplitudes2

800 

801 

class BoxcarSTF(STF):

    '''
    Boxcar type source time function.

    .. figure :: /static/stf-BoxcarSTF.svg
        :width: 40%
        :align: center
        :alt: boxcar source time function
    '''

    duration = Float.T(
        default=0.0,
        help='duration of the boxcar')

    anchor = Float.T(
        default=0.0,
        help='anchor point with respect to source.time: ('
             '-1.0: left -> source duration [0, T] ~ hypocenter time, '
             ' 0.0: center -> source duration [-T/2, T/2] ~ centroid time, '
             '+1.0: right -> source duration [-T, 0] ~ rupture end time)')

    @classmethod
    def factor_duration_to_effective(cls):
        # For a boxcar, effective and nominal duration coincide.
        return 1.0

    def centroid_time(self, tref):
        return tref - 0.5 * self.duration * self.anchor

    def discretize_t(self, deltat, tref):
        '''
        Discretize the boxcar on a time grid with spacing ``deltat``.

        Returns ``(times, amplitudes)`` with amplitudes normalized to
        unit sum and shifted so the discrete centroid matches
        :py:meth:`centroid_time`.
        '''
        tmin_stf = tref - self.duration * (self.anchor + 1.) * 0.5
        tmax_stf = tref + self.duration * (1. - self.anchor) * 0.5
        tmin = round(tmin_stf / deltat) * deltat
        tmax = round(tmax_stf / deltat) * deltat
        nt = int(round((tmax - tmin) / deltat)) + 1
        times = num.linspace(tmin, tmax, nt)
        amplitudes = num.ones_like(times)
        if times.size > 1:
            t_edges = num.linspace(
                tmin - 0.5 * deltat, tmax + 0.5 * deltat, nt + 1)
            # Piecewise-linear box: zero-width ramps at both edges.
            t = tmin_stf + self.duration * num.array(
                [0.0, 0.0, 1.0, 1.0], dtype=float)
            f = num.array([0., 1., 1., 0.], dtype=float)
            amplitudes = util.plf_integrate_piecewise(t_edges, t, f)
            amplitudes /= num.sum(amplitudes)

        # Correct for centroid drift introduced by grid snapping.
        tshift = (num.sum(amplitudes * times) - self.centroid_time(tref))

        return sshift(times, amplitudes, -tshift, deltat)

    def base_key(self):
        return (type(self).__name__, self.duration, self.anchor)

854 

855 

class TriangularSTF(STF):

    '''
    Triangular type source time function.

    .. figure :: /static/stf-TriangularSTF.svg
        :width: 40%
        :align: center
        :alt: triangular source time function
    '''

    duration = Float.T(
        default=0.0,
        help='baseline of the triangle')

    peak_ratio = Float.T(
        default=0.5,
        help='fraction of time compared to duration, '
             'when the maximum amplitude is reached')

    anchor = Float.T(
        default=0.0,
        help='anchor point with respect to source.time: ('
             '-1.0: left -> source duration [0, T] ~ hypocenter time, '
             ' 0.0: center -> source duration [-T/2, T/2] ~ centroid time, '
             '+1.0: right -> source duration [-T, 0] ~ rupture end time)')

    @classmethod
    def factor_duration_to_effective(cls, peak_ratio=None):
        # Ratio of effective (moment-rate-weighted) to nominal duration.
        if peak_ratio is None:
            peak_ratio = cls.peak_ratio.default()

        return math.sqrt((peak_ratio**2 - peak_ratio + 1.0) * 2.0 / 3.0)

    def __init__(self, effective_duration=None, **kwargs):
        if effective_duration is not None:
            kwargs['duration'] = effective_duration / \
                self.factor_duration_to_effective(
                    kwargs.get('peak_ratio', None))

        STF.__init__(self, **kwargs)

    @property
    def centroid_ratio(self):
        # Position of the triangle's centroid as a fraction of duration.
        ra = self.peak_ratio
        rb = 1.0 - ra
        return self.peak_ratio + (rb**2 / 3. - ra**2 / 3.) / (ra + rb)

    def centroid_time(self, tref):
        ca = self.centroid_ratio
        cb = 1.0 - ca
        if self.anchor <= 0.:
            return tref - ca * self.duration * self.anchor
        else:
            return tref - cb * self.duration * self.anchor

    @property
    def effective_duration(self):
        return self.duration * self.factor_duration_to_effective(
            self.peak_ratio)

    def tminmax_stf(self, tref):
        # Continuous start/end time of the triangle for given anchor.
        ca = self.centroid_ratio
        cb = 1.0 - ca
        if self.anchor <= 0.:
            tmin_stf = tref - ca * self.duration * (self.anchor + 1.)
            tmax_stf = tmin_stf + self.duration
        else:
            tmax_stf = tref + cb * self.duration * (1. - self.anchor)
            tmin_stf = tmax_stf - self.duration

        return tmin_stf, tmax_stf

    def discretize_t(self, deltat, tref):
        '''
        Discretize the triangle on a time grid with spacing ``deltat``.

        Returns ``(times, amplitudes)`` with amplitudes normalized to
        unit sum.
        '''
        tmin_stf, tmax_stf = self.tminmax_stf(tref)

        tmin = round(tmin_stf / deltat) * deltat
        tmax = round(tmax_stf / deltat) * deltat
        nt = int(round((tmax - tmin) / deltat)) + 1
        if nt > 1:
            t_edges = num.linspace(
                tmin - 0.5 * deltat, tmax + 0.5 * deltat, nt + 1)
            # Piecewise-linear triangle peaking at peak_ratio.
            t = tmin_stf + self.duration * num.array(
                [0.0, self.peak_ratio, 1.0], dtype=float)
            f = num.array([0., 1., 0.], dtype=float)
            amplitudes = util.plf_integrate_piecewise(t_edges, t, f)
            amplitudes /= num.sum(amplitudes)
        else:
            amplitudes = num.ones(1)

        times = num.linspace(tmin, tmax, nt)
        return times, amplitudes

    def base_key(self):
        return (
            type(self).__name__, self.duration, self.peak_ratio, self.anchor)

952 

953 

class HalfSinusoidSTF(STF):

    '''
    Half sinusoid type source time function.

    .. figure :: /static/stf-HalfSinusoidSTF.svg
        :width: 40%
        :align: center
        :alt: half-sinusouid source time function
    '''

    duration = Float.T(
        default=0.0,
        help='duration of the half-sinusoid (baseline)')

    anchor = Float.T(
        default=0.0,
        help='anchor point with respect to source.time: ('
             '-1.0: left -> source duration [0, T] ~ hypocenter time, '
             ' 0.0: center -> source duration [-T/2, T/2] ~ centroid time, '
             '+1.0: right -> source duration [-T, 0] ~ rupture end time)')

    exponent = Int.T(
        default=1,
        help='set to 2 to use square of the half-period sinusoidal function.')

    def __init__(self, effective_duration=None, **kwargs):
        if effective_duration is not None:
            kwargs['duration'] = effective_duration / \
                self.factor_duration_to_effective(
                    kwargs.get('exponent', 1))

        STF.__init__(self, **kwargs)

    @classmethod
    def factor_duration_to_effective(cls, exponent):
        # Analytic ratios of effective to nominal duration for the two
        # supported exponents.
        if exponent == 1:
            return math.sqrt(3.0 * math.pi**2 - 24.0) / math.pi
        elif exponent == 2:
            return math.sqrt(math.pi**2 - 6) / math.pi
        else:
            raise ValueError('Exponent for HalfSinusoidSTF must be 1 or 2.')

    @property
    def effective_duration(self):
        return self.duration * self.factor_duration_to_effective(self.exponent)

    def centroid_time(self, tref):
        return tref - 0.5 * self.duration * self.anchor

    def discretize_t(self, deltat, tref):
        '''
        Discretize the half-sinusoid on a grid with spacing ``deltat``.

        Per-sample amplitudes are exact integrals of the analytic
        moment-rate function between sample edges, normalized to unit
        sum.
        '''
        tmin_stf = tref - self.duration * (self.anchor + 1.) * 0.5
        tmax_stf = tref + self.duration * (1. - self.anchor) * 0.5
        tmin = round(tmin_stf / deltat) * deltat
        tmax = round(tmax_stf / deltat) * deltat
        nt = int(round((tmax - tmin) / deltat)) + 1
        if nt > 1:
            # Clip sample edges to the support of the STF.
            t_edges = num.maximum(tmin_stf, num.minimum(tmax_stf, num.linspace(
                tmin - 0.5 * deltat, tmax + 0.5 * deltat, nt + 1)))

            # fint: antiderivative of the (unnormalized) moment rate.
            if self.exponent == 1:
                fint = -num.cos(
                    (t_edges - tmin_stf) * (math.pi / self.duration))

            elif self.exponent == 2:
                fint = (t_edges - tmin_stf) / self.duration \
                    - 1.0 / (2.0 * math.pi) * num.sin(
                        (t_edges - tmin_stf) * (2.0 * math.pi / self.duration))
            else:
                raise ValueError(
                    'Exponent for HalfSinusoidSTF must be 1 or 2.')

            amplitudes = fint[1:] - fint[:-1]
            amplitudes /= num.sum(amplitudes)
        else:
            amplitudes = num.ones(1)

        times = num.linspace(tmin, tmax, nt)
        return times, amplitudes

    def base_key(self):
        return (type(self).__name__, self.duration, self.anchor)

1036 

1037 

class SmoothRampSTF(STF):
    '''
    Smooth-ramp type source time function for near-field displacement.
    Based on moment function of double-couple point source proposed by Bruestle
    and Mueller (PEPI, 1983).

    .. [1] W. Bruestle, G. Mueller (1983), Moment and duration of shallow
        earthquakes from Love-wave modelling for regional distances, PEPI 32,
        312-324.

    .. figure :: /static/stf-SmoothRampSTF.svg
        :width: 40%
        :alt: smooth ramp source time function
    '''
    duration = Float.T(
        default=0.0,
        help='duration of the ramp (baseline)')

    rise_ratio = Float.T(
        default=0.5,
        help='fraction of time compared to duration, '
             'when the maximum amplitude is reached')

    anchor = Float.T(
        default=0.0,
        help='anchor point with respect to source.time: ('
             '-1.0: left -> source duration ``[0, T]`` ~ hypocenter time, '
             '0.0: center -> source duration ``[-T/2, T/2]`` ~ centroid time, '
             '+1.0: right -> source duration ``[-T, 0]`` ~ rupture end time)')

    def discretize_t(self, deltat, tref):
        '''
        Discretize the smooth ramp on a grid with spacing ``deltat``.

        Returns ``(times, amplitudes)`` with amplitudes normalized to
        unit sum; samples after the rise window keep the plateau value.
        '''
        tmin_stf = tref - self.duration * (self.anchor + 1.) * 0.5
        tmax_stf = tref + self.duration * (1. - self.anchor) * 0.5
        tmin = round(tmin_stf / deltat) * deltat
        tmax = round(tmax_stf / deltat) * deltat
        D = round((tmax - tmin) / deltat) * deltat
        nt = int(round(D / deltat)) + 1
        times = num.linspace(tmin, tmax, nt)
        if nt > 1:
            rise_time = self.rise_ratio * self.duration
            amplitudes = num.ones_like(times)
            # NOTE(review): the rise mask is taken relative to the
            # grid-snapped tmin, while the cosine terms use the
            # continuous tmin_stf -- presumably intentional; verify.
            tp = tmin + rise_time
            ii = num.where(times <= tp)
            t_inc = times[ii]
            a = num.cos(num.pi * (t_inc - tmin_stf) / rise_time)
            b = num.cos(3 * num.pi * (t_inc - tmin_stf) / rise_time) - 1.0
            amplitudes[ii] = (9. / 16.) * (1 - a + (1. / 9.) * b)

            amplitudes /= num.sum(amplitudes)
        else:
            amplitudes = num.ones(1)

        return times, amplitudes

    def base_key(self):
        return (type(self).__name__,
                self.duration, self.rise_ratio, self.anchor)

1095 

1096 

class ResonatorSTF(STF):
    '''
    Simple resonator like source time function.

    .. math ::

        f(t) = 0 for t < 0
        f(t) = e^{-t/tau} * sin(2 * pi * f * t)


    .. figure :: /static/stf-SmoothRampSTF.svg
        :width: 40%
        :alt: smooth ramp source time function

    '''

    duration = Float.T(
        default=0.0,
        help='decay time')

    frequency = Float.T(
        default=1.0,
        help='resonance frequency')

    def discretize_t(self, deltat, tref):
        '''
        Sample the decaying oscillation on a grid with spacing
        ``deltat``, from ``tref`` to three decay times later.
        '''
        tmin = math.floor(tref / deltat) * deltat
        tmax = math.ceil((tref + self.duration * 3) / deltat) * deltat
        times = util.arange2(tmin, tmax, deltat)

        trel = times - tref
        amplitudes = (
            num.exp(-trel / self.duration)
            * num.sin(2.0 * num.pi * self.frequency * trel))

        return times, amplitudes

    def base_key(self):
        return (type(self).__name__, self.duration, self.frequency)

1135 

1136 

class STFMode(StringChoice):
    '''
    Whether to apply the source time function 'pre' (during discretization)
    or 'post' (during post-processing of the stacked seismograms).
    '''
    choices = ['pre', 'post']

1139 

1140 

class Source(Location, Cloneable):
    '''
    Base class for all source models.
    '''

    name = String.T(optional=True, default='')

    time = Timestamp.T(
        default=Timestamp.D('1970-01-01 00:00:00'),
        help='source origin time.')

    stf = STF.T(
        optional=True,
        help='source time function.')

    stf_mode = STFMode.T(
        default='post',
        help='whether to apply source time function in pre or '
             'post-processing.')

    def __init__(self, **kwargs):
        Location.__init__(self, **kwargs)

    def update(self, **kwargs):
        '''
        Change some of the source model's parameters.

        :param kwargs: source parameter names mapped to their new values

        Example::

            >>> from pyrocko import gf
            >>> s = gf.DCSource()
            >>> s.update(strike=66., dip=33.)
            >>> print(s)
            --- !pf.DCSource
            depth: 0.0
            time: 1970-01-01 00:00:00
            magnitude: 6.0
            strike: 66.0
            dip: 33.0
            rake: 0.0

        '''

        for (k, v) in kwargs.items():
            self[k] = v

    def grid(self, **variables):
        '''
        Create grid of source model variations.

        :param variables: source parameter names mapped to range objects
            over which each parameter should be varied
        :returns: :py:class:`SourceGrid` instance.

        Example::

            >>> from pyrocko import gf
            >>> base = DCSource()
            >>> R = gf.Range
            >>> for s in base.grid(R('

        NOTE(review): the example above was already truncated in the
        original — complete it once the intended ``Range`` usage is
        confirmed.
        '''
        return SourceGrid(base=self, variables=variables)

    def base_key(self):
        '''
        Get key to decide about source discretization / GF stack sharing.

        When two source models differ only in amplitude and origin time, the
        discretization and the GF stacking can be done only once for a unit
        amplitude and a zero origin time and the amplitude and origin times of
        the seismograms can be applied during post-processing of the synthetic
        seismogram.

        For any derived parameterized source model, this method is called to
        decide if discretization and stacking of the source should be shared.
        When two source models return an equal vector of values discretization
        is shared.
        '''
        return (self.depth, self.lat, self.north_shift,
                self.lon, self.east_shift, self.time, type(self).__name__) + \
            self.effective_stf_pre().base_key()

    def get_factor(self):
        '''
        Get the scaling factor to be applied during post-processing.

        Discretization of the base seismogram is usually done for a unit
        amplitude, because a common factor can be efficiently multiplied to
        final seismograms. This eliminates the need to repeat the stacking
        when creating seismograms for a series of source models only
        differing in amplitude.

        This method should return the scaling factor to apply in the
        post-processing (often this is simply the scalar moment of the source).
        '''

        return 1.0

    def effective_stf_pre(self):
        '''
        Return the STF applied before stacking of the Green's functions.

        This STF is used during discretization of the parameterized source
        models, i.e. to produce a temporal distribution of point sources.

        Handling of the STF before stacking of the GFs is less efficient but
        allows to use different source time functions for different parts of
        the source.
        '''

        if self.stf is not None and self.stf_mode == 'pre':
            return self.stf
        else:
            # Fall back to a unit pulse (no temporal spreading).
            return g_unit_pulse

    def effective_stf_post(self):
        '''
        Return the STF applied after stacking of the Green's functions.

        This STF is used in the post-processing of the synthetic seismograms.

        Handling of the STF after stacking of the GFs is usually more efficient
        but is only possible when a common STF is used for all subsources.
        '''

        if self.stf is not None and self.stf_mode == 'post':
            return self.stf
        else:
            return g_unit_pulse

    def _dparams_base(self):
        # Discretization parameters for a single point source at the
        # source's own position and time.
        return dict(times=arr(self.time),
                    lat=self.lat, lon=self.lon,
                    north_shifts=arr(self.north_shift),
                    east_shifts=arr(self.east_shift),
                    depths=arr(self.depth))

    def _hash(self):
        # Hex digest over the base key, usable as a cache identifier.
        sha = sha1()
        for k in self.base_key():
            sha.update(str(k).encode())
        return sha.hexdigest()

    def _dparams_base_repeated(self, times):
        # Like _dparams_base, but with the position repeated once per
        # given source time (one point source per STF sample).
        if times is None:
            return self._dparams_base()

        nt = times.size
        north_shifts = num.repeat(self.north_shift, nt)
        east_shifts = num.repeat(self.east_shift, nt)
        depths = num.repeat(self.depth, nt)
        return dict(times=times,
                    lat=self.lat, lon=self.lon,
                    north_shifts=north_shifts,
                    east_shifts=east_shifts,
                    depths=depths)

    def pyrocko_event(self, store=None, target=None, **kwargs):
        '''
        Convert the source model to a :py:class:`pyrocko.model.Event`.
        '''
        duration = None
        if self.stf:
            duration = self.stf.effective_duration

        return model.Event(
            lat=self.lat,
            lon=self.lon,
            north_shift=self.north_shift,
            east_shift=self.east_shift,
            time=self.time,
            name=self.name,
            depth=self.depth,
            duration=duration,
            **kwargs)

    def outline(self, cs='xyz'):
        '''
        Get the source's outline (a single point for a point source).

        :param cs: coordinate system name
            (see :ref:`coordinate-system-names`)
        '''
        points = num.atleast_2d(num.zeros([1, 3]))

        points[:, 0] += self.north_shift
        points[:, 1] += self.east_shift
        points[:, 2] += self.depth
        if cs == 'xyz':
            return points
        elif cs == 'xy':
            return points[:, :2]
        elif cs in ('latlon', 'lonlat'):
            latlon = ne_to_latlon(
                self.lat, self.lon, points[:, 0], points[:, 1])

            latlon = num.array(latlon).T
            if cs == 'latlon':
                return latlon
            else:
                # 'lonlat': swap columns.
                return latlon[:, ::-1]

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        '''
        Create a source object from a :py:class:`pyrocko.model.Event`.

        :raises ConversionError: if the event has no depth information
        '''
        if ev.depth is None:
            raise ConversionError(
                'Cannot convert event object to source object: '
                'no depth information available')

        stf = None
        if ev.duration is not None:
            stf = HalfSinusoidSTF(effective_duration=ev.duration)

        d = dict(
            name=ev.name,
            time=ev.time,
            lat=ev.lat,
            lon=ev.lon,
            north_shift=ev.north_shift,
            east_shift=ev.east_shift,
            depth=ev.depth,
            stf=stf)
        d.update(kwargs)
        return cls(**d)

    def get_magnitude(self):
        # Subclasses which can provide a magnitude override this method.
        raise NotImplementedError(
            '%s does not implement get_magnitude()'
            % self.__class__.__name__)

1360 

1361 

class SourceWithMagnitude(Source):
    '''
    Base class for sources containing a moment magnitude.
    '''

    magnitude = Float.T(
        default=6.0,
        help='Moment magnitude Mw as in [Hanks and Kanamori, 1979]')

    def __init__(self, **kwargs):
        # Accept ``moment`` as an alternative spelling of the source
        # strength; it is converted to ``magnitude`` unless a magnitude is
        # also given explicitly.
        if 'moment' in kwargs:
            mom = kwargs.pop('moment')
            if 'magnitude' not in kwargs:
                kwargs['magnitude'] = float(pmt.moment_to_magnitude(mom))

        Source.__init__(self, **kwargs)

    @property
    def moment(self):
        # Scalar moment M0 [Nm], derived from ``magnitude``.
        return float(pmt.magnitude_to_moment(self.magnitude))

    @moment.setter
    def moment(self, value):
        self.magnitude = float(pmt.moment_to_magnitude(value))

    def pyrocko_event(self, store=None, target=None, **kwargs):
        return Source.pyrocko_event(
            self, store, target,
            magnitude=self.magnitude,
            **kwargs)

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        '''
        Create a source object from a :py:class:`pyrocko.model.Event`,
        taking over its magnitude when available.
        '''
        d = {}
        # Bug fix: use an explicit None check. The previous truthiness
        # test ``if ev.magnitude:`` silently dropped a valid magnitude of
        # exactly 0.0 (consistent with MTSource.from_pyrocko_event).
        if ev.magnitude is not None:
            d.update(magnitude=ev.magnitude)

        d.update(kwargs)
        return super(SourceWithMagnitude, cls).from_pyrocko_event(ev, **d)

    def get_magnitude(self):
        return self.magnitude

1404 

1405 

class DerivedMagnitudeError(ValidationError):
    '''
    Raised when the magnitude cannot be derived from (or conflicts with)
    other source parameters.
    '''
    pass

1408 

1409 

class SourceWithDerivedMagnitude(Source):
    '''
    Base class for sources whose magnitude is derived from other parameters.
    '''

    class __T(Source.T):
        # Hook into guts validation so that conflicting parameter
        # combinations are rejected when the object is validated.

        def validate_extra(self, val):
            Source.T.validate_extra(self, val)
            val.check_conflicts()

    def check_conflicts(self):
        '''
        Check for parameter conflicts.

        To be overloaded in subclasses. Raises :py:exc:`DerivedMagnitudeError`
        on conflicts.
        '''
        pass

    def get_magnitude(self, store=None, target=None):
        '''
        Get the derived moment magnitude. To be overloaded in subclasses.

        :raises DerivedMagnitudeError: if no magnitude can be derived
        '''
        raise DerivedMagnitudeError('No magnitude set.')

    def get_moment(self, store=None, target=None):
        '''
        Get the scalar moment [Nm] corresponding to the derived magnitude.
        '''
        return float(pmt.magnitude_to_moment(
            self.get_magnitude(store, target)))

    def pyrocko_moment_tensor(self, store=None, target=None):
        raise NotImplementedError(
            '%s does not implement pyrocko_moment_tensor()'
            % self.__class__.__name__)

    def pyrocko_event(self, store=None, target=None, **kwargs):
        '''
        Convert to a :py:class:`pyrocko.model.Event`; best-effort — the
        moment tensor and magnitude are omitted when they cannot be derived.
        '''
        try:
            mt = self.pyrocko_moment_tensor(store, target)
            magnitude = self.get_magnitude()
        except (DerivedMagnitudeError, NotImplementedError):
            mt = None
            magnitude = None

        return Source.pyrocko_event(
            self, store, target,
            moment_tensor=mt,
            magnitude=magnitude,
            **kwargs)

1452 

1453 

class ExplosionSource(SourceWithDerivedMagnitude):
    '''
    An isotropic explosion point source.

    The strength can be given either as ``magnitude`` or as
    ``volume_change`` (setting both is an error); if neither is set, a
    unit moment is assumed.
    '''

    magnitude = Float.T(
        optional=True,
        help='moment magnitude Mw as in [Hanks and Kanamori, 1979]')

    volume_change = Float.T(
        optional=True,
        help='volume change of the explosion/implosion or '
             'the contracting/extending magmatic source. [m^3]')

    discretized_source_class = meta.DiscretizedExplosionSource

    def __init__(self, **kwargs):
        # Accept ``moment`` as an alternative way to give the strength;
        # converted to ``magnitude`` unless a magnitude is also given.
        if 'moment' in kwargs:
            mom = kwargs.pop('moment')
            if 'magnitude' not in kwargs:
                kwargs['magnitude'] = float(pmt.moment_to_magnitude(mom))

        SourceWithDerivedMagnitude.__init__(self, **kwargs)

    def base_key(self):
        return SourceWithDerivedMagnitude.base_key(self) + \
            (self.volume_change,)

    def check_conflicts(self):
        # magnitude and volume_change both define the source strength, so
        # they must not be given at the same time.
        if self.magnitude is not None and self.volume_change is not None:
            raise DerivedMagnitudeError(
                'Magnitude and volume_change are both defined.')

    def get_magnitude(self, store=None, target=None):
        '''
        Get the moment magnitude, derived from ``volume_change`` if needed.

        :raises DerivedMagnitudeError: on parameter conflicts or when the
            conversion needs an earth model and none is available
        '''
        self.check_conflicts()

        if self.magnitude is not None:
            return self.magnitude

        elif self.volume_change is not None:
            moment = self.volume_change * \
                self.get_moment_to_volume_change_ratio(store, target)

            # abs(): an implosion (negative volume change) still has a
            # positive scalar moment.
            return float(pmt.moment_to_magnitude(abs(moment)))
        else:
            # Neither given: fall back to a unit moment (M0 = 1 Nm).
            return float(pmt.moment_to_magnitude(1.0))

    def get_volume_change(self, store=None, target=None):
        '''
        Get the volume change [m^3], derived from ``magnitude`` if needed.
        '''
        self.check_conflicts()

        if self.volume_change is not None:
            return self.volume_change

        elif self.magnitude is not None:
            moment = float(pmt.magnitude_to_moment(self.magnitude))
            return moment / self.get_moment_to_volume_change_ratio(
                store, target)

        else:
            # Unit-moment fallback, consistent with get_magnitude().
            return 1.0 / self.get_moment_to_volume_change_ratio(store)

    def get_moment_to_volume_change_ratio(self, store, target=None):
        '''
        Get the ratio between scalar moment and volume change [Nm/m^3].

        The ratio is three times the shear modulus at the source position,
        extracted from the earth model of the given GF store.

        :raises DerivedMagnitudeError: if no store is given or the source
            position is out of the model's bounds
        '''
        if store is None:
            raise DerivedMagnitudeError(
                'Need earth model to convert between volume change and '
                'magnitude.')

        points = num.array(
            [[self.north_shift, self.east_shift, self.depth]], dtype=float)

        interpolation = target.interpolation if target else 'multilinear'
        try:
            shear_moduli = store.config.get_shear_moduli(
                self.lat, self.lon,
                points=points,
                interpolation=interpolation)[0]
        except meta.OutOfBounds:
            raise DerivedMagnitudeError(
                'Could not get shear modulus at source position.')

        return float(3. * shear_moduli)

    def get_factor(self):
        # Moment is applied during discretization, not in post-processing.
        return 1.0

    def discretize_basesource(self, store, target=None):
        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)

        # sqrt(2/3) scales the scalar moment to the isotropic tensor's
        # diagonal entries (same convention as MTSource.from_pyrocko_event).
        amplitudes *= self.get_moment(store, target) * math.sqrt(2. / 3.)

        if self.volume_change is not None:
            if self.volume_change < 0.:
                # Implosion: flip the sign of the isotropic amplitudes.
                amplitudes *= -1

        return meta.DiscretizedExplosionSource(
            m0s=amplitudes,
            **self._dparams_base_repeated(times))

    def pyrocko_moment_tensor(self, store=None, target=None):
        a = self.get_moment(store, target) * math.sqrt(2. / 3.)
        return pmt.MomentTensor(m=pmt.symmat6(a, a, a, 0., 0., 0.))

1556 

1557 

class RectangularExplosionSource(ExplosionSource):
    '''
    Rectangular or line explosion source.

    The explosion front spreads from the nucleation point over a
    rectangular plane with constant ``velocity``.
    '''

    discretized_source_class = meta.DiscretizedExplosionSource

    strike = Float.T(
        default=0.0,
        help='strike direction in [deg], measured clockwise from north')

    dip = Float.T(
        default=90.0,
        help='dip angle in [deg], measured downward from horizontal')

    length = Float.T(
        default=0.,
        help='length of rectangular source area [m]')

    width = Float.T(
        default=0.,
        help='width of rectangular source area [m]')

    # NOTE(review): the help text mentions center_left/center_right, but
    # those are missing from the choices list (map_anchor defines both) —
    # confirm which is intended.
    anchor = StringChoice.T(
        choices=['top', 'top_left', 'top_right', 'center', 'bottom',
                 'bottom_left', 'bottom_right'],
        default='center',
        optional=True,
        help='Anchor point for positioning the plane, can be: top, center or'
             'bottom and also top_left, top_right,bottom_left,'
             'bottom_right, center_left and center right')

    nucleation_x = Float.T(
        optional=True,
        help='horizontal position of rupture nucleation in normalized fault '
             'plane coordinates (-1 = left edge, +1 = right edge)')

    nucleation_y = Float.T(
        optional=True,
        help='down-dip position of rupture nucleation in normalized fault '
             'plane coordinates (-1 = upper edge, +1 = lower edge)')

    velocity = Float.T(
        default=3500.,
        help='speed of explosion front [m/s]')

    aggressive_oversampling = Bool.T(
        default=False,
        help='Aggressive oversampling for basesource discretization. '
             'When using \'multilinear\' interpolation oversampling has'
             ' practically no effect.')

    def base_key(self):
        return Source.base_key(self) + (self.strike, self.dip, self.length,
                                        self.width, self.nucleation_x,
                                        self.nucleation_y, self.velocity,
                                        self.anchor)

    def discretize_basesource(self, store, target=None):
        '''
        Discretize the rectangle into a cloud of explosion point sources.
        '''

        # Map normalized nucleation coordinates [-1, 1] to offsets in [m]
        # from the plane's center.
        if self.nucleation_x is not None:
            nucx = self.nucleation_x * 0.5 * self.length
        else:
            nucx = None

        if self.nucleation_y is not None:
            nucy = self.nucleation_y * 0.5 * self.width
        else:
            nucy = None

        stf = self.effective_stf_pre()

        points, times, amplitudes, dl, dw, nl, nw = discretize_rect_source(
            store.config.deltas, store.config.deltat,
            self.time, self.north_shift, self.east_shift, self.depth,
            self.strike, self.dip, self.length, self.width, self.anchor,
            self.velocity, stf=stf, nucleation_x=nucx, nucleation_y=nucy)

        # Normalize, then distribute the total moment over the subsources.
        amplitudes /= num.sum(amplitudes)
        amplitudes *= self.get_moment(store, target)

        return meta.DiscretizedExplosionSource(
            lat=self.lat,
            lon=self.lon,
            times=times,
            north_shifts=points[:, 0],
            east_shifts=points[:, 1],
            depths=points[:, 2],
            m0s=amplitudes)

    def outline(self, cs='xyz'):
        '''
        Get the outline of the rectangular source plane.

        :param cs: coordinate system name
            (see :ref:`coordinate-system-names`)
        '''
        points = outline_rect_source(self.strike, self.dip, self.length,
                                     self.width, self.anchor)

        points[:, 0] += self.north_shift
        points[:, 1] += self.east_shift
        points[:, 2] += self.depth
        if cs == 'xyz':
            return points
        elif cs == 'xy':
            return points[:, :2]
        elif cs in ('latlon', 'lonlat'):
            latlon = ne_to_latlon(
                self.lat, self.lon, points[:, 0], points[:, 1])

            latlon = num.array(latlon).T
            if cs == 'latlon':
                return latlon
            else:
                return latlon[:, ::-1]

    def get_nucleation_abs_coord(self, cs='xy'):
        '''
        Get the nucleation point in absolute coordinates.

        Returns ``(None, None)`` when no nucleation point is set.
        '''

        if self.nucleation_x is None:
            return None, None

        coords = from_plane_coords(self.strike, self.dip, self.length,
                                   self.width, self.depth, self.nucleation_x,
                                   self.nucleation_y, lat=self.lat,
                                   lon=self.lon, north_shift=self.north_shift,
                                   east_shift=self.east_shift, cs=cs)
        return coords

1680 

1681 

class DCSource(SourceWithMagnitude):
    '''
    A double-couple point source.
    '''

    strike = Float.T(
        default=0.0,
        help='strike direction in [deg], measured clockwise from north')

    dip = Float.T(
        default=90.0,
        help='dip angle in [deg], measured downward from horizontal')

    rake = Float.T(
        default=0.0,
        help='rake angle in [deg], '
             'measured counter-clockwise from right-horizontal '
             'in on-plane view')

    discretized_source_class = meta.DiscretizedMTSource

    def base_key(self):
        return Source.base_key(self) + (self.strike, self.dip, self.rake)

    def get_factor(self):
        # The scalar moment is applied as a common factor during
        # post-processing (see Source.get_factor).
        return float(pmt.magnitude_to_moment(self.magnitude))

    def discretize_basesource(self, store, target=None):
        mot = pmt.MomentTensor(
            strike=self.strike, dip=self.dip, rake=self.rake)

        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)
        # One m6 row per STF sample via outer broadcasting.
        return meta.DiscretizedMTSource(
            m6s=mot.m6()[num.newaxis, :] * amplitudes[:, num.newaxis],
            **self._dparams_base_repeated(times))

    def pyrocko_moment_tensor(self, store=None, target=None):
        return pmt.MomentTensor(
            strike=self.strike,
            dip=self.dip,
            rake=self.rake,
            scalar_moment=self.moment)

    def pyrocko_event(self, store=None, target=None, **kwargs):
        return SourceWithMagnitude.pyrocko_event(
            self, store, target,
            moment_tensor=self.pyrocko_moment_tensor(store, target),
            **kwargs)

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        '''
        Create a :py:class:`DCSource` from a pyrocko event, taking over its
        moment tensor's first (strike, dip, rake) triple when available.
        '''
        d = {}
        mt = ev.moment_tensor
        if mt:
            (strike, dip, rake), _ = mt.both_strike_dip_rake()
            d.update(
                strike=float(strike),
                dip=float(dip),
                rake=float(rake),
                magnitude=float(mt.moment_magnitude()))

        d.update(kwargs)
        return super(DCSource, cls).from_pyrocko_event(ev, **d)

1746 

1747 

class CLVDSource(SourceWithMagnitude):
    '''
    A pure CLVD (compensated linear vector dipole) point source.

    The orientation of the largest dipole is given by ``azimuth`` and
    ``dip``.
    '''

    discretized_source_class = meta.DiscretizedMTSource

    azimuth = Float.T(
        default=0.0,
        help='azimuth direction of largest dipole, clockwise from north [deg]')

    dip = Float.T(
        default=90.,
        help='dip direction of largest dipole, downward from horizontal [deg]')

    def base_key(self):
        return Source.base_key(self) + (self.azimuth, self.dip)

    def get_factor(self):
        # Scalar moment, applied as a common factor in post-processing.
        return float(pmt.magnitude_to_moment(self.magnitude))

    @property
    def m6(self):
        # Pure CLVD with eigenvalues (-0.5, -0.5, 1.) * a, rotated so its
        # largest dipole points along the configured azimuth/dip.
        a = math.sqrt(4. / 3.) * self.get_factor()
        m = pmt.symmat6(-0.5 * a, -0.5 * a, a, 0., 0., 0.)
        rotmat1 = pmt.euler_to_matrix(
            d2r * (self.dip - 90.),
            d2r * (self.azimuth - 90.),
            0.)
        m = rotmat1.T * m * rotmat1
        return pmt.to6(m)

    @property
    def m6_astuple(self):
        # m6 as a plain tuple of floats.
        return tuple(self.m6.tolist())

    def discretize_basesource(self, store, target=None):
        # Divide out the factor: it is re-applied in post-processing
        # (see get_factor).
        factor = self.get_factor()
        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)
        return meta.DiscretizedMTSource(
            m6s=self.m6[num.newaxis, :] * amplitudes[:, num.newaxis] / factor,
            **self._dparams_base_repeated(times))

    def pyrocko_moment_tensor(self, store=None, target=None):
        return pmt.MomentTensor(m=pmt.symmat6(*self.m6_astuple))

    def pyrocko_event(self, store=None, target=None, **kwargs):
        mt = self.pyrocko_moment_tensor(store, target)
        return Source.pyrocko_event(
            self, store, target,
            # Reuse the already-computed moment tensor; the original code
            # redundantly called pyrocko_moment_tensor() a second time here.
            moment_tensor=mt,
            magnitude=float(mt.moment_magnitude()),
            **kwargs)

1802 

1803 

class VLVDSource(SourceWithDerivedMagnitude):
    '''
    Volumetric linear vector dipole source.

    This source is a parameterization for a restricted moment tensor point
    source, useful to represent dyke or sill like inflation or deflation
    sources. The restriction is such that the moment tensor is rotational
    symmetric. It can be represented by a superposition of a linear vector
    dipole (here we use a CLVD for convenience) and an isotropic component.
    The restricted moment tensor has 4 degrees of freedom: 2 independent
    eigenvalues and 2 rotation angles orienting the symmetry axis.

    In this parameterization, the isotropic component is controlled by
    ``volume_change``. To define the moment tensor, it must be converted to
    the scalar moment of the MT's isotropic component. For the conversion,
    the shear modulus at the source's position must be known. This value is
    extracted from the earth model defined in the GF store in use.

    The CLVD part is controlled by its scalar moment :math:`M_0`:
    ``clvd_moment``. The sign of ``clvd_moment`` is used to switch between a
    positive or negative CLVD (the sign of the largest eigenvalue).
    '''

    discretized_source_class = meta.DiscretizedMTSource

    azimuth = Float.T(
        default=0.0,
        help='azimuth direction of symmetry axis, clockwise from north [deg].')

    dip = Float.T(
        default=90.,
        help='dip direction of symmetry axis, downward from horizontal [deg].')

    volume_change = Float.T(
        default=0.,
        help='volume change of the inflation/deflation [m^3].')

    clvd_moment = Float.T(
        default=0.,
        help='scalar moment :math:`M_0` of the CLVD component [Nm]. The sign '
             'controls the sign of the CLVD (the sign of its largest '
             'eigenvalue).')

    def get_moment_to_volume_change_ratio(self, store, target):
        '''
        Get the ratio between scalar moment and volume change [Nm/m^3],
        three times the shear modulus at the source position.

        :raises DerivedMagnitudeError: if store or target are missing, or
            the source position is out of the earth model's bounds
        '''
        if store is None or target is None:
            raise DerivedMagnitudeError(
                'Need earth model to convert between volume change and '
                'magnitude.')

        points = num.array(
            [[self.north_shift, self.east_shift, self.depth]], dtype=float)

        try:
            shear_moduli = store.config.get_shear_moduli(
                self.lat, self.lon,
                points=points,
                interpolation=target.interpolation)[0]
        except meta.OutOfBounds:
            raise DerivedMagnitudeError(
                'Could not get shear modulus at source position.')

        return float(3. * shear_moduli)

    def base_key(self):
        return Source.base_key(self) + \
            (self.azimuth, self.dip, self.volume_change, self.clvd_moment)

    def get_magnitude(self, store=None, target=None):
        # Magnitude of the combined (CLVD + isotropic) moment tensor.
        mt = self.pyrocko_moment_tensor(store, target)
        return float(pmt.moment_to_magnitude(mt.moment))

    def get_m6(self, store, target):
        '''
        Get the combined (CLVD + isotropic) moment tensor as a 6-vector.
        '''
        # CLVD part: eigenvalues (-0.5, -0.5, 1.) * a, rotated so its
        # symmetry axis points along the configured azimuth/dip.
        a = math.sqrt(4. / 3.) * self.clvd_moment
        m_clvd = pmt.symmat6(-0.5 * a, -0.5 * a, a, 0., 0., 0.)

        rotmat1 = pmt.euler_to_matrix(
            d2r * (self.dip - 90.),
            d2r * (self.azimuth - 90.),
            0.)
        m_clvd = rotmat1.T * m_clvd * rotmat1

        m_iso = self.volume_change * \
            self.get_moment_to_volume_change_ratio(store, target)

        m_iso = pmt.symmat6(m_iso, m_iso, m_iso, 0.,
                            0., 0.,) * math.sqrt(2. / 3)

        m = pmt.to6(m_clvd) + pmt.to6(m_iso)
        return m

    def get_moment(self, store=None, target=None):
        return float(pmt.magnitude_to_moment(
            self.get_magnitude(store, target)))

    def get_m6_astuple(self, store, target):
        m6 = self.get_m6(store, target)
        return tuple(m6.tolist())

    def discretize_basesource(self, store, target=None):
        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)

        m6 = self.get_m6(store, target)
        # Bug fix: emit one m6 row per STF sample via outer broadcasting,
        # (nt, 1) * (1, 6) -> (nt, 6). The previous in-place
        # ``m6 *= amplitudes / self.get_factor()`` was only valid for
        # single-sample STFs (and mis-broadcast for nt == 6); for nt == 1
        # the result is unchanged.
        m6s = m6[num.newaxis, :] * amplitudes[:, num.newaxis] \
            / self.get_factor()

        return meta.DiscretizedMTSource(
            m6s=m6s,
            **self._dparams_base_repeated(times))

    def pyrocko_moment_tensor(self, store=None, target=None):
        m6_astuple = self.get_m6_astuple(store, target)
        return pmt.MomentTensor(m=pmt.symmat6(*m6_astuple))

1916 

1917 

class MTSource(Source):
    '''
    A moment tensor point source.
    '''

    discretized_source_class = meta.DiscretizedMTSource

    mnn = Float.T(
        default=1.,
        help='north-north component of moment tensor in [Nm]')

    mee = Float.T(
        default=1.,
        help='east-east component of moment tensor in [Nm]')

    mdd = Float.T(
        default=1.,
        help='down-down component of moment tensor in [Nm]')

    mne = Float.T(
        default=0.,
        help='north-east component of moment tensor in [Nm]')

    mnd = Float.T(
        default=0.,
        help='north-down component of moment tensor in [Nm]')

    med = Float.T(
        default=0.,
        help='east-down component of moment tensor in [Nm]')

    def __init__(self, **kwargs):
        # Accept ``m6`` (6-vector) as a shorthand for the six components.
        if 'm6' in kwargs:
            for (k, v) in zip('mnn mee mdd mne mnd med'.split(),
                              kwargs.pop('m6')):
                kwargs[k] = float(v)

        Source.__init__(self, **kwargs)

    @property
    def m6(self):
        # Moment tensor as NumPy 6-vector (mnn, mee, mdd, mne, mnd, med).
        return num.array(self.m6_astuple)

    @property
    def m6_astuple(self):
        return (self.mnn, self.mee, self.mdd, self.mne, self.mnd, self.med)

    @m6.setter
    def m6(self, value):
        self.mnn, self.mee, self.mdd, self.mne, self.mnd, self.med = value

    def base_key(self):
        return Source.base_key(self) + self.m6_astuple

    def discretize_basesource(self, store, target=None):
        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)
        # One m6 row per STF sample via outer broadcasting.
        return meta.DiscretizedMTSource(
            m6s=self.m6[num.newaxis, :] * amplitudes[:, num.newaxis],
            **self._dparams_base_repeated(times))

    def get_magnitude(self, store=None, target=None):
        # Magnitude from the Frobenius-norm based scalar moment of the
        # tensor (off-diagonal terms counted twice for symmetry).
        m6 = self.m6
        return pmt.moment_to_magnitude(
            math.sqrt(num.sum(m6[0:3]**2) + 2.0 * num.sum(m6[3:6]**2)) /
            math.sqrt(2.))

    def pyrocko_moment_tensor(self, store=None, target=None):
        return pmt.MomentTensor(m=pmt.symmat6(*self.m6_astuple))

    def pyrocko_event(self, store=None, target=None, **kwargs):
        mt = self.pyrocko_moment_tensor(store, target)
        return Source.pyrocko_event(
            self, store, target,
            # Reuse the already-computed moment tensor; the original code
            # redundantly called pyrocko_moment_tensor() a second time here.
            moment_tensor=mt,
            magnitude=float(mt.moment_magnitude()),
            **kwargs)

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        '''
        Create an :py:class:`MTSource` from a pyrocko event, taking over
        its moment tensor, or, lacking one, its magnitude.
        '''
        d = {}
        mt = ev.moment_tensor
        if mt:
            d.update(m6=tuple(map(float, mt.m6())))
        else:
            if ev.magnitude is not None:
                # No tensor available: build an isotropic tensor whose
                # scalar moment matches the magnitude (sqrt(2/3) scaling).
                mom = pmt.magnitude_to_moment(ev.magnitude)
                v = math.sqrt(2. / 3.) * mom
                d.update(m6=(v, v, v, 0., 0., 0.))

        d.update(kwargs)
        return super(MTSource, cls).from_pyrocko_event(ev, **d)

2010 

2011 

# Mapping of anchor name to normalized fault-plane coordinates (x, y):
# x is along-strike (-1 = left, +1 = right), y is down-dip
# (-1 = top, +1 = bottom).
map_anchor = {
    'center': (0.0, 0.0),
    'center_left': (-1.0, 0.0),
    'center_right': (1.0, 0.0),
    'top': (0.0, -1.0),
    'top_left': (-1.0, -1.0),
    'top_right': (1.0, -1.0),
    'bottom': (0.0, 1.0),
    'bottom_left': (-1.0, 1.0),
    'bottom_right': (1.0, 1.0)}

2022 

2023 

2024class RectangularSource(SourceWithDerivedMagnitude): 

2025 ''' 

2026 Classical Haskell source model modified for bilateral rupture. 

2027 ''' 

2028 

    discretized_source_class = meta.DiscretizedMTSource

    # Strength may be given as magnitude or slip, not both
    # (see check_conflicts).
    magnitude = Float.T(
        optional=True,
        help='moment magnitude Mw as in [Hanks and Kanamori, 1979]')

    strike = Float.T(
        default=0.0,
        help='strike direction in [deg], measured clockwise from north')

    dip = Float.T(
        default=90.0,
        help='dip angle in [deg], measured downward from horizontal')

    rake = Float.T(
        default=0.0,
        help='rake angle in [deg], '
             'measured counter-clockwise from right-horizontal '
             'in on-plane view')

    length = Float.T(
        default=0.,
        help='length of rectangular source area [m]')

    width = Float.T(
        default=0.,
        help='width of rectangular source area [m]')

    # NOTE(review): help mentions center_left/center right, which are
    # missing from the choices list (map_anchor defines both) — confirm.
    anchor = StringChoice.T(
        choices=['top', 'top_left', 'top_right', 'center', 'bottom',
                 'bottom_left', 'bottom_right'],
        default='center',
        optional=True,
        help='Anchor point for positioning the plane, can be: ``top, center '
             'bottom, top_left, top_right,bottom_left,'
             'bottom_right, center_left, center right``.')

    nucleation_x = Float.T(
        optional=True,
        help='horizontal position of rupture nucleation in normalized fault '
             'plane coordinates (``-1.`` = left edge, ``+1.`` = right edge)')

    nucleation_y = Float.T(
        optional=True,
        help='down-dip position of rupture nucleation in normalized fault '
             'plane coordinates (``-1.`` = upper edge, ``+1.`` = lower edge)')

    velocity = Float.T(
        default=3500.,
        help='speed of rupture front [m/s]')

    slip = Float.T(
        optional=True,
        help='Slip on the rectangular source area [m]')

    # Split of slip into shear and tensile parts, see get_slip_tensile /
    # get_slip_shear.
    opening_fraction = Float.T(
        default=0.,
        help='Determines fraction of slip related to opening. '
             '(``-1``: pure tensile closing, '
             '``0``: pure shear, '
             '``1``: pure tensile opening)')

    decimation_factor = Int.T(
        optional=True,
        default=1,
        help='Sub-source decimation factor, a larger decimation will'
             ' make the result inaccurate but shorten the necessary'
             ' computation time (use for testing puposes only).')

    aggressive_oversampling = Bool.T(
        default=False,
        help='Aggressive oversampling for basesource discretization. '
             'When using \'multilinear\' interpolation oversampling has'
             ' practically no effect.')

2103 

    def __init__(self, **kwargs):
        # Convenience: allow construction from a scalar seismic ``moment``
        # instead of ``magnitude``. ``moment`` is consumed here (it is not a
        # declared property of this class); an explicitly given
        # ``magnitude`` takes precedence and the moment is then discarded.
        if 'moment' in kwargs:
            mom = kwargs.pop('moment')
            if 'magnitude' not in kwargs:
                kwargs['magnitude'] = float(pmt.moment_to_magnitude(mom))

        SourceWithDerivedMagnitude.__init__(self, **kwargs)

2111 

2112 def base_key(self): 

2113 return SourceWithDerivedMagnitude.base_key(self) + ( 

2114 self.magnitude, 

2115 self.slip, 

2116 self.strike, 

2117 self.dip, 

2118 self.rake, 

2119 self.length, 

2120 self.width, 

2121 self.nucleation_x, 

2122 self.nucleation_y, 

2123 self.velocity, 

2124 self.decimation_factor, 

2125 self.anchor) 

2126 

2127 def check_conflicts(self): 

2128 if self.magnitude is not None and self.slip is not None: 

2129 raise DerivedMagnitudeError( 

2130 'Magnitude and slip are both defined.') 

2131 

2132 def get_magnitude(self, store=None, target=None): 

2133 self.check_conflicts() 

2134 if self.magnitude is not None: 

2135 return self.magnitude 

2136 

2137 elif self.slip is not None: 

2138 if None in (store, target): 

2139 raise DerivedMagnitudeError( 

2140 'Magnitude for a rectangular source with slip defined ' 

2141 'can only be derived when earth model and target ' 

2142 'interpolation method are available.') 

2143 

2144 amplitudes = self._discretize(store, target)[2] 

2145 if amplitudes.ndim == 2: 

2146 # CLVD component has no net moment, leave out 

2147 return float(pmt.moment_to_magnitude( 

2148 num.sum(num.abs(amplitudes[0:2, :]).sum()))) 

2149 else: 

2150 return float(pmt.moment_to_magnitude(num.sum(amplitudes))) 

2151 

2152 else: 

2153 return float(pmt.moment_to_magnitude(1.0)) 

2154 

2155 def get_factor(self): 

2156 return 1.0 

2157 

2158 def get_slip_tensile(self): 

2159 return self.slip * self.opening_fraction 

2160 

2161 def get_slip_shear(self): 

2162 return self.slip - abs(self.get_slip_tensile) 

2163 

2164 def _discretize(self, store, target): 

2165 if self.nucleation_x is not None: 

2166 nucx = self.nucleation_x * 0.5 * self.length 

2167 else: 

2168 nucx = None 

2169 

2170 if self.nucleation_y is not None: 

2171 nucy = self.nucleation_y * 0.5 * self.width 

2172 else: 

2173 nucy = None 

2174 

2175 stf = self.effective_stf_pre() 

2176 

2177 points, times, amplitudes, dl, dw, nl, nw = discretize_rect_source( 

2178 store.config.deltas, store.config.deltat, 

2179 self.time, self.north_shift, self.east_shift, self.depth, 

2180 self.strike, self.dip, self.length, self.width, self.anchor, 

2181 self.velocity, stf=stf, nucleation_x=nucx, nucleation_y=nucy, 

2182 decimation_factor=self.decimation_factor, 

2183 aggressive_oversampling=self.aggressive_oversampling) 

2184 

2185 if self.slip is not None: 

2186 if target is not None: 

2187 interpolation = target.interpolation 

2188 else: 

2189 interpolation = 'nearest_neighbor' 

2190 logger.warn( 

2191 'no target information available, will use ' 

2192 '"nearest_neighbor" interpolation when extracting shear ' 

2193 'modulus from earth model') 

2194 

2195 shear_moduli = store.config.get_shear_moduli( 

2196 self.lat, self.lon, 

2197 points=points, 

2198 interpolation=interpolation) 

2199 

2200 tensile_slip = self.get_slip_tensile() 

2201 shear_slip = self.slip - abs(tensile_slip) 

2202 

2203 amplitudes_total = [shear_moduli * shear_slip] 

2204 if tensile_slip != 0: 

2205 bulk_moduli = store.config.get_bulk_moduli( 

2206 self.lat, self.lon, 

2207 points=points, 

2208 interpolation=interpolation) 

2209 

2210 tensile_iso = bulk_moduli * tensile_slip 

2211 tensile_clvd = (2. / 3.) * shear_moduli * tensile_slip 

2212 

2213 amplitudes_total.extend([tensile_iso, tensile_clvd]) 

2214 

2215 amplitudes_total = num.vstack(amplitudes_total).squeeze() * \ 

2216 amplitudes * dl * dw 

2217 

2218 else: 

2219 # normalization to retain total moment 

2220 amplitudes_norm = amplitudes / num.sum(amplitudes) 

2221 moment = self.get_moment(store, target) 

2222 

2223 amplitudes_total = [ 

2224 amplitudes_norm * moment * (1 - abs(self.opening_fraction))] 

2225 if self.opening_fraction != 0.: 

2226 amplitudes_total.append( 

2227 amplitudes_norm * self.opening_fraction * moment) 

2228 

2229 amplitudes_total = num.vstack(amplitudes_total).squeeze() 

2230 

2231 return points, times, num.atleast_1d(amplitudes_total), dl, dw, nl, nw 

2232 

2233 def discretize_basesource(self, store, target=None): 

2234 

2235 points, times, amplitudes, dl, dw, nl, nw = self._discretize( 

2236 store, target) 

2237 

2238 mot = pmt.MomentTensor( 

2239 strike=self.strike, dip=self.dip, rake=self.rake) 

2240 m6s = num.repeat(mot.m6()[num.newaxis, :], times.size, axis=0) 

2241 

2242 if amplitudes.ndim == 1: 

2243 m6s[:, :] *= amplitudes[:, num.newaxis] 

2244 elif amplitudes.ndim == 2: 

2245 # shear MT components 

2246 rotmat1 = pmt.euler_to_matrix( 

2247 d2r * self.dip, d2r * self.strike, d2r * -self.rake) 

2248 m6s[:, :] *= amplitudes[0, :][:, num.newaxis] 

2249 

2250 if amplitudes.shape[0] == 2: 

2251 # tensile MT components - moment/magnitude input 

2252 tensile = pmt.symmat6(1., 1., 3., 0., 0., 0.) 

2253 rot_tensile = pmt.to6(rotmat1.T * tensile * rotmat1) 

2254 

2255 m6s_tensile = rot_tensile[ 

2256 num.newaxis, :] * amplitudes[1, :][:, num.newaxis] 

2257 m6s += m6s_tensile 

2258 

2259 elif amplitudes.shape[0] == 3: 

2260 # tensile MT components - slip input 

2261 iso = pmt.symmat6(1., 1., 1., 0., 0., 0.) 

2262 clvd = pmt.symmat6(-1., -1., 2., 0., 0., 0.) 

2263 

2264 rot_iso = pmt.to6(rotmat1.T * iso * rotmat1) 

2265 rot_clvd = pmt.to6(rotmat1.T * clvd * rotmat1) 

2266 

2267 m6s_iso = rot_iso[ 

2268 num.newaxis, :] * amplitudes[1, :][:, num.newaxis] 

2269 m6s_clvd = rot_clvd[ 

2270 num.newaxis, :] * amplitudes[2, :][:, num.newaxis] 

2271 m6s += m6s_iso + m6s_clvd 

2272 else: 

2273 raise ValueError('Unknwown amplitudes shape!') 

2274 else: 

2275 raise ValueError( 

2276 'Unexpected dimension of {}'.format(amplitudes.ndim)) 

2277 

2278 ds = meta.DiscretizedMTSource( 

2279 lat=self.lat, 

2280 lon=self.lon, 

2281 times=times, 

2282 north_shifts=points[:, 0], 

2283 east_shifts=points[:, 1], 

2284 depths=points[:, 2], 

2285 m6s=m6s, 

2286 dl=dl, 

2287 dw=dw, 

2288 nl=nl, 

2289 nw=nw) 

2290 

2291 return ds 

2292 

2293 def outline(self, cs='xyz'): 

2294 points = outline_rect_source(self.strike, self.dip, self.length, 

2295 self.width, self.anchor) 

2296 

2297 points[:, 0] += self.north_shift 

2298 points[:, 1] += self.east_shift 

2299 points[:, 2] += self.depth 

2300 if cs == 'xyz': 

2301 return points 

2302 elif cs == 'xy': 

2303 return points[:, :2] 

2304 elif cs in ('latlon', 'lonlat', 'latlondepth'): 

2305 latlon = ne_to_latlon( 

2306 self.lat, self.lon, points[:, 0], points[:, 1]) 

2307 

2308 latlon = num.array(latlon).T 

2309 if cs == 'latlon': 

2310 return latlon 

2311 elif cs == 'lonlat': 

2312 return latlon[:, ::-1] 

2313 else: 

2314 return num.concatenate( 

2315 (latlon, points[:, 2].reshape((len(points), 1))), 

2316 axis=1) 

2317 

2318 def points_on_source(self, cs='xyz', **kwargs): 

2319 

2320 points = points_on_rect_source( 

2321 self.strike, self.dip, self.length, self.width, 

2322 self.anchor, **kwargs) 

2323 

2324 points[:, 0] += self.north_shift 

2325 points[:, 1] += self.east_shift 

2326 points[:, 2] += self.depth 

2327 if cs == 'xyz': 

2328 return points 

2329 elif cs == 'xy': 

2330 return points[:, :2] 

2331 elif cs in ('latlon', 'lonlat', 'latlondepth'): 

2332 latlon = ne_to_latlon( 

2333 self.lat, self.lon, points[:, 0], points[:, 1]) 

2334 

2335 latlon = num.array(latlon).T 

2336 if cs == 'latlon': 

2337 return latlon 

2338 elif cs == 'lonlat': 

2339 return latlon[:, ::-1] 

2340 else: 

2341 return num.concatenate( 

2342 (latlon, points[:, 2].reshape((len(points), 1))), 

2343 axis=1) 

2344 

2345 def get_nucleation_abs_coord(self, cs='xy'): 

2346 

2347 if self.nucleation_x is None: 

2348 return None, None 

2349 

2350 coords = from_plane_coords(self.strike, self.dip, self.length, 

2351 self.width, self.depth, self.nucleation_x, 

2352 self.nucleation_y, lat=self.lat, 

2353 lon=self.lon, north_shift=self.north_shift, 

2354 east_shift=self.east_shift, cs=cs) 

2355 return coords 

2356 

2357 def pyrocko_moment_tensor(self, store=None, target=None): 

2358 return pmt.MomentTensor( 

2359 strike=self.strike, 

2360 dip=self.dip, 

2361 rake=self.rake, 

2362 scalar_moment=self.get_moment(store, target)) 

2363 

2364 def pyrocko_event(self, store=None, target=None, **kwargs): 

2365 return SourceWithDerivedMagnitude.pyrocko_event( 

2366 self, store, target, 

2367 **kwargs) 

2368 

2369 @classmethod 

2370 def from_pyrocko_event(cls, ev, **kwargs): 

2371 d = {} 

2372 mt = ev.moment_tensor 

2373 if mt: 

2374 (strike, dip, rake), _ = mt.both_strike_dip_rake() 

2375 d.update( 

2376 strike=float(strike), 

2377 dip=float(dip), 

2378 rake=float(rake), 

2379 magnitude=float(mt.moment_magnitude())) 

2380 

2381 d.update(kwargs) 

2382 return super(RectangularSource, cls).from_pyrocko_event(ev, **d) 

2383 

2384 

class PseudoDynamicRupture(SourceWithDerivedMagnitude):
    '''
    Combined Eikonal and Okada quasi-dynamic rupture model.

    Details are described in :doc:`/topics/pseudo-dynamic-rupture`.
    Note: attribute `stf` is not used so far, but kept for future
    applications.
    '''

    discretized_source_class = meta.DiscretizedMTSource

    strike = Float.T(
        default=0.0,
        help='Strike direction in [deg], measured clockwise from north.')

    dip = Float.T(
        default=0.0,
        help='Dip angle in [deg], measured downward from horizontal.')

    length = Float.T(
        default=10. * km,
        help='Length of rectangular source area in [m].')

    width = Float.T(
        default=5. * km,
        help='Width of rectangular source area in [m].')

    anchor = StringChoice.T(
        choices=['top', 'top_left', 'top_right', 'center', 'bottom',
                 'bottom_left', 'bottom_right'],
        default='center',
        optional=True,
        help='Anchor point for positioning the plane, can be: ``top, center, '
             'bottom, top_left, top_right, bottom_left, '
             'bottom_right, center_left, center_right``.')

    # NOTE: array dtypes use the builtin ``float``/``bool``. The aliases
    # ``num.float``/``num.bool`` were deprecated in NumPy 1.20 and removed
    # in NumPy 1.24; the builtins are what those aliases resolved to.
    nucleation_x__ = Array.T(
        default=num.array([0.]),
        dtype=float,
        serialize_as='list',
        help='Horizontal position of rupture nucleation in normalized fault '
             'plane coordinates (``-1.`` = left edge, ``+1.`` = right edge).')

    nucleation_y__ = Array.T(
        default=num.array([0.]),
        dtype=float,
        serialize_as='list',
        help='Down-dip position of rupture nucleation in normalized fault '
             'plane coordinates (``-1.`` = upper edge, ``+1.`` = lower edge).')

    nucleation_time__ = Array.T(
        optional=True,
        help='Time in [s] after origin, when nucleation points defined by '
             '``nucleation_x`` and ``nucleation_y`` rupture.',
        dtype=float,
        serialize_as='list')

    gamma = Float.T(
        default=0.8,
        help='Scaling factor between rupture velocity and S-wave velocity: '
             r':math:`v_r = \gamma * v_s`.')

    nx = Int.T(
        default=2,
        help='Number of discrete source patches in x direction (along '
             'strike).')

    ny = Int.T(
        default=2,
        help='Number of discrete source patches in y direction (down dip).')

    slip = Float.T(
        optional=True,
        help='Maximum slip of the rectangular source [m]. '
             'Setting the slip the tractions/stress field '
             'will be normalized to accomodate the desired maximum slip.')

    rake = Float.T(
        optional=True,
        help='Rake angle in [deg], '
             'measured counter-clockwise from right-horizontal '
             'in on-plane view. Rake is translated into homogenous tractions '
             'in strike and up-dip direction. ``rake`` is mutually exclusive '
             'with tractions parameter.')

    patches = List.T(
        OkadaSource.T(),
        optional=True,
        help='List of all boundary elements/sub faults/fault patches.')

    patch_mask__ = Array.T(
        dtype=bool,
        serialize_as='list',
        shape=(None,),
        optional=True,
        help='Mask for all boundary elements/sub faults/fault patches. True '
             'leaves the patch in the calculation, False excludes the patch.')

    tractions = TractionField.T(
        optional=True,
        help='Traction field the rupture plane is exposed to. See the'
             ':py:mod:`pyrocko.gf.tractions` module for more details. '
             'If ``tractions=None`` and ``rake`` is given'
             ' :py:class:`~pyrocko.gf.tractions.DirectedTractions` will'
             ' be used.')

    coef_mat = Array.T(
        optional=True,
        help='Coefficient matrix linking traction and dislocation field.',
        dtype=float,
        shape=(None, None))

    eikonal_decimation = Int.T(
        optional=True,
        default=1,
        help='Sub-source eikonal factor, a smaller eikonal factor will'
             ' increase the accuracy of rupture front calculation but'
             ' increases also the computation time.')

    decimation_factor = Int.T(
        optional=True,
        default=1,
        help='Sub-source decimation factor, a larger decimation will'
             ' make the result inaccurate but shorten the necessary'
             ' computation time (use for testing purposes only).')

    nthreads = Int.T(
        optional=True,
        default=1,
        help='Number of threads for Okada forward modelling, '
             'matrix inversion and calculation of point subsources. '
             'Note: for small/medium matrices 1 thread is most efficient.')

    pure_shear = Bool.T(
        optional=True,
        default=False,
        help='Calculate only shear tractions and omit tensile tractions.')

    smooth_rupture = Bool.T(
        default=True,
        help='Smooth the tractions by weighting partially ruptured'
             ' fault patches.')

    aggressive_oversampling = Bool.T(
        default=False,
        help='Aggressive oversampling for basesource discretization. '
             'When using \'multilinear\' interpolation oversampling has'
             ' practically no effect.')

2532 

2533 def __init__(self, **kwargs): 

2534 SourceWithDerivedMagnitude.__init__(self, **kwargs) 

2535 self._interpolators = {} 

2536 self.check_conflicts() 

2537 

2538 @property 

2539 def nucleation_x(self): 

2540 return self.nucleation_x__ 

2541 

2542 @nucleation_x.setter 

2543 def nucleation_x(self, nucleation_x): 

2544 if isinstance(nucleation_x, list): 

2545 nucleation_x = num.array(nucleation_x) 

2546 

2547 elif not isinstance( 

2548 nucleation_x, num.ndarray) and nucleation_x is not None: 

2549 

2550 nucleation_x = num.array([nucleation_x]) 

2551 self.nucleation_x__ = nucleation_x 

2552 

2553 @property 

2554 def nucleation_y(self): 

2555 return self.nucleation_y__ 

2556 

2557 @nucleation_y.setter 

2558 def nucleation_y(self, nucleation_y): 

2559 if isinstance(nucleation_y, list): 

2560 nucleation_y = num.array(nucleation_y) 

2561 

2562 elif not isinstance(nucleation_y, num.ndarray) \ 

2563 and nucleation_y is not None: 

2564 nucleation_y = num.array([nucleation_y]) 

2565 

2566 self.nucleation_y__ = nucleation_y 

2567 

2568 @property 

2569 def nucleation(self): 

2570 nucl_x, nucl_y = self.nucleation_x, self.nucleation_y 

2571 

2572 if (nucl_x is None) or (nucl_y is None): 

2573 return None 

2574 

2575 assert nucl_x.shape[0] == nucl_y.shape[0] 

2576 

2577 return num.concatenate( 

2578 (nucl_x[:, num.newaxis], nucl_y[:, num.newaxis]), axis=1) 

2579 

2580 @nucleation.setter 

2581 def nucleation(self, nucleation): 

2582 if isinstance(nucleation, list): 

2583 nucleation = num.array(nucleation) 

2584 

2585 assert nucleation.shape[1] == 2 

2586 

2587 self.nucleation_x = nucleation[:, 0] 

2588 self.nucleation_y = nucleation[:, 1] 

2589 

2590 @property 

2591 def nucleation_time(self): 

2592 return self.nucleation_time__ 

2593 

2594 @nucleation_time.setter 

2595 def nucleation_time(self, nucleation_time): 

2596 if not isinstance(nucleation_time, num.ndarray) \ 

2597 and nucleation_time is not None: 

2598 nucleation_time = num.array([nucleation_time]) 

2599 

2600 self.nucleation_time__ = nucleation_time 

2601 

2602 @property 

2603 def patch_mask(self): 

2604 if (self.patch_mask__ is not None and 

2605 self.patch_mask__.shape == (self.nx * self.ny,)): 

2606 

2607 return self.patch_mask__ 

2608 else: 

2609 return num.ones(self.nx * self.ny, dtype=bool) 

2610 

2611 @patch_mask.setter 

2612 def patch_mask(self, patch_mask): 

2613 if isinstance(patch_mask, list): 

2614 patch_mask = num.array(patch_mask) 

2615 

2616 self.patch_mask__ = patch_mask 

2617 

2618 def get_tractions(self): 

2619 ''' 

2620 Get source traction vectors. 

2621 

2622 If :py:attr:`rake` is given, unit length directed traction vectors 

2623 (:py:class:`~pyrocko.gf.tractions.DirectedTractions`) are returned, 

2624 else the given :py:attr:`tractions` are used. 

2625 

2626 :returns: 

2627 Traction vectors per patch. 

2628 :rtype: 

2629 :py:class:`~numpy.ndarray`: ``(n_patches, 3)``. 

2630 ''' 

2631 

2632 if self.rake is not None: 

2633 if num.isnan(self.rake): 

2634 raise ValueError('Rake must be a real number, not NaN.') 

2635 

2636 logger.warning( 

2637 'Tractions are derived based on the given source rake.') 

2638 tractions = DirectedTractions(rake=self.rake) 

2639 else: 

2640 tractions = self.tractions 

2641 return tractions.get_tractions(self.nx, self.ny, self.patches) 

2642 

2643 def base_key(self): 

2644 return SourceWithDerivedMagnitude.base_key(self) + ( 

2645 self.slip, 

2646 self.strike, 

2647 self.dip, 

2648 self.rake, 

2649 self.length, 

2650 self.width, 

2651 float(self.nucleation_x.mean()), 

2652 float(self.nucleation_y.mean()), 

2653 self.decimation_factor, 

2654 self.anchor, 

2655 self.pure_shear, 

2656 self.gamma, 

2657 tuple(self.patch_mask)) 

2658 

2659 def check_conflicts(self): 

2660 if self.tractions and self.rake: 

2661 raise AttributeError( 

2662 'Tractions and rake are mutually exclusive.') 

2663 if self.tractions is None and self.rake is None: 

2664 self.rake = 0. 

2665 

2666 def get_magnitude(self, store=None, target=None): 

2667 self.check_conflicts() 

2668 if self.slip is not None or self.tractions is not None: 

2669 if store is None: 

2670 raise DerivedMagnitudeError( 

2671 'Magnitude for a rectangular source with slip or ' 

2672 'tractions defined can only be derived when earth model ' 

2673 'is set.') 

2674 

2675 moment_rate, calc_times = self.discretize_basesource( 

2676 store, target=target).get_moment_rate(store.config.deltat) 

2677 

2678 deltat = num.concatenate(( 

2679 (num.diff(calc_times)[0],), 

2680 num.diff(calc_times))) 

2681 

2682 return float(pmt.moment_to_magnitude( 

2683 num.sum(moment_rate * deltat))) 

2684 

2685 else: 

2686 return float(pmt.moment_to_magnitude(1.0)) 

2687 

2688 def get_factor(self): 

2689 return 1.0 

2690 

2691 def outline(self, cs='xyz'): 

2692 ''' 

2693 Get source outline corner coordinates. 

2694 

2695 :param cs: 

2696 :ref:`Output coordinate system <coordinate-system-names>`. 

2697 :type cs: 

2698 optional, str 

2699 

2700 :returns: 

2701 Corner points in desired coordinate system. 

2702 :rtype: 

2703 :py:class:`~numpy.ndarray`: ``(5, [2, 3])``. 

2704 ''' 

2705 points = outline_rect_source(self.strike, self.dip, self.length, 

2706 self.width, self.anchor) 

2707 

2708 points[:, 0] += self.north_shift 

2709 points[:, 1] += self.east_shift 

2710 points[:, 2] += self.depth 

2711 if cs == 'xyz': 

2712 return points 

2713 elif cs == 'xy': 

2714 return points[:, :2] 

2715 elif cs in ('latlon', 'lonlat', 'latlondepth'): 

2716 latlon = ne_to_latlon( 

2717 self.lat, self.lon, points[:, 0], points[:, 1]) 

2718 

2719 latlon = num.array(latlon).T 

2720 if cs == 'latlon': 

2721 return latlon 

2722 elif cs == 'lonlat': 

2723 return latlon[:, ::-1] 

2724 else: 

2725 return num.concatenate( 

2726 (latlon, points[:, 2].reshape((len(points), 1))), 

2727 axis=1) 

2728 

2729 def points_on_source(self, cs='xyz', **kwargs): 

2730 ''' 

2731 Convert relative plane coordinates to geographical coordinates. 

2732 

2733 Given x and y coordinates (relative source coordinates between -1. 

2734 and 1.) are converted to desired geographical coordinates. Coordinates 

2735 need to be given as :py:class:`~numpy.ndarray` arguments ``points_x`` 

2736 and ``points_y``. 

2737 

2738 :param cs: 

2739 :ref:`Output coordinate system <coordinate-system-names>`. 

2740 :type cs: 

2741 optional, str 

2742 

2743 :returns: 

2744 Point coordinates in desired coordinate system. 

2745 :rtype: 

2746 :py:class:`~numpy.ndarray`: ``(n_points, [2, 3])``. 

2747 ''' 

2748 points = points_on_rect_source( 

2749 self.strike, self.dip, self.length, self.width, 

2750 self.anchor, **kwargs) 

2751 

2752 points[:, 0] += self.north_shift 

2753 points[:, 1] += self.east_shift 

2754 points[:, 2] += self.depth 

2755 if cs == 'xyz': 

2756 return points 

2757 elif cs == 'xy': 

2758 return points[:, :2] 

2759 elif cs in ('latlon', 'lonlat', 'latlondepth'): 

2760 latlon = ne_to_latlon( 

2761 self.lat, self.lon, points[:, 0], points[:, 1]) 

2762 

2763 latlon = num.array(latlon).T 

2764 if cs == 'latlon': 

2765 return latlon 

2766 elif cs == 'lonlat': 

2767 return latlon[:, ::-1] 

2768 else: 

2769 return num.concatenate( 

2770 (latlon, points[:, 2].reshape((len(points), 1))), 

2771 axis=1) 

2772 

2773 def pyrocko_moment_tensor(self, store=None, target=None): 

2774 if store is not None: 

2775 if not self.patches: 

2776 self.discretize_patches(store) 

2777 

2778 slip = self.get_slip() 

2779 weights = num.linalg.norm(slip, axis=1) 

2780 weights /= weights.sum() 

2781 

2782 rakes = num.arctan2(slip[:, 1], slip[:, 0]) * r2d 

2783 rake = (rakes * weights).sum() 

2784 

2785 else: 

2786 tractions = self.get_tractions() 

2787 tractions = tractions.mean(axis=0) 

2788 rake = num.arctan2(tractions[1], tractions[0]) * r2d 

2789 

2790 return pmt.MomentTensor( 

2791 strike=self.strike, 

2792 dip=self.dip, 

2793 rake=rake, 

2794 scalar_moment=self.get_moment(store, target)) 

2795 

2796 def pyrocko_event(self, store=None, target=None, **kwargs): 

2797 return SourceWithDerivedMagnitude.pyrocko_event( 

2798 self, store, target, 

2799 **kwargs) 

2800 

2801 @classmethod 

2802 def from_pyrocko_event(cls, ev, **kwargs): 

2803 d = {} 

2804 mt = ev.moment_tensor 

2805 if mt: 

2806 (strike, dip, rake), _ = mt.both_strike_dip_rake() 

2807 d.update( 

2808 strike=float(strike), 

2809 dip=float(dip), 

2810 rake=float(rake)) 

2811 

2812 d.update(kwargs) 

2813 return super(PseudoDynamicRupture, cls).from_pyrocko_event(ev, **d) 

2814 

    def _discretize_points(self, store, *args, **kwargs):
        '''
        Discretize source plane with equal vertical and horizontal spacing.

        Additional ``*args`` and ``**kwargs`` are passed to
        :py:meth:`points_on_source`.

        :param store:
            Green's function database (needs to cover whole region of the
            source).
        :type store:
            :py:class:`~pyrocko.gf.store.Store`

        :returns:
            Number of points in strike and dip direction, distance
            between adjacent points, coordinates (latlondepth) and
            coordinates (xy on fault) for discrete points.
        :rtype:
            (int, int, float, :py:class:`~numpy.ndarray`,
            :py:class:`~numpy.ndarray`).
        '''
        anch_x, anch_y = map_anchor[self.anchor]

        # Probe the velocity model along a single down-dip line of points
        # (~1 km spacing) covering the fault's depth extent.
        npoints = int(self.width // km) + 1
        points = num.zeros((npoints, 3))
        points[:, 1] = num.linspace(-1., 1., npoints)
        points[:, 1] = (points[:, 1] - anch_y) * self.width/2

        # Rotate from fault-plane coordinates into NED and shift to the
        # source depth.
        rotmat = num.asarray(
            pmt.euler_to_matrix(self.dip*d2r, self.strike*d2r, 0.0))
        points = num.dot(rotmat.T, points.T).T
        points[:, 2] += self.depth

        # Minimum rupture velocity, floored at 0.5 km/s (times gamma) to
        # avoid degenerate grid spacing in very slow layers.
        vs_min = store.config.get_vs(
            self.lat, self.lon, points,
            interpolation='nearest_neighbor')
        vr_min = max(vs_min.min(), .5*km) * self.gamma

        oversampling = 10.
        delta_l = self.length / (self.nx * oversampling)
        delta_w = self.width / (self.ny * oversampling)

        # Grid spacing: the finest of GF temporal sampling times rupture
        # velocity, patch oversampling, and GF spatial sampling — scaled by
        # the user-chosen eikonal decimation.
        delta = self.eikonal_decimation * num.min([
            store.config.deltat * vr_min / oversampling,
            delta_l, delta_w] + [
            deltas for deltas in store.config.deltas])

        # Snap delta to an integer divisor of delta_w so the grid fits the
        # width exactly.
        delta = delta_w / num.ceil(delta_w / delta)

        nx = int(num.ceil(self.length / delta)) + 1
        ny = int(num.ceil(self.width / delta)) + 1

        # Along strike the grid slightly overshoots the fault length;
        # express the overshoot in normalized (-1..1) coordinates.
        rem_l = (nx-1)*delta - self.length
        lim_x = rem_l / self.length

        # Regular grid in normalized fault coordinates, x varying slowest.
        points_xy = num.zeros((nx * ny, 2))
        points_xy[:, 0] = num.repeat(
            num.linspace(-1.-lim_x, 1.+lim_x, nx), ny)
        points_xy[:, 1] = num.tile(
            num.linspace(-1., 1., ny), nx)

        points = self.points_on_source(
            points_x=points_xy[:, 0],
            points_y=points_xy[:, 1],
            **kwargs)

        return nx, ny, delta, points, points_xy

2882 

2883 def _discretize_rupture_v(self, store, interpolation='nearest_neighbor', 

2884 points=None): 

2885 ''' 

2886 Get rupture velocity for discrete points on source plane. 

2887 

2888 :param store: 

2889 Green's function database (needs to cover the whole region of the 

2890 source) 

2891 :type store: 

2892 optional, :py:class:`~pyrocko.gf.store.Store` 

2893 

2894 :param interpolation: 

2895 Interpolation method to use (choose between ``'nearest_neighbor'`` 

2896 and ``'multilinear'``). 

2897 :type interpolation: 

2898 optional, str 

2899 

2900 :param points: 

2901 Coordinates on fault (-1.:1.) of discrete points. 

2902 :type points: 

2903 optional, :py:class:`~numpy.ndarray`: ``(n_points, 2)`` 

2904 

2905 :returns: 

2906 Rupture velocity assumed as :math:`v_s * \\gamma` for discrete 

2907 points. 

2908 :rtype: 

2909 :py:class:`~numpy.ndarray`: ``(n_points, )``. 

2910 ''' 

2911 

2912 if points is None: 

2913 _, _, _, points, _ = self._discretize_points(store, cs='xyz') 

2914 

2915 return store.config.get_vs( 

2916 self.lat, self.lon, 

2917 points=points, 

2918 interpolation=interpolation) * self.gamma 

2919 

2920 def discretize_time( 

2921 self, store, interpolation='nearest_neighbor', 

2922 vr=None, times=None, *args, **kwargs): 

2923 ''' 

2924 Get rupture start time for discrete points on source plane. 

2925 

2926 :param store: 

2927 Green's function database (needs to cover whole region of the 

2928 source) 

2929 :type store: 

2930 :py:class:`~pyrocko.gf.store.Store` 

2931 

2932 :param interpolation: 

2933 Interpolation method to use (choose between ``'nearest_neighbor'`` 

2934 and ``'multilinear'``). 

2935 :type interpolation: 

2936 optional, str 

2937 

2938 :param vr: 

2939 Array, containing rupture user defined rupture velocity values. 

2940 :type vr: 

2941 optional, :py:class:`~numpy.ndarray` 

2942 

2943 :param times: 

2944 Array, containing zeros, where rupture is starting, real positive 

2945 numbers at later secondary nucleation points and -1, where time 

2946 will be calculated. If not given, rupture starts at nucleation_x, 

2947 nucleation_y. Times are given for discrete points with equal 

2948 horizontal and vertical spacing. 

2949 :type times: 

2950 optional, :py:class:`~numpy.ndarray` 

2951 

2952 :returns: 

2953 Coordinates (latlondepth), coordinates (xy), rupture velocity, 

2954 rupture propagation time of discrete points. 

2955 :rtype: 

2956 :py:class:`~numpy.ndarray`: ``(n_points, 3)``, 

2957 :py:class:`~numpy.ndarray`: ``(n_points, 2)``, 

2958 :py:class:`~numpy.ndarray`: ``(n_points_dip, n_points_strike)``, 

2959 :py:class:`~numpy.ndarray`: ``(n_points_dip, n_points_strike)``. 

2960 ''' 

2961 nx, ny, delta, points, points_xy = self._discretize_points( 

2962 store, cs='xyz') 

2963 

2964 if vr is None or vr.shape != tuple((nx, ny)): 

2965 if vr: 

2966 logger.warning( 

2967 'Given rupture velocities are not in right shape: ' 

2968 '(%i, %i), but needed is (%i, %i).', *vr.shape + (nx, ny)) 

2969 vr = self._discretize_rupture_v(store, interpolation, points)\ 

2970 .reshape(nx, ny) 

2971 

2972 if vr.shape != tuple((nx, ny)): 

2973 logger.warning( 

2974 'Given rupture velocities are not in right shape. Therefore' 

2975 ' standard rupture velocity array is used.') 

2976 

2977 def initialize_times(): 

2978 nucl_x, nucl_y = self.nucleation_x, self.nucleation_y 

2979 

2980 if nucl_x.shape != nucl_y.shape: 

2981 raise ValueError( 

2982 'Nucleation coordinates have different shape.') 

2983 

2984 dist_points = num.array([ 

2985 num.linalg.norm(points_xy - num.array([x, y]).ravel(), axis=1) 

2986 for x, y in zip(nucl_x, nucl_y)]) 

2987 nucl_indices = num.argmin(dist_points, axis=1) 

2988 

2989 if self.nucleation_time is None: 

2990 nucl_times = num.zeros_like(nucl_indices) 

2991 else: 

2992 if self.nucleation_time.shape == nucl_x.shape: 

2993 nucl_times = self.nucleation_time 

2994 else: 

2995 raise ValueError( 

2996 'Nucleation coordinates and times have different ' 

2997 'shapes') 

2998 

2999 t = num.full(nx * ny, -1.) 

3000 t[nucl_indices] = nucl_times 

3001 return t.reshape(nx, ny) 

3002 

3003 if times is None: 

3004 times = initialize_times() 

3005 elif times.shape != tuple((nx, ny)): 

3006 times = initialize_times() 

3007 logger.warning( 

3008 'Given times are not in right shape. Therefore standard time' 

3009 ' array is used.') 

3010 

3011 eikonal_ext.eikonal_solver_fmm_cartesian( 

3012 speeds=vr, times=times, delta=delta) 

3013 

3014 return points, points_xy, vr, times 

3015 

3016 def get_vr_time_interpolators( 

3017 self, store, interpolation='nearest_neighbor', force=False, 

3018 **kwargs): 

3019 ''' 

3020 Get interpolators for rupture velocity and rupture time. 

3021 

3022 Additional ``**kwargs`` are passed to :py:meth:`discretize_time`. 

3023 

3024 :param store: 

3025 Green's function database (needs to cover whole region of the 

3026 source). 

3027 :type store: 

3028 :py:class:`~pyrocko.gf.store.Store` 

3029 

3030 :param interpolation: 

3031 Interpolation method to use (choose between ``'nearest_neighbor'`` 

3032 and ``'multilinear'``). 

3033 :type interpolation: 

3034 optional, str 

3035 

3036 :param force: 

3037 Force recalculation of the interpolators (e.g. after change of 

3038 nucleation point locations/times). Default is ``False``. 

3039 :type force: 

3040 optional, bool 

3041 ''' 

3042 interp_map = {'multilinear': 'linear', 'nearest_neighbor': 'nearest'} 

3043 if interpolation not in interp_map: 

3044 raise TypeError( 

3045 'Interpolation method %s not available' % interpolation) 

3046 

3047 if not self._interpolators.get(interpolation, False) or force: 

3048 _, points_xy, vr, times = self.discretize_time( 

3049 store, **kwargs) 

3050 

3051 if self.length <= 0.: 

3052 raise ValueError( 

3053 'length must be larger then 0. not %g' % self.length) 

3054 

3055 if self.width <= 0.: 

3056 raise ValueError( 

3057 'width must be larger then 0. not %g' % self.width) 

3058 

3059 nx, ny = times.shape 

3060 anch_x, anch_y = map_anchor[self.anchor] 

3061 

3062 points_xy[:, 0] = (points_xy[:, 0] - anch_x) * self.length / 2. 

3063 points_xy[:, 1] = (points_xy[:, 1] - anch_y) * self.width / 2. 

3064 

3065 self._interpolators[interpolation] = ( 

3066 nx, ny, times, vr, 

3067 RegularGridInterpolator( 

3068 (points_xy[::ny, 0], points_xy[:ny, 1]), times, 

3069 method=interp_map[interpolation]), 

3070 RegularGridInterpolator( 

3071 (points_xy[::ny, 0], points_xy[:ny, 1]), vr, 

3072 method=interp_map[interpolation])) 

3073 return self._interpolators[interpolation] 

3074 

    def discretize_patches(
            self, store, interpolation='nearest_neighbor', force=False,
            grid_shape=(),
            **kwargs):
        '''
        Get rupture start time and OkadaSource elements for points on rupture.

        All source elements and their corresponding center points are
        calculated and stored in the :py:attr:`patches` attribute.

        Additional ``**kwargs`` are passed to :py:meth:`discretize_time`.

        :param store:
            Green's function database (needs to cover whole region of the
            source).
        :type store:
            :py:class:`~pyrocko.gf.store.Store`

        :param interpolation:
            Interpolation method to use (choose between ``'nearest_neighbor'``
            and ``'multilinear'``).
        :type interpolation:
            optional, str

        :param force:
            Force recalculation of the vr and time interpolators ( e.g. after
            change of nucleation point locations/times). Default is ``False``.
        :type force:
            optional, bool

        :param grid_shape:
            Desired sub fault patch grid size (nlength, nwidth). Either factor
            or grid_shape should be set.
        :type grid_shape:
            optional, tuple of int
        '''
        nx, ny, times, vr, time_interpolator, vr_interpolator = \
            self.get_vr_time_interpolators(
                store,
                interpolation=interpolation, force=force, **kwargs)
        anch_x, anch_y = map_anchor[self.anchor]

        # Plane half-extensions, shifted so the anchor point sits at the
        # local origin; the asserts check the shift preserves total size.
        al = self.length / 2.
        aw = self.width / 2.
        al1 = -(al + anch_x * al)
        al2 = al - anch_x * al
        aw1 = -aw + anch_y * aw
        aw2 = aw + anch_y * aw
        assert num.abs([al1, al2]).sum() == self.length
        assert num.abs([aw1, aw2]).sum() == self.width

        def get_lame(*a, **kw):
            # Shear modulus [Pa] and Poisson ratio from the store's earth
            # model: lamb = vp**2 * rho - 2 * mu, poisson = lamb/(2(lamb+mu)).
            shear_mod = store.config.get_shear_moduli(*a, **kw)
            lamb = store.config.get_vp(*a, **kw)**2 \
                * store.config.get_rho(*a, **kw) - 2. * shear_mod
            return shear_mod, lamb / (2. * (lamb + shear_mod))

        # Elastic parameters at the source reference point, used as mean
        # values for the parent OkadaSource.
        shear_mod, poisson = get_lame(
            self.lat, self.lon,
            num.array([[self.north_shift, self.east_shift, self.depth]]),
            interpolation=interpolation)

        okada_src = OkadaSource(
            lat=self.lat, lon=self.lon,
            strike=self.strike, dip=self.dip,
            north_shift=self.north_shift, east_shift=self.east_shift,
            depth=self.depth,
            al1=al1, al2=al2, aw1=aw1, aw2=aw2,
            poisson=poisson.mean(),
            shearmod=shear_mod.mean(),
            opening=kwargs.get('opening', 0.))

        # Patch grid size: explicit attributes win, then grid_shape, then
        # the eikonal grid size returned by the interpolators.
        if not (self.nx and self.ny):
            if grid_shape:
                self.nx, self.ny = grid_shape
            else:
                self.nx = nx
                self.ny = ny

        source_disc, source_points = okada_src.discretize(self.nx, self.ny)

        # NOTE(review): this per-patch recomputation overwrites shear_mod and
        # poisson, but the results are not used below — looks vestigial;
        # confirm before removing (get_lame queries the store and may raise
        # for out-of-bounds patches).
        shear_mod, poisson = get_lame(
            self.lat, self.lon,
            num.array([src.source_patch()[:3] for src in source_disc]),
            interpolation=interpolation)

        # If the patch grid differs from the eikonal grid, interpolate
        # rupture time and velocity onto the patch centers; else reuse the
        # grids directly (transposed to match patch ordering).
        if (self.nx, self.ny) != (nx, ny):
            times_interp = time_interpolator(source_points[:, :2])
            vr_interp = vr_interpolator(source_points[:, :2])
        else:
            times_interp = times.T.ravel()
            vr_interp = vr.T.ravel()

        # Attach absolute rupture onset time and rupture velocity to each
        # patch.
        for isrc, src in enumerate(source_disc):
            src.vr = vr_interp[isrc]
            src.time = times_interp[isrc] + self.time

        self.patches = source_disc

3173 

3174 def discretize_basesource(self, store, target=None): 

3175 ''' 

3176 Prepare source for synthetic waveform calculation. 

3177 

3178 :param store: 

3179 Green's function database (needs to cover whole region of the 

3180 source). 

3181 :type store: 

3182 :py:class:`~pyrocko.gf.store.Store` 

3183 

3184 :param target: 

3185 Target information. 

3186 :type target: 

3187 optional, :py:class:`~pyrocko.gf.targets.Target` 

3188 

3189 :returns: 

3190 Source discretized by a set of moment tensors and times. 

3191 :rtype: 

3192 :py:class:`~pyrocko.gf.meta.DiscretizedMTSource` 

3193 ''' 

3194 if not target: 

3195 interpolation = 'nearest_neighbor' 

3196 else: 

3197 interpolation = target.interpolation 

3198 

3199 if not self.patches: 

3200 self.discretize_patches(store, interpolation) 

3201 

3202 if self.coef_mat is None: 

3203 self.calc_coef_mat() 

3204 

3205 delta_slip, slip_times = self.get_delta_slip(store) 

3206 npatches = self.nx * self.ny 

3207 ntimes = slip_times.size 

3208 

3209 anch_x, anch_y = map_anchor[self.anchor] 

3210 

3211 pln = self.length / self.nx 

3212 pwd = self.width / self.ny 

3213 

3214 patch_coords = num.array([ 

3215 (p.ix, p.iy) 

3216 for p in self.patches]).reshape(self.nx, self.ny, 2) 

3217 

3218 # boundary condition is zero-slip 

3219 # is not valid to avoid unwished interpolation effects 

3220 slip_grid = num.zeros((self.nx + 2, self.ny + 2, ntimes, 3)) 

3221 slip_grid[1:-1, 1:-1, :, :] = \ 

3222 delta_slip.reshape(self.nx, self.ny, ntimes, 3) 

3223 

3224 slip_grid[0, 0, :, :] = slip_grid[1, 1, :, :] 

3225 slip_grid[0, -1, :, :] = slip_grid[1, -2, :, :] 

3226 slip_grid[-1, 0, :, :] = slip_grid[-2, 1, :, :] 

3227 slip_grid[-1, -1, :, :] = slip_grid[-2, -2, :, :] 

3228 

3229 slip_grid[1:-1, 0, :, :] = slip_grid[1:-1, 1, :, :] 

3230 slip_grid[1:-1, -1, :, :] = slip_grid[1:-1, -2, :, :] 

3231 slip_grid[0, 1:-1, :, :] = slip_grid[1, 1:-1, :, :] 

3232 slip_grid[-1, 1:-1, :, :] = slip_grid[-2, 1:-1, :, :] 

3233 

3234 def make_grid(patch_parameter): 

3235 grid = num.zeros((self.nx + 2, self.ny + 2)) 

3236 grid[1:-1, 1:-1] = patch_parameter.reshape(self.nx, self.ny) 

3237 

3238 grid[0, 0] = grid[1, 1] 

3239 grid[0, -1] = grid[1, -2] 

3240 grid[-1, 0] = grid[-2, 1] 

3241 grid[-1, -1] = grid[-2, -2] 

3242 

3243 grid[1:-1, 0] = grid[1:-1, 1] 

3244 grid[1:-1, -1] = grid[1:-1, -2] 

3245 grid[0, 1:-1] = grid[1, 1:-1] 

3246 grid[-1, 1:-1] = grid[-2, 1:-1] 

3247 

3248 return grid 

3249 

3250 lamb = self.get_patch_attribute('lamb') 

3251 mu = self.get_patch_attribute('shearmod') 

3252 

3253 lamb_grid = make_grid(lamb) 

3254 mu_grid = make_grid(mu) 

3255 

3256 coords_x = num.zeros(self.nx + 2) 

3257 coords_x[1:-1] = patch_coords[:, 0, 0] 

3258 coords_x[0] = coords_x[1] - pln / 2 

3259 coords_x[-1] = coords_x[-2] + pln / 2 

3260 

3261 coords_y = num.zeros(self.ny + 2) 

3262 coords_y[1:-1] = patch_coords[0, :, 1] 

3263 coords_y[0] = coords_y[1] - pwd / 2 

3264 coords_y[-1] = coords_y[-2] + pwd / 2 

3265 

3266 slip_interp = RegularGridInterpolator( 

3267 (coords_x, coords_y, slip_times), 

3268 slip_grid, method='nearest') 

3269 

3270 lamb_interp = RegularGridInterpolator( 

3271 (coords_x, coords_y), 

3272 lamb_grid, method='nearest') 

3273 

3274 mu_interp = RegularGridInterpolator( 

3275 (coords_x, coords_y), 

3276 mu_grid, method='nearest') 

3277 

3278 # discretize basesources 

3279 mindeltagf = min(tuple( 

3280 (self.length / self.nx, self.width / self.ny) + 

3281 tuple(store.config.deltas))) 

3282 

3283 nl = int((1. / self.decimation_factor) * 

3284 num.ceil(pln / mindeltagf)) + 1 

3285 nw = int((1. / self.decimation_factor) * 

3286 num.ceil(pwd / mindeltagf)) + 1 

3287 nsrc_patch = int(nl * nw) 

3288 dl = pln / nl 

3289 dw = pwd / nw 

3290 

3291 patch_area = dl * dw 

3292 

3293 xl = num.linspace(-0.5 * (pln - dl), 0.5 * (pln - dl), nl) 

3294 xw = num.linspace(-0.5 * (pwd - dw), 0.5 * (pwd - dw), nw) 

3295 

3296 base_coords = num.zeros((nsrc_patch, 3), dtype=num.float) 

3297 base_coords[:, 0] = num.tile(xl, nw) 

3298 base_coords[:, 1] = num.repeat(xw, nl) 

3299 base_coords = num.tile(base_coords, (npatches, 1)) 

3300 

3301 center_coords = num.zeros((npatches, 3)) 

3302 center_coords[:, 0] = num.repeat( 

3303 num.arange(self.nx) * pln + pln / 2, self.ny) - self.length / 2 

3304 center_coords[:, 1] = num.tile( 

3305 num.arange(self.ny) * pwd + pwd / 2, self.nx) - self.width / 2 

3306 

3307 base_coords -= center_coords.repeat(nsrc_patch, axis=0) 

3308 nbaselocs = base_coords.shape[0] 

3309 

3310 base_interp = base_coords.repeat(ntimes, axis=0) 

3311 

3312 base_times = num.tile(slip_times, nbaselocs) 

3313 base_interp[:, 0] -= anch_x * self.length / 2 

3314 base_interp[:, 1] -= anch_y * self.width / 2 

3315 base_interp[:, 2] = base_times 

3316 

3317 _, _, _, _, time_interpolator, _ = self.get_vr_time_interpolators( 

3318 store, interpolation=interpolation) 

3319 

3320 time_eikonal_max = time_interpolator.values.max() 

3321 

3322 nbasesrcs = base_interp.shape[0] 

3323 delta_slip = slip_interp(base_interp).reshape(nbaselocs, ntimes, 3) 

3324 lamb = lamb_interp(base_interp[:, :2]).ravel() 

3325 mu = mu_interp(base_interp[:, :2]).ravel() 

3326 

3327 if False: 

3328 try: 

3329 import matplotlib.pyplot as plt 

3330 coords = base_coords.copy() 

3331 norm = num.sum(num.linalg.norm(delta_slip, axis=2), axis=1) 

3332 plt.scatter(coords[:, 0], coords[:, 1], c=norm) 

3333 plt.show() 

3334 except AttributeError: 

3335 pass 

3336 

3337 base_interp[:, 2] = 0. 

3338 rotmat = num.asarray( 

3339 pmt.euler_to_matrix(self.dip * d2r, self.strike * d2r, 0.0)) 

3340 base_interp = num.dot(rotmat.T, base_interp.T).T 

3341 base_interp[:, 0] += self.north_shift 

3342 base_interp[:, 1] += self.east_shift 

3343 base_interp[:, 2] += self.depth 

3344 

3345 slip_strike = delta_slip[:, :, 0].ravel() 

3346 slip_dip = delta_slip[:, :, 1].ravel() 

3347 slip_norm = delta_slip[:, :, 2].ravel() 

3348 

3349 slip_shear = num.linalg.norm([slip_strike, slip_dip], axis=0) 

3350 slip_rake = r2d * num.arctan2(slip_dip, slip_strike) 

3351 

3352 m6s = okada_ext.patch2m6( 

3353 strikes=num.full(nbasesrcs, self.strike, dtype=num.float), 

3354 dips=num.full(nbasesrcs, self.dip, dtype=num.float), 

3355 rakes=slip_rake, 

3356 disl_shear=slip_shear, 

3357 disl_norm=slip_norm, 

3358 lamb=lamb, 

3359 mu=mu, 

3360 nthreads=self.nthreads) 

3361 

3362 m6s *= patch_area 

3363 

3364 dl = -self.patches[0].al1 + self.patches[0].al2 

3365 dw = -self.patches[0].aw1 + self.patches[0].aw2 

3366 

3367 base_times[base_times > time_eikonal_max] = time_eikonal_max 

3368 

3369 ds = meta.DiscretizedMTSource( 

3370 lat=self.lat, 

3371 lon=self.lon, 

3372 times=base_times + self.time, 

3373 north_shifts=base_interp[:, 0], 

3374 east_shifts=base_interp[:, 1], 

3375 depths=base_interp[:, 2], 

3376 m6s=m6s, 

3377 dl=dl, 

3378 dw=dw, 

3379 nl=self.nx, 

3380 nw=self.ny) 

3381 

3382 return ds 

3383 

3384 def calc_coef_mat(self): 

3385 ''' 

3386 Calculate coefficients connecting tractions and dislocations. 

3387 ''' 

3388 if not self.patches: 

3389 raise ValueError( 

3390 'Patches are needed. Please calculate them first.') 

3391 

3392 self.coef_mat = make_okada_coefficient_matrix( 

3393 self.patches, nthreads=self.nthreads, pure_shear=self.pure_shear) 

3394 

3395 def get_patch_attribute(self, attr): 

3396 ''' 

3397 Get patch attributes. 

3398 

3399 :param attr: 

3400 Name of selected attribute (see 

3401 :py:class`pyrocko.modelling.okada.OkadaSource`). 

3402 :type attr: 

3403 str 

3404 

3405 :returns: 

3406 Array with attribute value for each fault patch. 

3407 :rtype: 

3408 :py:class:`~numpy.ndarray` 

3409 

3410 ''' 

3411 if not self.patches: 

3412 raise ValueError( 

3413 'Patches are needed. Please calculate them first.') 

3414 return num.array([getattr(p, attr) for p in self.patches]) 

3415 

3416 def get_slip( 

3417 self, 

3418 time=None, 

3419 scale_slip=True, 

3420 interpolation='nearest_neighbor', 

3421 **kwargs): 

3422 ''' 

3423 Get slip per subfault patch for given time after rupture start. 

3424 

3425 :param time: 

3426 Time after origin [s], for which slip is computed. If not 

3427 given, final static slip is returned. 

3428 :type time: 

3429 optional, float > 0. 

3430 

3431 :param scale_slip: 

3432 If ``True`` and :py:attr:`slip` given, all slip values are scaled 

3433 to fit the given maximum slip. 

3434 :type scale_slip: 

3435 optional, bool 

3436 

3437 :param interpolation: 

3438 Interpolation method to use (choose between ``'nearest_neighbor'`` 

3439 and ``'multilinear'``). 

3440 :type interpolation: 

3441 optional, str 

3442 

3443 :returns: 

3444 Inverted dislocations (:math:`u_{strike}, u_{dip}, u_{tensile}`) 

3445 for each source patch. 

3446 :rtype: 

3447 :py:class:`~numpy.ndarray`: ``(n_sources, 3)`` 

3448 ''' 

3449 

3450 if self.patches is None: 

3451 raise ValueError( 

3452 'Please discretize the source first (discretize_patches())') 

3453 npatches = len(self.patches) 

3454 tractions = self.get_tractions() 

3455 time_patch_max = self.get_patch_attribute('time').max() - self.time 

3456 

3457 time_patch = time 

3458 if time is None: 

3459 time_patch = time_patch_max 

3460 

3461 if self.coef_mat is None: 

3462 self.calc_coef_mat() 

3463 

3464 if tractions.shape != (npatches, 3): 

3465 raise AttributeError( 

3466 'The traction vector is of invalid shape.' 

3467 ' Required shape is (npatches, 3)') 

3468 

3469 patch_mask = num.ones(npatches, dtype=num.bool) 

3470 if self.patch_mask is not None: 

3471 patch_mask = self.patch_mask 

3472 

3473 times = self.get_patch_attribute('time') - self.time 

3474 times[~patch_mask] = time_patch + 1. # exlcude unmasked patches 

3475 relevant_sources = num.nonzero(times <= time_patch)[0] 

3476 disloc_est = num.zeros_like(tractions) 

3477 

3478 if self.smooth_rupture: 

3479 patch_activation = num.zeros(npatches) 

3480 

3481 nx, ny, times, vr, time_interpolator, vr_interpolator = \ 

3482 self.get_vr_time_interpolators( 

3483 store, interpolation=interpolation) 

3484 

3485 # Getting the native Eikonal grid, bit hackish 

3486 points_x = num.round(time_interpolator.grid[0], decimals=2) 

3487 points_y = num.round(time_interpolator.grid[1], decimals=2) 

3488 times_eikonal = time_interpolator.values 

3489 

3490 time_max = time 

3491 if time is None: 

3492 time_max = times_eikonal.max() 

3493 

3494 for ip, p in enumerate(self.patches): 

3495 ul = num.round((p.ix + p.al1, p.iy + p.aw1), decimals=2) 

3496 lr = num.round((p.ix + p.al2, p.iy + p.aw2), decimals=2) 

3497 

3498 idx_length = num.logical_and( 

3499 points_x >= ul[0], points_x <= lr[0]) 

3500 idx_width = num.logical_and( 

3501 points_y >= ul[1], points_y <= lr[1]) 

3502 

3503 times_patch = times_eikonal[num.ix_(idx_length, idx_width)] 

3504 if times_patch.size == 0: 

3505 raise AttributeError('could not use smooth_rupture') 

3506 

3507 patch_activation[ip] = \ 

3508 (times_patch <= time_max).sum() / times_patch.size 

3509 

3510 if time_patch == 0 and time_patch != time_patch_max: 

3511 patch_activation[ip] = 0. 

3512 

3513 patch_activation[~patch_mask] = 0. # exlcude unmasked patches 

3514 

3515 relevant_sources = num.nonzero(patch_activation > 0.)[0] 

3516 

3517 if relevant_sources.size == 0: 

3518 return disloc_est 

3519 

3520 indices_disl = num.repeat(relevant_sources * 3, 3) 

3521 indices_disl[1::3] += 1 

3522 indices_disl[2::3] += 2 

3523 

3524 disloc_est[relevant_sources] = invert_fault_dislocations_bem( 

3525 stress_field=tractions[relevant_sources, :].ravel(), 

3526 coef_mat=self.coef_mat[indices_disl, :][:, indices_disl], 

3527 pure_shear=self.pure_shear, nthreads=self.nthreads, 

3528 epsilon=None, 

3529 **kwargs) 

3530 

3531 if self.smooth_rupture: 

3532 disloc_est *= patch_activation[:, num.newaxis] 

3533 

3534 if scale_slip and self.slip is not None: 

3535 disloc_tmax = num.zeros(npatches) 

3536 

3537 indices_disl = num.repeat(num.nonzero(patch_mask)[0] * 3, 3) 

3538 indices_disl[1::3] += 1 

3539 indices_disl[2::3] += 2 

3540 

3541 disloc_tmax[patch_mask] = num.linalg.norm( 

3542 invert_fault_dislocations_bem( 

3543 stress_field=tractions[patch_mask, :].ravel(), 

3544 coef_mat=self.coef_mat[indices_disl, :][:, indices_disl], 

3545 pure_shear=self.pure_shear, nthreads=self.nthreads, 

3546 epsilon=None, 

3547 **kwargs), axis=1) 

3548 

3549 disloc_tmax_max = disloc_tmax.max() 

3550 if disloc_tmax_max == 0.: 

3551 logger.warning( 

3552 'slip scaling not performed. Maximum slip is 0.') 

3553 

3554 disloc_est *= self.slip / disloc_tmax_max 

3555 

3556 return disloc_est 

3557 

3558 def get_delta_slip( 

3559 self, 

3560 store=None, 

3561 deltat=None, 

3562 delta=True, 

3563 interpolation='nearest_neighbor', 

3564 **kwargs): 

3565 ''' 

3566 Get slip change snapshots. 

3567 

3568 The time interval, within which the slip changes are computed is 

3569 determined by the sampling rate of the Green's function database or 

3570 ``deltat``. Additional ``**kwargs`` are passed to :py:meth:`get_slip`. 

3571 

3572 :param store: 

3573 Green's function database (needs to cover whole region of of the 

3574 source). Its sampling interval is used as time increment for slip 

3575 difference calculation. Either ``deltat`` or ``store`` should be 

3576 given. 

3577 :type store: 

3578 optional, :py:class:`~pyrocko.gf.store.Store` 

3579 

3580 :param deltat: 

3581 Time interval for slip difference calculation [s]. Either 

3582 ``deltat`` or ``store`` should be given. 

3583 :type deltat: 

3584 optional, float 

3585 

3586 :param delta: 

3587 If ``True``, slip differences between two time steps are given. If 

3588 ``False``, cumulative slip for all time steps. 

3589 :type delta: 

3590 optional, bool 

3591 

3592 :param interpolation: 

3593 Interpolation method to use (choose between ``'nearest_neighbor'`` 

3594 and ``'multilinear'``). 

3595 :type interpolation: 

3596 optional, str 

3597 

3598 :returns: 

3599 Displacement changes(:math:`\\Delta u_{strike}, 

3600 \\Delta u_{dip} , \\Delta u_{tensile}`) for each source patch and 

3601 time; corner times, for which delta slip is computed. The order of 

3602 displacement changes array is: 

3603 

3604 .. math:: 

3605 

3606 &[[\\\\ 

3607 &[\\Delta u_{strike, patch1, t1}, 

3608 \\Delta u_{dip, patch1, t1}, 

3609 \\Delta u_{tensile, patch1, t1}],\\\\ 

3610 &[\\Delta u_{strike, patch1, t2}, 

3611 \\Delta u_{dip, patch1, t2}, 

3612 \\Delta u_{tensile, patch1, t2}]\\\\ 

3613 &], [\\\\ 

3614 &[\\Delta u_{strike, patch2, t1}, ...],\\\\ 

3615 &[\\Delta u_{strike, patch2, t2}, ...]]]\\\\ 

3616 

3617 :rtype: :py:class:`~numpy.ndarray`: ``(n_sources, n_times, 3)``, 

3618 :py:class:`~numpy.ndarray`: ``(n_times, )`` 

3619 ''' 

3620 if store and deltat: 

3621 raise AttributeError( 

3622 'Argument collision. ' 

3623 'Please define only the store or the deltat argument.') 

3624 

3625 if store: 

3626 deltat = store.config.deltat 

3627 

3628 if not deltat: 

3629 raise AttributeError('Please give a GF store or set deltat.') 

3630 

3631 npatches = len(self.patches) 

3632 

3633 _, _, _, _, time_interpolator, _ = self.get_vr_time_interpolators( 

3634 store, interpolation=interpolation) 

3635 tmax = time_interpolator.values.max() 

3636 

3637 calc_times = num.arange(0., tmax + deltat, deltat) 

3638 calc_times[calc_times > tmax] = tmax 

3639 

3640 disloc_est = num.zeros((npatches, calc_times.size, 3)) 

3641 

3642 for itime, t in enumerate(calc_times): 

3643 disloc_est[:, itime, :] = self.get_slip( 

3644 time=t, scale_slip=False, **kwargs) 

3645 

3646 if self.slip: 

3647 disloc_tmax = num.linalg.norm( 

3648 self.get_slip(scale_slip=False, time=tmax), 

3649 axis=1) 

3650 

3651 disloc_tmax_max = disloc_tmax.max() 

3652 if disloc_tmax_max == 0.: 

3653 logger.warning( 

3654 'Slip scaling not performed. Maximum slip is 0.') 

3655 else: 

3656 disloc_est *= self.slip / disloc_tmax_max 

3657 

3658 if not delta: 

3659 return disloc_est, calc_times 

3660 

3661 # if we have only one timestep there is no gradient 

3662 if calc_times.size > 1: 

3663 disloc_init = disloc_est[:, 0, :] 

3664 disloc_est = num.diff(disloc_est, axis=1) 

3665 disloc_est = num.concatenate(( 

3666 disloc_init[:, num.newaxis, :], disloc_est), axis=1) 

3667 

3668 calc_times = calc_times 

3669 

3670 return disloc_est, calc_times 

3671 

3672 def get_slip_rate(self, *args, **kwargs): 

3673 ''' 

3674 Get slip rate inverted from patches. 

3675 

3676 The time interval, within which the slip rates are computed is 

3677 determined by the sampling rate of the Green's function database or 

3678 ``deltat``. Additional ``*args`` and ``**kwargs`` are passed to 

3679 :py:meth:`get_delta_slip`. 

3680 

3681 :returns: 

3682 Slip rates (:math:`\\Delta u_{strike}/\\Delta t`, 

3683 :math:`\\Delta u_{dip}/\\Delta t, \\Delta u_{tensile}/\\Delta t`) 

3684 for each source patch and time; corner times, for which slip rate 

3685 is computed. The order of sliprate array is: 

3686 

3687 .. math:: 

3688 

3689 &[[\\\\ 

3690 &[\\Delta u_{strike, patch1, t1}/\\Delta t, 

3691 \\Delta u_{dip, patch1, t1}/\\Delta t, 

3692 \\Delta u_{tensile, patch1, t1}/\\Delta t],\\\\ 

3693 &[\\Delta u_{strike, patch1, t2}/\\Delta t, 

3694 \\Delta u_{dip, patch1, t2}/\\Delta t, 

3695 \\Delta u_{tensile, patch1, t2}/\\Delta t]], [\\\\ 

3696 &[\\Delta u_{strike, patch2, t1}/\\Delta t, ...],\\\\ 

3697 &[\\Delta u_{strike, patch2, t2}/\\Delta t, ...]]]\\\\ 

3698 

3699 :rtype: :py:class:`~numpy.ndarray`: ``(n_sources, n_times, 3)``, 

3700 :py:class:`~numpy.ndarray`: ``(n_times, )`` 

3701 ''' 

3702 ddisloc_est, calc_times = self.get_delta_slip( 

3703 *args, delta=True, **kwargs) 

3704 

3705 dt = num.concatenate( 

3706 [(num.diff(calc_times)[0], ), num.diff(calc_times)]) 

3707 slip_rate = num.linalg.norm(ddisloc_est, axis=2) / dt 

3708 

3709 return slip_rate, calc_times 

3710 

3711 def get_moment_rate_patches(self, *args, **kwargs): 

3712 ''' 

3713 Get scalar seismic moment rate for each patch individually. 

3714 

3715 Additional ``*args`` and ``**kwargs`` are passed to 

3716 :py:meth:`get_slip_rate`. 

3717 

3718 :returns: 

3719 Seismic moment rate for each source patch and time; corner times, 

3720 for which patch moment rate is computed based on slip rate. The 

3721 order of the moment rate array is: 

3722 

3723 .. math:: 

3724 

3725 &[\\\\ 

3726 &[(\\Delta M / \\Delta t)_{patch1, t1}, 

3727 (\\Delta M / \\Delta t)_{patch1, t2}, ...],\\\\ 

3728 &[(\\Delta M / \\Delta t)_{patch2, t1}, 

3729 (\\Delta M / \\Delta t)_{patch, t2}, ...],\\\\ 

3730 &[...]]\\\\ 

3731 

3732 :rtype: :py:class:`~numpy.ndarray`: ``(n_sources, n_times)``, 

3733 :py:class:`~numpy.ndarray`: ``(n_times, )`` 

3734 ''' 

3735 slip_rate, calc_times = self.get_slip_rate(*args, **kwargs) 

3736 

3737 shear_mod = self.get_patch_attribute('shearmod') 

3738 p_length = self.get_patch_attribute('length') 

3739 p_width = self.get_patch_attribute('width') 

3740 

3741 dA = p_length * p_width 

3742 

3743 mom_rate = shear_mod[:, num.newaxis] * slip_rate * dA[:, num.newaxis] 

3744 

3745 return mom_rate, calc_times 

3746 

3747 def get_moment_rate(self, store, target=None, deltat=None): 

3748 ''' 

3749 Get seismic source moment rate for the total source (STF). 

3750 

3751 :param store: 

3752 Green's function database (needs to cover whole region of of the 

3753 source). Its ``deltat`` [s] is used as time increment for slip 

3754 difference calculation. Either ``deltat`` or ``store`` should be 

3755 given. 

3756 :type store: 

3757 :py:class:`~pyrocko.gf.store.Store` 

3758 

3759 :param target: 

3760 Target information, needed for interpolation method. 

3761 :type target: 

3762 optional, :py:class:`~pyrocko.gf.targets.Target` 

3763 

3764 :param deltat: 

3765 Time increment for slip difference calculation [s]. If not given 

3766 ``store.deltat`` is used. 

3767 :type deltat: 

3768 optional, float 

3769 

3770 :return: 

3771 Seismic moment rate [Nm/s] for each time; corner times, for which 

3772 moment rate is computed. The order of the moment rate array is: 

3773 

3774 .. math:: 

3775 

3776 &[\\\\ 

3777 &(\\Delta M / \\Delta t)_{t1},\\\\ 

3778 &(\\Delta M / \\Delta t)_{t2},\\\\ 

3779 &...]\\\\ 

3780 

3781 :rtype: 

3782 :py:class:`~numpy.ndarray`: ``(n_times, )``, 

3783 :py:class:`~numpy.ndarray`: ``(n_times, )`` 

3784 ''' 

3785 if not deltat: 

3786 deltat = store.config.deltat 

3787 return self.discretize_basesource( 

3788 store, target=target).get_moment_rate(deltat) 

3789 

3790 def get_moment(self, *args, **kwargs): 

3791 ''' 

3792 Get seismic cumulative moment. 

3793 

3794 Additional ``*args`` and ``**kwargs`` are passed to 

3795 :py:meth:`get_magnitude`. 

3796 

3797 :returns: 

3798 Cumulative seismic moment in [Nm]. 

3799 :rtype: 

3800 float 

3801 ''' 

3802 return float(pmt.magnitude_to_moment(self.get_magnitude( 

3803 *args, **kwargs))) 

3804 

3805 def rescale_slip(self, magnitude=None, moment=None, **kwargs): 

3806 ''' 

3807 Rescale source slip based on given target magnitude or seismic moment. 

3808 

3809 Rescale the maximum source slip to fit the source moment magnitude or 

3810 seismic moment to the given target values. Either ``magnitude`` or 

3811 ``moment`` need to be given. Additional ``**kwargs`` are passed to 

3812 :py:meth:`get_moment`. 

3813 

3814 :param magnitude: 

3815 Target moment magnitude :math:`M_\\mathrm{w}` as in 

3816 [Hanks and Kanamori, 1979] 

3817 :type magnitude: 

3818 optional, float 

3819 

3820 :param moment: 

3821 Target seismic moment :math:`M_0` [Nm]. 

3822 :type moment: 

3823 optional, float 

3824 ''' 

3825 if self.slip is None: 

3826 self.slip = 1. 

3827 logger.warning('No slip found for rescaling. ' 

3828 'An initial slip of 1 m is assumed.') 

3829 

3830 if magnitude is None and moment is None: 

3831 raise ValueError( 

3832 'Either target magnitude or moment need to be given.') 

3833 

3834 moment_init = self.get_moment(**kwargs) 

3835 

3836 if magnitude is not None: 

3837 moment = pmt.magnitude_to_moment(magnitude) 

3838 

3839 self.slip *= moment / moment_init 

3840 

    def get_centroid(self, store, *args, **kwargs):
        '''
        Centroid of the pseudo dynamic rupture model.

        The centroid location and time are derived from the locations and times
        of the individual patches weighted with their moment contribution.
        Additional ``**kwargs`` are passed to :py:meth:`pyrocko_moment_tensor`.

        :param store:
            Green's function database (needs to cover whole region of the
            source). Its ``deltat`` [s] is used as time increment for slip
            difference calculation. Either ``deltat`` or ``store`` should be
            given.
        :type store:
            :py:class:`~pyrocko.gf.store.Store`

        :returns:
            The centroid location and associated moment tensor.
        :rtype:
            :py:class:`pyrocko.model.Event`
        '''
        # NOTE: `time` here shadows the module-level `time` import within
        # this method; it is the rupture time interpolator, whose maximum
        # value is the total rupture duration.
        _, _, _, _, time, _ = self.get_vr_time_interpolators(store)
        t_max = time.values.max()

        # One moment-rate sample per patch (deltat spans the full duration).
        moment_rate, times = self.get_moment_rate_patches(deltat=t_max)

        # Per-patch moment, used as weights for location and time averaging.
        moment = num.sum(moment_rate * times, axis=1)
        weights = moment / moment.sum()

        norths = self.get_patch_attribute('north_shift')
        easts = self.get_patch_attribute('east_shift')
        depths = self.get_patch_attribute('depth')
        # Patch rupture onset times relative to the origin time
        # (reuses/overwrites the `times` array from above).
        times = self.get_patch_attribute('time') - self.time

        centroid_n = num.sum(weights * norths)
        centroid_e = num.sum(weights * easts)
        centroid_d = num.sum(weights * depths)
        centroid_t = num.sum(weights * times) + self.time

        centroid_lat, centroid_lon = ne_to_latlon(
            self.lat, self.lon, centroid_n, centroid_e)

        mt = self.pyrocko_moment_tensor(store, *args, **kwargs)

        return model.Event(
            lat=centroid_lat,
            lon=centroid_lon,
            depth=centroid_d,
            time=centroid_t,
            moment_tensor=mt,
            magnitude=mt.magnitude,
            duration=t_max)

3893 

3894 

3895class DoubleDCSource(SourceWithMagnitude): 

3896 ''' 

3897 Two double-couple point sources separated in space and time. 

3898 Moment share between the sub-sources is controlled by the 

3899 parameter mix. 

3900 The position of the subsources is dependent on the moment 

3901 distribution between the two sources. Depth, east and north 

3902 shift are given for the centroid between the two double-couples. 

3903 The subsources will positioned according to their moment shares 

3904 around this centroid position. 

3905 This is done according to their delta parameters, which are 

3906 therefore in relation to that centroid. 

3907 Note that depth of the subsources therefore can be 

3908 depth+/-delta_depth. For shallow earthquakes therefore 

3909 the depth has to be chosen deeper to avoid sampling 

3910 above surface. 

3911 ''' 

3912 

3913 strike1 = Float.T( 

3914 default=0.0, 

3915 help='strike direction in [deg], measured clockwise from north') 

3916 

3917 dip1 = Float.T( 

3918 default=90.0, 

3919 help='dip angle in [deg], measured downward from horizontal') 

3920 

3921 azimuth = Float.T( 

3922 default=0.0, 

3923 help='azimuth to second double-couple [deg], ' 

3924 'measured at first, clockwise from north') 

3925 

3926 rake1 = Float.T( 

3927 default=0.0, 

3928 help='rake angle in [deg], ' 

3929 'measured counter-clockwise from right-horizontal ' 

3930 'in on-plane view') 

3931 

3932 strike2 = Float.T( 

3933 default=0.0, 

3934 help='strike direction in [deg], measured clockwise from north') 

3935 

3936 dip2 = Float.T( 

3937 default=90.0, 

3938 help='dip angle in [deg], measured downward from horizontal') 

3939 

3940 rake2 = Float.T( 

3941 default=0.0, 

3942 help='rake angle in [deg], ' 

3943 'measured counter-clockwise from right-horizontal ' 

3944 'in on-plane view') 

3945 

3946 delta_time = Float.T( 

3947 default=0.0, 

3948 help='separation of double-couples in time (t2-t1) [s]') 

3949 

3950 delta_depth = Float.T( 

3951 default=0.0, 

3952 help='difference in depth (z2-z1) [m]') 

3953 

3954 distance = Float.T( 

3955 default=0.0, 

3956 help='distance between the two double-couples [m]') 

3957 

3958 mix = Float.T( 

3959 default=0.5, 

3960 help='how to distribute the moment to the two doublecouples ' 

3961 'mix=0 -> m1=1 and m2=0; mix=1 -> m1=0, m2=1') 

3962 

3963 stf1 = STF.T( 

3964 optional=True, 

3965 help='Source time function of subsource 1 ' 

3966 '(if given, overrides STF from attribute :py:gattr:`Source.stf`)') 

3967 

3968 stf2 = STF.T( 

3969 optional=True, 

3970 help='Source time function of subsource 2 ' 

3971 '(if given, overrides STF from attribute :py:gattr:`Source.stf`)') 

3972 

3973 discretized_source_class = meta.DiscretizedMTSource 

3974 

3975 def base_key(self): 

3976 return ( 

3977 self.time, self.depth, self.lat, self.north_shift, 

3978 self.lon, self.east_shift, type(self).__name__) + \ 

3979 self.effective_stf1_pre().base_key() + \ 

3980 self.effective_stf2_pre().base_key() + ( 

3981 self.strike1, self.dip1, self.rake1, 

3982 self.strike2, self.dip2, self.rake2, 

3983 self.delta_time, self.delta_depth, 

3984 self.azimuth, self.distance, self.mix) 

3985 

3986 def get_factor(self): 

3987 return self.moment 

3988 

3989 def effective_stf1_pre(self): 

3990 return self.stf1 or self.stf or g_unit_pulse 

3991 

3992 def effective_stf2_pre(self): 

3993 return self.stf2 or self.stf or g_unit_pulse 

3994 

3995 def effective_stf_post(self): 

3996 return g_unit_pulse 

3997 

    def split(self):
        '''
        Return the two subsources as separate :py:class:`DCSource` objects.

        The offsets between the subsources (``delta_time``, ``delta_depth``
        and the horizontal offset given by ``azimuth``/``distance``) are
        distributed around this source's reference point according to
        ``mix``, and the total moment is partitioned as
        m1 = moment * (1 - mix), m2 = moment * mix.
        '''
        a1 = 1.0 - self.mix
        a2 = self.mix
        delta_north = math.cos(self.azimuth * d2r) * self.distance
        delta_east = math.sin(self.azimuth * d2r) * self.distance

        dc1 = DCSource(
            lat=self.lat,
            lon=self.lon,
            time=self.time - self.delta_time * a2,
            north_shift=self.north_shift - delta_north * a2,
            east_shift=self.east_shift - delta_east * a2,
            depth=self.depth - self.delta_depth * a2,
            moment=self.moment * a1,
            strike=self.strike1,
            dip=self.dip1,
            rake=self.rake1,
            stf=self.stf1 or self.stf)

        dc2 = DCSource(
            lat=self.lat,
            lon=self.lon,
            time=self.time + self.delta_time * a1,
            north_shift=self.north_shift + delta_north * a1,
            east_shift=self.east_shift + delta_east * a1,
            depth=self.depth + self.delta_depth * a1,
            moment=self.moment * a2,
            strike=self.strike2,
            dip=self.dip2,
            rake=self.rake2,
            stf=self.stf2 or self.stf)

        return [dc1, dc2]

4031 

    def discretize_basesource(self, store, target=None):
        '''
        Discretize both subsources into one
        :py:class:`~pyrocko.gf.meta.DiscretizedMTSource`.

        Each subsource contributes one point per sample of its discretized
        STF; positions and times are offset around the reference point
        according to ``mix``.
        '''
        a1 = 1.0 - self.mix
        a2 = self.mix
        # Unit-sum moment tensors; the total moment enters via get_factor().
        mot1 = pmt.MomentTensor(strike=self.strike1, dip=self.dip1,
                                rake=self.rake1, scalar_moment=a1)
        mot2 = pmt.MomentTensor(strike=self.strike2, dip=self.dip2,
                                rake=self.rake2, scalar_moment=a2)

        delta_north = math.cos(self.azimuth * d2r) * self.distance
        delta_east = math.sin(self.azimuth * d2r) * self.distance

        times1, amplitudes1 = self.effective_stf1_pre().discretize_t(
            store.config.deltat, self.time - self.delta_time * a2)

        times2, amplitudes2 = self.effective_stf2_pre().discretize_t(
            store.config.deltat, self.time + self.delta_time * a1)

        nt1 = times1.size
        nt2 = times2.size

        ds = meta.DiscretizedMTSource(
            lat=self.lat,
            lon=self.lon,
            times=num.concatenate((times1, times2)),
            north_shifts=num.concatenate((
                num.repeat(self.north_shift - delta_north * a2, nt1),
                num.repeat(self.north_shift + delta_north * a1, nt2))),
            east_shifts=num.concatenate((
                num.repeat(self.east_shift - delta_east * a2, nt1),
                num.repeat(self.east_shift + delta_east * a1, nt2))),
            depths=num.concatenate((
                num.repeat(self.depth - self.delta_depth * a2, nt1),
                num.repeat(self.depth + self.delta_depth * a1, nt2))),
            m6s=num.vstack((
                mot1.m6()[num.newaxis, :] * amplitudes1[:, num.newaxis],
                mot2.m6()[num.newaxis, :] * amplitudes2[:, num.newaxis])))

        return ds

4070 

    def pyrocko_moment_tensor(self, store=None, target=None):
        '''
        Return the summed moment tensor of both subsources.
        '''
        a1 = 1.0 - self.mix
        a2 = self.mix
        mot1 = pmt.MomentTensor(strike=self.strike1, dip=self.dip1,
                                rake=self.rake1,
                                scalar_moment=a1 * self.moment)
        mot2 = pmt.MomentTensor(strike=self.strike2, dip=self.dip2,
                                rake=self.rake2,
                                scalar_moment=a2 * self.moment)
        return pmt.MomentTensor(m=mot1.m() + mot2.m())

4081 

    def pyrocko_event(self, store=None, target=None, **kwargs):
        # Delegate to the common implementation, attaching the combined
        # moment tensor of both subsources.
        return SourceWithMagnitude.pyrocko_event(
            self, store, target,
            moment_tensor=self.pyrocko_moment_tensor(store, target),
            **kwargs)

4087 

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        '''
        Create a source from a pyrocko event.

        If the event carries a moment tensor, both subsources are set to
        the same (strike, dip, rake) and ``mix=0.0``, i.e. all moment goes
        to subsource 1.
        '''
        d = {}
        mt = ev.moment_tensor
        if mt:
            (strike, dip, rake), _ = mt.both_strike_dip_rake()
            d.update(
                strike1=float(strike),
                dip1=float(dip),
                rake1=float(rake),
                strike2=float(strike),
                dip2=float(dip),
                rake2=float(rake),
                mix=0.0,
                magnitude=float(mt.moment_magnitude()))

        d.update(kwargs)
        source = super(DoubleDCSource, cls).from_pyrocko_event(ev, **d)
        # Move the common STF to subsource 1; subsource 2 (zero moment with
        # mix=0.0) gets a zero-duration STF.
        source.stf1 = source.stf
        source.stf2 = HalfSinusoidSTF(effective_duration=0.)
        source.stf = None
        return source

4110 

4111 

class RingfaultSource(SourceWithMagnitude):
    '''
    A ring fault with vertical doublecouples.

    The ring is discretized into ``npointsources`` point sources placed on
    a circle of the given ``diameter``, rotated into the plane defined by
    ``strike`` and ``dip``.
    '''

    diameter = Float.T(
        default=1.0,
        help='diameter of the ring in [m]')

    sign = Float.T(
        default=1.0,
        help='inside of the ring moves up (+1) or down (-1)')

    strike = Float.T(
        default=0.0,
        help='strike direction of the ring plane, clockwise from north,'
             ' in [deg]')

    dip = Float.T(
        default=0.0,
        help='dip angle of the ring plane from horizontal in [deg]')

    npointsources = Int.T(
        default=360,
        help='number of point sources to use')

    discretized_source_class = meta.DiscretizedMTSource

    def base_key(self):
        # ``sign`` is not part of the key; it only scales the stacked
        # result through get_factor().
        return Source.base_key(self) + (
            self.strike, self.dip, self.diameter, self.npointsources)

    def get_factor(self):
        # Total moment with polarity applied.
        return self.sign * self.moment

    def discretize_basesource(self, store=None, target=None):
        # NOTE(review): ``store`` defaults to None but is dereferenced
        # below (store.config.deltat) — it is effectively required.
        n = self.npointsources
        phi = num.linspace(0, 2.0 * num.pi, n, endpoint=False)

        # Point positions on a horizontal circle ...
        points = num.zeros((n, 3))
        points[:, 0] = num.cos(phi) * 0.5 * self.diameter
        points[:, 1] = num.sin(phi) * 0.5 * self.diameter

        # ... rotated into the (dip, strike) plane ...
        rotmat = num.array(pmt.euler_to_matrix(
            self.dip * d2r, self.strike * d2r, 0.0))
        points = num.dot(rotmat.T, points.T).T  # !!! ?

        # ... and shifted to the source position.
        points[:, 0] += self.north_shift
        points[:, 1] += self.east_shift
        points[:, 2] += self.depth

        # Unit vertical double-couple; the moment 1/n is shared equally
        # among the n point sources.
        m = num.array(pmt.MomentTensor(strike=90., dip=90., rake=-90.,
                                       scalar_moment=1.0 / n).m())

        # Per-point rotation about the vertical axis by the ring angle phi.
        rotmats = num.transpose(
            [[num.cos(phi), num.sin(phi), num.zeros(n)],
             [-num.sin(phi), num.cos(phi), num.zeros(n)],
             [num.zeros(n), num.zeros(n), num.ones(n)]], (2, 0, 1))

        ms = num.zeros((n, 3, 3))
        for i in range(n):
            mtemp = num.dot(rotmats[i].T, num.dot(m, rotmats[i]))
            ms[i, :, :] = num.dot(rotmat.T, num.dot(mtemp, rotmat))

        # Pack the symmetric tensors into 6-component rows.
        m6s = num.vstack((ms[:, 0, 0], ms[:, 1, 1], ms[:, 2, 2],
                          ms[:, 0, 1], ms[:, 0, 2], ms[:, 1, 2])).T

        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)

        nt = times.size

        # One entry per (point, STF-sample) combination.
        return meta.DiscretizedMTSource(
            times=num.tile(times, n),
            lat=self.lat,
            lon=self.lon,
            north_shifts=num.repeat(points[:, 0], nt),
            east_shifts=num.repeat(points[:, 1], nt),
            depths=num.repeat(points[:, 2], nt),
            m6s=num.repeat(m6s, nt, axis=0) * num.tile(
                amplitudes, n)[:, num.newaxis])

4193 

4194 

class CombiSource(Source):
    '''
    Composite source model.

    Aggregates several sub-sources into a single source object. On
    construction, all sub-sources are transferred to a common origin (that
    of the first sub-source) and the composite's reference point is set to
    the mean position and time of the parts.
    '''

    discretized_source_class = meta.DiscretizedMTSource

    subsources = List.T(Source.T())

    def __init__(self, subsources=None, **kwargs):
        '''
        :param subsources: non-empty list of :py:class:`Source` objects
        :raises BadRequest: if no sub-source is given
        '''
        # Avoid a mutable default argument; None behaves like the old
        # empty-list default (both raise BadRequest below).
        if not subsources:
            raise BadRequest(
                'Need at least one sub-source to create a CombiSource object.')

        lats = num.array(
            [subsource.lat for subsource in subsources], dtype=float)
        lons = num.array(
            [subsource.lon for subsource in subsources], dtype=float)

        lat, lon = lats[0], lons[0]

        # Bug fix: the condition was ``not num.all(lats == lat) and
        # num.all(lons == lon)``, which relocated the sub-sources only when
        # latitudes differed while all longitudes agreed. Sub-sources must
        # be moved to a common origin whenever either coordinate differs.
        if not (num.all(lats == lat) and num.all(lons == lon)):
            subsources = [s.clone() for s in subsources]
            for subsource in subsources[1:]:
                subsource.set_origin(lat, lon)

        # Reference point of the composite: mean position/time of the parts.
        depth = float(num.mean([p.depth for p in subsources]))
        time = float(num.mean([p.time for p in subsources]))
        north_shift = float(num.mean([p.north_shift for p in subsources]))
        east_shift = float(num.mean([p.east_shift for p in subsources]))
        kwargs.update(
            time=time,
            lat=float(lat),
            lon=float(lon),
            north_shift=north_shift,
            east_shift=east_shift,
            depth=depth)

        Source.__init__(self, subsources=subsources, **kwargs)

    def get_factor(self):
        # Sub-source factors are applied individually in
        # discretize_basesource, so the composite factor is unity.
        return 1.0

    def discretize_basesource(self, store, target=None):
        '''
        Discretize all sub-sources and combine them into one
        :py:class:`~pyrocko.gf.meta.DiscretizedMTSource`.
        '''
        dsources = []
        for sf in self.subsources:
            ds = sf.discretize_basesource(store, target)
            ds.m6s *= sf.get_factor()
            dsources.append(ds)

        return meta.DiscretizedMTSource.combine(dsources)

4245 

4246 

class SFSource(Source):
    '''
    A single force point source.

    Supported GF schemes: `'elastic5'`.
    '''

    discretized_source_class = meta.DiscretizedSFSource

    fn = Float.T(
        default=0.,
        help='northward component of single force [N]')

    fe = Float.T(
        default=0.,
        help='eastward component of single force [N]')

    fd = Float.T(
        default=0.,
        help='downward component of single force [N]')

    def __init__(self, **kwargs):
        Source.__init__(self, **kwargs)

    def base_key(self):
        # The force components enter the stacking directly, so they are
        # part of the key.
        return Source.base_key(self) + (self.fn, self.fe, self.fd)

    def get_factor(self):
        # Forces are given in absolute units; no extra scaling.
        return 1.0

    def discretize_basesource(self, store, target=None):
        # Spread the force vector over the samples of the discretized
        # source time function.
        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)
        forces = amplitudes[:, num.newaxis] * num.array(
            [[self.fn, self.fe, self.fd]], dtype=float)

        return meta.DiscretizedSFSource(forces=forces,
                                        **self._dparams_base_repeated(times))

    def pyrocko_event(self, store=None, target=None, **kwargs):
        # NOTE(review): no force or magnitude information is attached to
        # the produced event here.
        return Source.pyrocko_event(
            self, store, target,
            **kwargs)

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        d = {}
        d.update(kwargs)
        return super(SFSource, cls).from_pyrocko_event(ev, **d)

4296 

4297 

class PorePressurePointSource(Source):
    '''
    Excess pore pressure point source.

    For poro-elastic initial value problem where an excess pore pressure is
    brought into a small source volume.
    '''

    discretized_source_class = meta.DiscretizedPorePressureSource

    pp = Float.T(
        default=1.0,
        help='initial excess pore pressure in [Pa]')

    def base_key(self):
        # ``pp`` enters only as a scale factor via get_factor(), so the
        # plain position/time key suffices.
        return Source.base_key(self)

    def get_factor(self):
        return self.pp

    def discretize_basesource(self, store, target=None):
        # Single unit-pressure point; ``arr`` is a helper defined elsewhere
        # in this module.
        return meta.DiscretizedPorePressureSource(pp=arr(1.0),
                                                  **self._dparams_base())

4321 

4322 

class PorePressureLineSource(Source):
    '''
    Excess pore pressure line source.

    The line source is centered at (north_shift, east_shift, depth).
    '''

    discretized_source_class = meta.DiscretizedPorePressureSource

    pp = Float.T(
        default=1.0,
        help='initial excess pore pressure in [Pa]')

    length = Float.T(
        default=0.0,
        help='length of the line source [m]')

    azimuth = Float.T(
        default=0.0,
        help='azimuth direction, clockwise from north [deg]')

    dip = Float.T(
        default=90.,
        help='dip direction, downward from horizontal [deg]')

    def base_key(self):
        return Source.base_key(self) + (self.azimuth, self.dip, self.length)

    def get_factor(self):
        return self.pp

    def discretize_basesource(self, store, target=None):

        # Odd number of points, chosen so that the point spacing
        # length/(n-1) is at most half the smallest store grid spacing.
        n = 2 * int(math.ceil(self.length / num.min(store.config.deltas))) + 1

        # Positions along the line, from -length/2 to +length/2.
        a = num.linspace(-0.5 * self.length, 0.5 * self.length, n)

        sa = math.sin(self.azimuth * d2r)
        ca = math.cos(self.azimuth * d2r)
        sd = math.sin(self.dip * d2r)
        cd = math.cos(self.dip * d2r)

        points = num.zeros((n, 3))
        points[:, 0] = self.north_shift + a * ca * cd
        points[:, 1] = self.east_shift + a * sa * cd
        points[:, 2] = self.depth + a * sd

        # The unit pressure is distributed evenly over the n points.
        return meta.DiscretizedPorePressureSource(
            times=util.num_full(n, self.time),
            lat=self.lat,
            lon=self.lon,
            north_shifts=points[:, 0],
            east_shifts=points[:, 1],
            depths=points[:, 2],
            pp=num.ones(n) / n)

4378 

4379 

class Request(Object):
    '''
    Synthetic seismogram computation request.

    ::

        Request(**kwargs)
        Request(sources, targets, **kwargs)
    '''

    sources = List.T(
        Source.T(),
        help='list of sources for which to produce synthetics.')

    targets = List.T(
        Target.T(),
        help='list of targets for which to produce synthetics.')

    @classmethod
    def args2kwargs(cls, args):
        '''
        Translate positional constructor arguments to keyword arguments.
        '''
        nargs = len(args)
        if nargs not in (0, 2, 3):
            raise BadRequest('Invalid arguments.')

        return dict(sources=args[0], targets=args[1]) if nargs == 2 else {}

    def __init__(self, *args, **kwargs):
        kwargs.update(self.args2kwargs(args))
        sources = kwargs.pop('sources', [])
        targets = kwargs.pop('targets', [])

        # Single objects may be given instead of lists, for convenience.
        if isinstance(sources, Source):
            sources = [sources]

        if isinstance(targets, (Target, StaticTarget)):
            targets = [targets]

        Object.__init__(self, sources=sources, targets=targets, **kwargs)

    @property
    def targets_dynamic(self):
        '''Subset of targets which are :py:class:`Target` instances.'''
        return [t for t in self.targets if isinstance(t, Target)]

    @property
    def targets_static(self):
        '''Subset of targets which are :py:class:`StaticTarget` instances.'''
        return [t for t in self.targets if isinstance(t, StaticTarget)]

    @property
    def has_dynamic(self):
        return bool(self.targets_dynamic)

    @property
    def has_statics(self):
        return bool(self.targets_static)

    def subsources_map(self):
        '''Group the sources by their base key.'''
        groups = defaultdict(list)
        for source in self.sources:
            groups[source.base_key()].append(source)

        return groups

    def subtargets_map(self):
        '''Group the targets by their base key.'''
        groups = defaultdict(list)
        for target in self.targets:
            groups[target.base_key()].append(target)

        return groups

    def subrequest_map(self):
        '''Cartesian product of the source groups and target groups.'''
        return {
            (ks, kt): (ls, lt)
            for ks, ls in self.subsources_map().items()
            for kt, lt in self.subtargets_map().items()}

4460 

4461 

class ProcessingStats(Object):
    '''
    Timing and counting statistics collected while processing a request.

    The ``t_perc_*`` attributes presumably hold per-stage percentages of
    the total processing time — the names suggest so; the aggregation
    happens elsewhere in this module.
    '''
    t_perc_get_store_and_receiver = Float.T(default=0.)
    t_perc_discretize_source = Float.T(default=0.)
    t_perc_make_base_seismogram = Float.T(default=0.)
    t_perc_make_same_span = Float.T(default=0.)
    t_perc_post_process = Float.T(default=0.)
    t_perc_optimize = Float.T(default=0.)
    t_perc_stack = Float.T(default=0.)
    t_perc_static_get_store = Float.T(default=0.)
    t_perc_static_discretize_basesource = Float.T(default=0.)
    t_perc_static_sum_statics = Float.T(default=0.)
    t_perc_static_post_process = Float.T(default=0.)
    t_wallclock = Float.T(default=0.)
    t_cpu = Float.T(default=0.)
    n_read_blocks = Int.T(default=0)
    n_results = Int.T(default=0)
    n_subrequests = Int.T(default=0)
    n_stores = Int.T(default=0)
    n_records_stacked = Int.T(default=0)

4481 

4482 

class Response(Object):
    '''
    Response object to a synthetic seismogram computation request.
    '''

    request = Request.T()
    results_list = List.T(List.T(meta.SeismosizerResult.T()))
    stats = ProcessingStats.T()

    def pyrocko_traces(self):
        '''
        Return a list of requested
        :class:`~pyrocko.trace.Trace` instances.
        '''

        return [
            result.trace.pyrocko_trace()
            for results in self.results_list
            for result in results
            if isinstance(result, meta.Result)]

    def kite_scenes(self):
        '''
        Return a list of requested
        :class:`~kite.scenes` instances.
        '''

        return [
            result.get_scene()
            for results in self.results_list
            for result in results
            if isinstance(result, meta.KiteSceneResult)]

    def static_results(self):
        '''
        Return a list of requested
        :class:`~pyrocko.gf.meta.StaticResult` instances.
        '''

        return [
            result
            for results in self.results_list
            for result in results
            if isinstance(result, meta.StaticResult)]

    def iter_results(self, get='pyrocko_traces'):
        '''
        Generator function to iterate over results of request.

        Yields associated :py:class:`Source`,
        :class:`~pyrocko.gf.targets.Target`,
        :class:`~pyrocko.trace.Trace` instances in each iteration.
        '''

        for isource, source in enumerate(self.request.sources):
            for itarget, target in enumerate(self.request.targets):
                result = self.results_list[isource][itarget]
                if get == 'pyrocko_traces':
                    yield source, target, result.trace.pyrocko_trace()
                elif get == 'results':
                    yield source, target, result

    def snuffle(self, **kwargs):
        '''
        Open *snuffler* with requested traces.
        '''

        trace.snuffle(self.pyrocko_traces(), **kwargs)

4558 

4559 

class Engine(Object):
    '''
    Base class for synthetic seismogram calculators.
    '''

    def get_store_ids(self):
        '''
        Get list of available GF store IDs.

        The base implementation knows no stores; subclasses override this.
        '''

        return []

4571 

4572 

class Rule(object):
    '''
    Base class for rules mapping GF store components to target quantities.
    '''

4575 

4576 

class VectorRule(Rule):
    '''
    Rule combining the (north, east, down) components of a vector quantity
    into a single target channel, optionally differentiating the result.
    '''

    def __init__(self, quantity, differentiate=0, integrate=0):
        self.components = [quantity + '.' + c for c in 'ned']
        self.differentiate = differentiate
        self.integrate = integrate

    def required_components(self, target):
        # Only components with a non-vanishing projection factor are needed.
        sa, ca, sd, cd = target.get_sin_cos_factors()
        return tuple(
            comp for comp, factor
            in zip(self.components, (ca * cd, sa * cd, sd))
            if nonzero(factor))

    def apply_(self, target, base_seismogram):
        sa, ca, sd, cd = target.get_sin_cos_factors()

        # Weighted sum over the contributing components.
        data = 0.0
        deltat = None
        for comp, factor in zip(self.components, (ca * cd, sa * cd, sd)):
            if nonzero(factor):
                data = data + base_seismogram[comp].data * factor
                deltat = base_seismogram[comp].deltat

        if self.differentiate:
            data = util.diff_fd(self.differentiate, 4, deltat, data)

        if self.integrate:
            raise NotImplementedError('Integration is not implemented yet.')

        return data

4625 

4626 

class HorizontalVectorRule(Rule):
    '''
    Rule combining the (north, east) components of a horizontal vector
    quantity into a single target channel, optionally differentiating.
    '''

    def __init__(self, quantity, differentiate=0, integrate=0):
        self.components = [quantity + '.' + c for c in 'ne']
        self.differentiate = differentiate
        self.integrate = integrate

    def required_components(self, target):
        # Only components with a non-vanishing projection factor are needed.
        sa, ca, _, _ = target.get_sin_cos_factors()
        return tuple(
            comp for comp, factor in zip(self.components, (ca, sa))
            if nonzero(factor))

    def apply_(self, target, base_seismogram):
        sa, ca, _, _ = target.get_sin_cos_factors()

        # Weighted sum over the contributing components.
        data = 0.0
        for comp, factor in zip(self.components, (ca, sa)):
            if nonzero(factor):
                data = data + base_seismogram[comp].data * factor

        if self.differentiate:
            # As in the original: the sampling interval is taken from the
            # east component.
            deltat = base_seismogram[self.components[1]].deltat
            data = util.diff_fd(self.differentiate, 4, deltat, data)

        if self.integrate:
            raise NotImplementedError('Integration is not implemented yet.')

        return data

4667 

4668 

class ScalarRule(Rule):
    '''
    Rule for a scalar quantity (e.g. pore pressure), optionally
    differentiating the synthesized time series.
    '''

    def __init__(self, quantity, differentiate=0):
        self.c = quantity
        # Bug fix: ``differentiate`` was accepted but never stored, so
        # apply_() crashed with AttributeError when reading
        # ``self.differentiate``.
        self.differentiate = differentiate

    def required_components(self, target):
        # A scalar quantity needs exactly its own component.
        return (self.c, )

    def apply_(self, target, base_seismogram):
        # Copy so the caller's base seismogram data is not modified.
        data = base_seismogram[self.c].data.copy()
        deltat = base_seismogram[self.c].deltat
        if self.differentiate:
            data = util.diff_fd(self.differentiate, 4, deltat, data)

        return data

4684 

4685 

class StaticDisplacement(Rule):
    '''
    Rule providing the three static displacement components to a target.
    '''

    def required_components(self, target):
        # Static displacement always needs all three (n, e, d) components.
        return tuple(['displacement.%s' % c for c in list('ned')])

    def apply_(self, target, base_statics):
        # For satellite targets, additionally project the displacement onto
        # the line of sight; note the sign flip of the downward component.
        if isinstance(target, SatelliteTarget):
            los_fac = target.get_los_factors()
            base_statics['displacement.los'] =\
                (los_fac[:, 0] * -base_statics['displacement.d'] +
                 los_fac[:, 1] * base_statics['displacement.e'] +
                 los_fac[:, 2] * base_statics['displacement.n'])
        return base_statics

4699 

4700 

# Rules to obtain each dynamic target quantity from the components a GF
# store provides, in order of preference: e.g. velocity is taken directly
# if available, otherwise derived by differentiating displacement.
channel_rules = {
    'displacement': [VectorRule('displacement')],
    'rotation': [VectorRule('rotation')],
    'velocity': [
        VectorRule('velocity'),
        VectorRule('displacement', differentiate=1)],
    'acceleration': [
        VectorRule('acceleration'),
        VectorRule('velocity', differentiate=1),
        VectorRule('displacement', differentiate=2)],
    'pore_pressure': [ScalarRule('pore_pressure')],
    'vertical_tilt': [HorizontalVectorRule('vertical_tilt')],
    'darcy_velocity': [VectorRule('darcy_velocity')],
}

# Rules for static targets.
static_rules = {
    'displacement': [StaticDisplacement()]
}

4719 

4720 

class OutOfBoundsContext(Object):
    '''
    Diagnostic context attached to out-of-bounds errors during processing.
    '''
    source = Source.T()
    target = Target.T()
    distance = Float.T()
    components = List.T(String.T())

4726 

4727 

def process_dynamic_timeseries(work, psources, ptargets, engine, nthreads=0):
    '''
    Process dynamic timeseries work units, computing per-store batches of
    seismograms.

    Yields tuples ``((isource, itarget, result), tcounters)``.
    '''
    dsource_cache = {}

    # Dummy per-stage counters, kept for interface compatibility with
    # process_dynamic().
    tcounters = list(range(6))

    # Tag each target with its index so results can be attributed later.
    for itarget, target in enumerate(ptargets):
        target._id = itarget

    # Collect the set of targets referenced by the work units. (A
    # previously collected but unused set of sources has been removed.)
    targets = set()
    for w in work:
        _, _, isources, itargets = w
        targets.update([ptargets[itarget] for itarget in itargets])

    store_ids = set([t.store_id for t in targets])

    for isource, source in enumerate(psources):

        # Union of GF components required by any of the targets.
        components = set()
        for target in targets:
            rule = engine.get_rule(source, target)
            components.update(rule.required_components(target))

        for store_id in store_ids:
            store_targets = [t for t in targets if t.store_id == store_id]

            sample_rates = set([t.sample_rate for t in store_targets])
            interpolations = set([t.interpolation for t in store_targets])

            base_seismograms = []
            store_targets_out = []

            # Group targets by (sample_rate, interpolation); each group is
            # computed in one engine call. base_seismograms is kept aligned
            # with store_targets_out.
            for samp_rate in sample_rates:
                for interp in interpolations:
                    engine_targets = [
                        t for t in store_targets if t.sample_rate == samp_rate
                        and t.interpolation == interp]

                    if not engine_targets:
                        continue

                    store_targets_out += engine_targets

                    base_seismograms += engine.base_seismograms(
                        source,
                        engine_targets,
                        components,
                        dsource_cache,
                        nthreads)

            for iseis, seismogram in enumerate(base_seismograms):
                for tr in seismogram.values():
                    if tr.err != store.SeismosizerErrorEnum.SUCCESS:
                        # Bug fix: index into store_targets_out, which is
                        # aligned with base_seismograms; store_targets may
                        # be ordered differently after the grouping above.
                        e = SeismosizerError(
                            'Seismosizer failed with return code %i\n%s' % (
                                tr.err, str(
                                    OutOfBoundsContext(
                                        source=source,
                                        target=store_targets_out[iseis],
                                        distance=source.distance_to(
                                            store_targets_out[iseis]),
                                        components=components))))
                        raise e

            for seismogram, target in zip(base_seismograms, store_targets_out):

                try:
                    result = engine._post_process_dynamic(
                        seismogram, source, target)
                except SeismosizerError as e:
                    result = e

                yield (isource, target._id, result), tcounters

4804 

4805 

def process_dynamic(work, psources, ptargets, engine, nthreads=0):
    '''
    Process dynamic waveform work units, one source-target pair at a time.

    Yields tuples ``((isource, itarget, result), tcounters)``.
    '''
    # Cache of discretized sources, shared across targets — presumably
    # keyed per (source, store); see the engine's caching helper.
    dsource_cache = {}

    for w in work:
        _, _, isources, itargets = w

        sources = [psources[isource] for isource in isources]
        targets = [ptargets[itarget] for itarget in itargets]

        # Union of GF components needed by all targets of this work unit;
        # the rule lookup uses the first source as representative.
        components = set()
        for target in targets:
            rule = engine.get_rule(sources[0], target)
            components.update(rule.required_components(target))

        for isource, source in zip(isources, sources):
            for itarget, target in zip(itargets, targets):

                try:
                    base_seismogram, tcounters = engine.base_seismogram(
                        source, target, components, dsource_cache, nthreads)
                except meta.OutOfBounds as e:
                    # Attach diagnostic context before re-raising.
                    e.context = OutOfBoundsContext(
                        source=sources[0],
                        target=targets[0],
                        distance=sources[0].distance_to(targets[0]),
                        components=components)
                    raise

                # Aggregate stacking statistics over all components.
                n_records_stacked = 0
                t_optimize = 0.0
                t_stack = 0.0

                for _, tr in base_seismogram.items():
                    n_records_stacked += tr.n_records_stacked
                    t_optimize += tr.t_optimize
                    t_stack += tr.t_stack

                try:
                    result = engine._post_process_dynamic(
                        base_seismogram, source, target)
                    result.n_records_stacked = n_records_stacked
                    result.n_shared_stacking = len(sources) *\
                        len(targets)
                    result.t_optimize = t_optimize
                    result.t_stack = t_stack
                except SeismosizerError as e:
                    result = e

                # ``xtime`` is a timing function defined elsewhere in this
                # module.
                tcounters.append(xtime())
                yield (isource, itarget, result), tcounters

4856 

4857 

def process_static(work, psources, ptargets, engine, nthreads=0):
    '''
    Process static work units, one source-target pair at a time.

    Yields tuples ``((isource, itarget, result), tcounters)``.
    '''
    for w in work:
        _, _, isources, itargets = w

        sources = [psources[isource] for isource in isources]
        targets = [ptargets[itarget] for itarget in itargets]

        for isource, source in zip(isources, sources):
            for itarget, target in zip(itargets, targets):
                components = engine.get_rule(source, target)\
                    .required_components(target)

                try:
                    base_statics, tcounters = engine.base_statics(
                        source, target, components, nthreads)
                except meta.OutOfBounds as e:
                    # Attach diagnostic context before re-raising; no
                    # distance is computed for static targets.
                    e.context = OutOfBoundsContext(
                        source=sources[0],
                        target=targets[0],
                        distance=float('nan'),
                        components=components)
                    raise
                result = engine._post_process_statics(
                    base_statics, source, target)
                tcounters.append(xtime())

                yield (isource, itarget, result), tcounters

4885 

4886 

class LocalEngine(Engine):
    '''
    Offline synthetic seismogram calculator.

    :param use_env: if ``True``, fill :py:attr:`store_superdirs` and
        :py:attr:`store_dirs` with paths set in environment variables
        GF_STORE_SUPERDIRS and GF_STORE_DIRS.
    :param use_config: if ``True``, fill :py:attr:`store_superdirs` and
        :py:attr:`store_dirs` with paths set in the user's config file.

        The config file can be found at :file:`~/.pyrocko/config.pf`

        .. code-block :: python

            gf_store_dirs: ['/home/pyrocko/gf_stores/ak135/']
            gf_store_superdirs: ['/home/pyrocko/gf_stores/']
    '''

    # Superdirs are scanned for store subdirectories; individual store
    # dirs are added as-is. See iter_store_dirs().
    store_superdirs = List.T(
        String.T(),
        help='directories which are searched for Green\'s function stores')

    store_dirs = List.T(
        String.T(),
        help='additional individual Green\'s function store directories')

    default_store_id = String.T(
        optional=True,
        help='default store ID to be used when a request does not provide '
             'one')

4917 

    def __init__(self, **kwargs):
        # ``use_env`` and ``use_config`` are consumed here and not passed
        # on to the Engine/Object constructor.
        use_env = kwargs.pop('use_env', False)
        use_config = kwargs.pop('use_config', False)
        Engine.__init__(self, **kwargs)
        if use_env:
            # The environment variables hold colon-separated path lists.
            env_store_superdirs = os.environ.get('GF_STORE_SUPERDIRS', '')
            env_store_dirs = os.environ.get('GF_STORE_DIRS', '')
            if env_store_superdirs:
                self.store_superdirs.extend(env_store_superdirs.split(':'))

            if env_store_dirs:
                self.store_dirs.extend(env_store_dirs.split(':'))

        if use_config:
            c = config.config()
            self.store_superdirs.extend(c.gf_store_superdirs)
            self.store_dirs.extend(c.gf_store_dirs)

        self._check_store_dirs_type()
        # Lazily populated caches: store ID -> directory, and store ID ->
        # open Store object.
        self._id_to_store_dir = {}
        self._open_stores = {}
        self._effective_default_store_id = None

4940 

4941 def _check_store_dirs_type(self): 

4942 for sdir in ['store_dirs', 'store_superdirs']: 

4943 if not isinstance(self.__getattribute__(sdir), list): 

4944 raise TypeError("{} of {} is not of type list".format( 

4945 sdir, self.__class__.__name__)) 

4946 

4947 def _get_store_id(self, store_dir): 

4948 store_ = store.Store(store_dir) 

4949 store_id = store_.config.id 

4950 store_.close() 

4951 return store_id 

4952 

4953 def _looks_like_store_dir(self, store_dir): 

4954 return os.path.isdir(store_dir) and \ 

4955 all(os.path.isfile(pjoin(store_dir, x)) for x in 

4956 ('index', 'traces', 'config')) 

4957 

    def iter_store_dirs(self):
        '''
        Collect the realpaths of all candidate store directories.

        Scans each superdir for entries that look like GF stores and adds
        the individually configured store directories.

        NOTE: despite the name, a set is returned, not an iterator.
        '''
        store_dirs = set()
        for d in self.store_superdirs:
            if not os.path.exists(d):
                logger.warning('store_superdir not available: %s' % d)
                continue

            for entry in os.listdir(d):
                store_dir = os.path.realpath(pjoin(d, entry))
                if self._looks_like_store_dir(store_dir):
                    store_dirs.add(store_dir)

        # Explicitly configured store dirs are added without checking.
        for store_dir in self.store_dirs:
            store_dirs.add(os.path.realpath(store_dir))

        return store_dirs

4974 

    def _scan_stores(self):
        '''
        (Re)build the store ID -> directory mapping.

        :raises DuplicateStoreId: if the same store ID is found in two
            different directories.
        '''
        for store_dir in self.iter_store_dirs():
            store_id = self._get_store_id(store_dir)
            if store_id not in self._id_to_store_dir:
                self._id_to_store_dir[store_id] = store_dir
            else:
                if store_dir != self._id_to_store_dir[store_id]:
                    raise DuplicateStoreId(
                        'GF store ID %s is used in (at least) two '
                        'different stores. Locations are: %s and %s' %
                        (store_id, self._id_to_store_dir[store_id], store_dir))

4986 

4987 def get_store_dir(self, store_id): 

4988 ''' 

4989 Lookup directory given a GF store ID. 

4990 ''' 

4991 

4992 if store_id not in self._id_to_store_dir: 

4993 self._scan_stores() 

4994 

4995 if store_id not in self._id_to_store_dir: 

4996 raise NoSuchStore(store_id, self.iter_store_dirs()) 

4997 

4998 return self._id_to_store_dir[store_id] 

4999 

5000 def get_store_ids(self): 

5001 ''' 

5002 Get list of available store IDs. 

5003 ''' 

5004 

5005 self._scan_stores() 

5006 return sorted(self._id_to_store_dir.keys()) 

5007 

5008 def effective_default_store_id(self): 

5009 if self._effective_default_store_id is None: 

5010 if self.default_store_id is None: 

5011 store_ids = self.get_store_ids() 

5012 if len(store_ids) == 1: 

5013 self._effective_default_store_id = self.get_store_ids()[0] 

5014 else: 

5015 raise NoDefaultStoreSet() 

5016 else: 

5017 self._effective_default_store_id = self.default_store_id 

5018 

5019 return self._effective_default_store_id 

5020 

5021 def get_store(self, store_id=None): 

5022 ''' 

5023 Get a store from the engine. 

5024 

5025 :param store_id: identifier of the store (optional) 

5026 :returns: :py:class:`~pyrocko.gf.store.Store` object 

5027 

5028 If no ``store_id`` is provided the store 

5029 associated with the :py:gattr:`default_store_id` is returned. 

5030 Raises :py:exc:`NoDefaultStoreSet` if :py:gattr:`default_store_id` is 

5031 undefined. 

5032 ''' 

5033 

5034 if store_id is None: 

5035 store_id = self.effective_default_store_id() 

5036 

5037 if store_id not in self._open_stores: 

5038 store_dir = self.get_store_dir(store_id) 

5039 self._open_stores[store_id] = store.Store(store_dir) 

5040 

5041 return self._open_stores[store_id] 

5042 

5043 def get_store_config(self, store_id): 

5044 store = self.get_store(store_id) 

5045 return store.config 

5046 

5047 def get_store_extra(self, store_id, key): 

5048 store = self.get_store(store_id) 

5049 return store.get_extra(key) 

5050 

5051 def close_cashed_stores(self): 

5052 ''' 

5053 Close and remove ids from cashed stores. 

5054 ''' 

5055 store_ids = [] 

5056 for store_id, store_ in self._open_stores.items(): 

5057 store_.close() 

5058 store_ids.append(store_id) 

5059 

5060 for store_id in store_ids: 

5061 self._open_stores.pop(store_id) 

5062 

5063 def get_rule(self, source, target): 

5064 cprovided = self.get_store(target.store_id).get_provided_components() 

5065 

5066 if isinstance(target, StaticTarget): 

5067 quantity = target.quantity 

5068 available_rules = static_rules 

5069 elif isinstance(target, Target): 

5070 quantity = target.effective_quantity() 

5071 available_rules = channel_rules 

5072 

5073 try: 

5074 for rule in available_rules[quantity]: 

5075 cneeded = rule.required_components(target) 

5076 if all(c in cprovided for c in cneeded): 

5077 return rule 

5078 

5079 except KeyError: 

5080 pass 

5081 

5082 raise BadRequest( 

5083 'No rule to calculate "%s" with GFs from store "%s" ' 

5084 'for source model "%s".' % ( 

5085 target.effective_quantity(), 

5086 target.store_id, 

5087 source.__class__.__name__)) 

5088 

5089 def _cached_discretize_basesource(self, source, store, cache, target): 

5090 if (source, store) not in cache: 

5091 cache[source, store] = source.discretize_basesource(store, target) 

5092 

5093 return cache[source, store] 

5094 

    def base_seismograms(self, source, targets, components, dsource_cache,
                         nthreads=0):
        '''
        Compute raw base seismograms for a batch of targets at once.

        All ``targets`` must share the same GF store, interpolation scheme
        and sample rate; the first target serves as the representative for
        these shared settings.

        :param source: source model to discretize
        :param targets: list of dynamic targets
        :param components: GF components to compute
        :param dsource_cache: dict memoizing discretized base sources
        :param nthreads: number of threads forwarded to the store
        :returns: list of base seismograms, one per target
        :raises BadRequest: if the targets disagree on interpolation scheme
            or sample rate
        '''

        target = targets[0]

        interp = set([t.interpolation for t in targets])
        if len(interp) > 1:
            raise BadRequest('Targets have different interpolation schemes.')

        rates = set([t.sample_rate for t in targets])
        if len(rates) > 1:
            raise BadRequest('Targets have different sample rates.')

        store_ = self.get_store(target.store_id)
        receivers = [t.receiver(store_) for t in targets]

        # Fall back to the store's native sample rate when the targets do
        # not request a specific one (deltat=None).
        if target.sample_rate is not None:
            deltat = 1. / target.sample_rate
            rate = target.sample_rate
        else:
            deltat = None
            rate = store_.config.sample_rate

        tmin = num.fromiter(
            (t.tmin for t in targets), dtype=float, count=len(targets))
        tmax = num.fromiter(
            (t.tmax for t in targets), dtype=float, count=len(targets))

        # Per-target sample index windows, rounded outward so the window
        # fully covers [tmin, tmax].
        itmin = num.floor(tmin * rate).astype(num.int64)
        itmax = num.ceil(tmax * rate).astype(num.int64)
        nsamples = itmax - itmin + 1

        # Targets without a defined tmin get nsamples = -1, presumably a
        # sentinel meaning "full available span" in calc_seismograms —
        # TODO confirm against the store implementation.
        mask = num.isnan(tmin)
        itmin[mask] = 0
        nsamples[mask] = -1

        base_source = self._cached_discretize_basesource(
            source, store_, dsource_cache, target)

        base_seismograms = store_.calc_seismograms(
            base_source, receivers, components,
            deltat=deltat,
            itmin=itmin, nsamples=nsamples,
            interpolation=target.interpolation,
            optimization=target.optimization,
            nthreads=nthreads)

        # Pad/trim the component traces of each seismogram to a common span.
        for i, base_seismogram in enumerate(base_seismograms):
            base_seismograms[i] = store.make_same_span(base_seismogram)

        return base_seismograms

5146 

5147 def base_seismogram(self, source, target, components, dsource_cache, 

5148 nthreads): 

5149 

5150 tcounters = [xtime()] 

5151 

5152 store_ = self.get_store(target.store_id) 

5153 receiver = target.receiver(store_) 

5154 

5155 if target.tmin and target.tmax is not None: 

5156 rate = store_.config.sample_rate 

5157 itmin = int(num.floor(target.tmin * rate)) 

5158 itmax = int(num.ceil(target.tmax * rate)) 

5159 nsamples = itmax - itmin + 1 

5160 else: 

5161 itmin = None 

5162 nsamples = None 

5163 

5164 tcounters.append(xtime()) 

5165 base_source = self._cached_discretize_basesource( 

5166 source, store_, dsource_cache, target) 

5167 

5168 tcounters.append(xtime()) 

5169 

5170 if target.sample_rate is not None: 

5171 deltat = 1. / target.sample_rate 

5172 else: 

5173 deltat = None 

5174 

5175 base_seismogram = store_.seismogram( 

5176 base_source, receiver, components, 

5177 deltat=deltat, 

5178 itmin=itmin, nsamples=nsamples, 

5179 interpolation=target.interpolation, 

5180 optimization=target.optimization, 

5181 nthreads=nthreads) 

5182 

5183 tcounters.append(xtime()) 

5184 

5185 base_seismogram = store.make_same_span(base_seismogram) 

5186 

5187 tcounters.append(xtime()) 

5188 

5189 return base_seismogram, tcounters 

5190 

5191 def base_statics(self, source, target, components, nthreads): 

5192 tcounters = [xtime()] 

5193 store_ = self.get_store(target.store_id) 

5194 

5195 if target.tsnapshot is not None: 

5196 rate = store_.config.sample_rate 

5197 itsnapshot = int(num.floor(target.tsnapshot * rate)) 

5198 else: 

5199 itsnapshot = None 

5200 tcounters.append(xtime()) 

5201 

5202 base_source = source.discretize_basesource(store_, target=target) 

5203 

5204 tcounters.append(xtime()) 

5205 

5206 base_statics = store_.statics( 

5207 base_source, 

5208 target, 

5209 itsnapshot, 

5210 components, 

5211 target.interpolation, 

5212 nthreads) 

5213 

5214 tcounters.append(xtime()) 

5215 

5216 return base_statics, tcounters 

5217 

    def _post_process_dynamic(self, base_seismogram, source, target):
        '''
        Turn a raw base seismogram into the final trace for ``target``.

        Applies the component-combination rule, the source/target amplitude
        factors and the source's post-processing STF (by convolution), then
        hands the assembled trace to ``target.post_process``.
        '''
        # All components share deltat/itmin; take them from any one of them.
        base_any = next(iter(base_seismogram.values()))
        deltat = base_any.deltat
        itmin = base_any.itmin

        # Combine the available GF components into the requested quantity.
        rule = self.get_rule(source, target)
        data = rule.apply_(target, base_seismogram)

        factor = source.get_factor() * target.get_factor()
        if factor != 1.0:
            data = data * factor

        stf = source.effective_stf_post()

        times, amplitudes = stf.discretize_t(
            deltat, 0.0)

        # repeat end point to prevent boundary effects
        padded_data = num.empty(data.size + amplitudes.size, dtype=float)
        padded_data[:data.size] = data
        padded_data[data.size:] = data[-1]
        data = num.convolve(amplitudes, padded_data)

        # Shift the trace onset by the first STF sample time (may be
        # negative for acausal STF discretizations).
        tmin = itmin * deltat + times[0]

        tr = meta.SeismosizerTrace(
            codes=target.codes,
            data=data[:-amplitudes.size],  # drop the padding tail again
            deltat=deltat,
            tmin=tmin)

        return target.post_process(self, source, tr)

5250 

    def _post_process_statics(self, base_statics, source, starget):
        '''
        Scale raw static results and run the target's post-processing.
        '''
        rule = self.get_rule(source, starget)
        data = rule.apply_(starget, base_statics)

        factor = source.get_factor()
        if factor != 1.0:
            for v in data.values():
                # In-place scaling of the rule output.
                v *= factor

        # NOTE(review): ``data`` is not passed on — ``base_statics`` is
        # handed to post_process instead. The scaling above only takes
        # effect if rule.apply_ returns views into ``base_statics``;
        # confirm this is intentional.
        return starget.post_process(self, source, base_statics)

5261 

5262 def process(self, *args, **kwargs): 

5263 ''' 

5264 Process a request. 

5265 

5266 :: 

5267 

5268 process(**kwargs) 

5269 process(request, **kwargs) 

5270 process(sources, targets, **kwargs) 

5271 

5272 The request can be given a a :py:class:`Request` object, or such an 

5273 object is created using ``Request(**kwargs)`` for convenience. 

5274 

5275 :returns: :py:class:`Response` object 

5276 ''' 

5277 

5278 if len(args) not in (0, 1, 2): 

5279 raise BadRequest('Invalid arguments.') 

5280 

5281 if len(args) == 1: 

5282 kwargs['request'] = args[0] 

5283 

5284 elif len(args) == 2: 

5285 kwargs.update(Request.args2kwargs(args)) 

5286 

5287 request = kwargs.pop('request', None) 

5288 status_callback = kwargs.pop('status_callback', None) 

5289 calc_timeseries = kwargs.pop('calc_timeseries', True) 

5290 

5291 nprocs = kwargs.pop('nprocs', None) 

5292 nthreads = kwargs.pop('nthreads', 1) 

5293 if nprocs is not None: 

5294 nthreads = nprocs 

5295 

5296 if request is None: 

5297 request = Request(**kwargs) 

5298 

5299 if resource: 

5300 rs0 = resource.getrusage(resource.RUSAGE_SELF) 

5301 rc0 = resource.getrusage(resource.RUSAGE_CHILDREN) 

5302 tt0 = xtime() 

5303 

5304 # make sure stores are open before fork() 

5305 store_ids = set(target.store_id for target in request.targets) 

5306 for store_id in store_ids: 

5307 self.get_store(store_id) 

5308 

5309 source_index = dict((x, i) for (i, x) in 

5310 enumerate(request.sources)) 

5311 target_index = dict((x, i) for (i, x) in 

5312 enumerate(request.targets)) 

5313 

5314 m = request.subrequest_map() 

5315 

5316 skeys = sorted(m.keys(), key=cmp_to_key(cmp_none_aware)) 

5317 results_list = [] 

5318 

5319 for i in range(len(request.sources)): 

5320 results_list.append([None] * len(request.targets)) 

5321 

5322 tcounters_dyn_list = [] 

5323 tcounters_static_list = [] 

5324 nsub = len(skeys) 

5325 isub = 0 

5326 

5327 # Processing dynamic targets through 

5328 # parimap(process_subrequest_dynamic) 

5329 

5330 if calc_timeseries: 

5331 _process_dynamic = process_dynamic_timeseries 

5332 else: 

5333 _process_dynamic = process_dynamic 

5334 

5335 if request.has_dynamic: 

5336 work_dynamic = [ 

5337 (i, nsub, 

5338 [source_index[source] for source in m[k][0]], 

5339 [target_index[target] for target in m[k][1] 

5340 if not isinstance(target, StaticTarget)]) 

5341 for (i, k) in enumerate(skeys)] 

5342 

5343 for ii_results, tcounters_dyn in _process_dynamic( 

5344 work_dynamic, request.sources, request.targets, self, 

5345 nthreads): 

5346 

5347 tcounters_dyn_list.append(num.diff(tcounters_dyn)) 

5348 isource, itarget, result = ii_results 

5349 results_list[isource][itarget] = result 

5350 

5351 if status_callback: 

5352 status_callback(isub, nsub) 

5353 

5354 isub += 1 

5355 

5356 # Processing static targets through process_static 

5357 if request.has_statics: 

5358 work_static = [ 

5359 (i, nsub, 

5360 [source_index[source] for source in m[k][0]], 

5361 [target_index[target] for target in m[k][1] 

5362 if isinstance(target, StaticTarget)]) 

5363 for (i, k) in enumerate(skeys)] 

5364 

5365 for ii_results, tcounters_static in process_static( 

5366 work_static, request.sources, request.targets, self, 

5367 nthreads=nthreads): 

5368 

5369 tcounters_static_list.append(num.diff(tcounters_static)) 

5370 isource, itarget, result = ii_results 

5371 results_list[isource][itarget] = result 

5372 

5373 if status_callback: 

5374 status_callback(isub, nsub) 

5375 

5376 isub += 1 

5377 

5378 if status_callback: 

5379 status_callback(nsub, nsub) 

5380 

5381 tt1 = time.time() 

5382 if resource: 

5383 rs1 = resource.getrusage(resource.RUSAGE_SELF) 

5384 rc1 = resource.getrusage(resource.RUSAGE_CHILDREN) 

5385 

5386 s = ProcessingStats() 

5387 

5388 if request.has_dynamic: 

5389 tcumu_dyn = num.sum(num.vstack(tcounters_dyn_list), axis=0) 

5390 t_dyn = float(num.sum(tcumu_dyn)) 

5391 perc_dyn = map(float, tcumu_dyn / t_dyn * 100.) 

5392 (s.t_perc_get_store_and_receiver, 

5393 s.t_perc_discretize_source, 

5394 s.t_perc_make_base_seismogram, 

5395 s.t_perc_make_same_span, 

5396 s.t_perc_post_process) = perc_dyn 

5397 else: 

5398 t_dyn = 0. 

5399 

5400 if request.has_statics: 

5401 tcumu_static = num.sum(num.vstack(tcounters_static_list), axis=0) 

5402 t_static = num.sum(tcumu_static) 

5403 perc_static = map(float, tcumu_static / t_static * 100.) 

5404 (s.t_perc_static_get_store, 

5405 s.t_perc_static_discretize_basesource, 

5406 s.t_perc_static_sum_statics, 

5407 s.t_perc_static_post_process) = perc_static 

5408 

5409 s.t_wallclock = tt1 - tt0 

5410 if resource: 

5411 s.t_cpu = ( 

5412 (rs1.ru_utime + rs1.ru_stime + rc1.ru_utime + rc1.ru_stime) - 

5413 (rs0.ru_utime + rs0.ru_stime + rc0.ru_utime + rc0.ru_stime)) 

5414 s.n_read_blocks = ( 

5415 (rs1.ru_inblock + rc1.ru_inblock) - 

5416 (rs0.ru_inblock + rc0.ru_inblock)) 

5417 

5418 n_records_stacked = 0. 

5419 for results in results_list: 

5420 for result in results: 

5421 if not isinstance(result, meta.Result): 

5422 continue 

5423 shr = float(result.n_shared_stacking) 

5424 n_records_stacked += result.n_records_stacked / shr 

5425 s.t_perc_optimize += result.t_optimize / shr 

5426 s.t_perc_stack += result.t_stack / shr 

5427 s.n_records_stacked = int(n_records_stacked) 

5428 if t_dyn != 0.: 

5429 s.t_perc_optimize /= t_dyn * 100 

5430 s.t_perc_stack /= t_dyn * 100 

5431 

5432 return Response( 

5433 request=request, 

5434 results_list=results_list, 

5435 stats=s) 

5436 

5437 

class RemoteEngine(Engine):
    '''
    Client for remote synthetic seismogram calculator.
    '''

    # Web service site identifier and base URL of the remote seismosizer.
    site = String.T(default=ws.g_default_site, optional=True)
    url = String.T(default=ws.g_url, optional=True)

    def process(self, request=None, status_callback=None, **kwargs):
        '''
        Process a request on the remote seismosizer web service.

        If no ``request`` is given, one is built as ``Request(**kwargs)``.

        NOTE(review): ``status_callback`` is accepted for interface
        compatibility with :py:meth:`LocalEngine.process` but is not used
        here — confirm this is intentional.
        '''
        if request is None:
            request = Request(**kwargs)

        return ws.seismosizer(url=self.url, site=self.site, request=request)

5452 

5453 

g_engine = None  # lazily created, shared LocalEngine instance


def get_engine(store_superdirs=None):
    '''
    Get a shared global :py:class:`LocalEngine` instance.

    The engine is created on first use with ``use_env=True`` and
    ``use_config=True``.

    :param store_superdirs: additional GF store superdirectories to
        register with the engine (optional)
    :returns: the shared :py:class:`LocalEngine` object
    '''
    global g_engine

    if g_engine is None:
        g_engine = LocalEngine(use_env=True, use_config=True)

    # ``None`` sentinel instead of a mutable ``[]`` default argument
    # (shared between calls); behavior for callers is unchanged.
    for d in store_superdirs or []:
        if d not in g_engine.store_superdirs:
            g_engine.store_superdirs.append(d)

    return g_engine

5467 

5468 

class SourceGroup(Object):
    '''
    Abstract base class for collections of sources.

    Attribute access is vectorized: accessing ``group.<param>`` collects
    that parameter from every contained source into a numpy float array.
    '''

    def __getattr__(self, name):
        values = (getattr(source, name) for source in self)
        return num.fromiter(values, dtype=float)

    def __iter__(self):
        raise NotImplementedError(
            'This method should be implemented in subclass.')

    def __len__(self):
        raise NotImplementedError(
            'This method should be implemented in subclass.')

5482 

5483 

class SourceList(SourceGroup):
    '''
    Flat list of sources.
    '''

    sources = List.T(Source.T())

    def append(self, s):
        '''
        Append a source to the list.
        '''
        self.sources.append(s)

    def __iter__(self):
        for source in self.sources:
            yield source

    def __len__(self):
        return len(self.sources)

5495 

5496 

class SourceGrid(SourceGroup):
    '''
    Regular grid of sources, derived from a base source by varying a set
    of its parameters over given ranges.
    '''

    # source of which grid-node clones are generated
    base = Source.T()
    # parameter name -> range of values to scan
    variables = Dict.T(String.T(), Range.T())
    # preferred ordering of the scanned parameters
    order = List.T(String.T())

    def __len__(self):
        # Total number of grid nodes: product of the lengths of all
        # coordinate axes.
        n = 1
        for (k, v) in self.make_coords(self.base):
            n *= len(list(v))

        return n

    def __iter__(self):
        # Yield one regularized clone of the base source per grid node.
        for items in permudef(self.make_coords(self.base)):
            s = self.base.clone(**{k: v for (k, v) in items})
            s.regularize()
            yield s

    def ordered_params(self):
        # Yield the scanned parameter names: entries of ``order`` first,
        # then the base source's own parameter order; each consumed name is
        # removed from ``ks``. Any leftover name is not a valid parameter
        # of the source type.
        ks = list(self.variables.keys())
        for k in self.order + list(self.base.keys()):
            if k in ks:
                yield k
                ks.remove(k)
        if ks:
            raise Exception('Invalid parameter "%s" for source type "%s".' %
                            (ks[0], self.base.__class__.__name__))

    def make_coords(self, base):
        # One (param, values) coordinate axis per scanned parameter.
        return [(param, self.variables[param].make(base=base[param]))
                for param in self.ordered_params()]

5529 

5530 

# Source model classes exposed by this module (used below to build
# ``__all__`` and by clients iterating the available source types).
source_classes = [
    Source,
    SourceWithMagnitude,
    SourceWithDerivedMagnitude,
    ExplosionSource,
    RectangularExplosionSource,
    DCSource,
    CLVDSource,
    VLVDSource,
    MTSource,
    RectangularSource,
    PseudoDynamicRupture,
    DoubleDCSource,
    RingfaultSource,
    CombiSource,
    SFSource,
    PorePressurePointSource,
    PorePressureLineSource,
]

# Source-time-function classes exposed by this module.
stf_classes = [
    STF,
    BoxcarSTF,
    TriangularSTF,
    HalfSinusoidSTF,
    ResonatorSTF,
]

# Public API: exception types, all source/STF class names, and the
# request/engine machinery.
__all__ = '''
SeismosizerError
BadRequest
NoSuchStore
DerivedMagnitudeError
STFMode
'''.split() + [S.__name__ for S in source_classes + stf_classes] + '''
Request
ProcessingStats
Response
Engine
LocalEngine
RemoteEngine
source_classes
get_engine
Range
SourceGroup
SourceList
SourceGrid
map_anchor
'''.split()