1# http://pyrocko.org - GPLv3 

2# 

3# The Pyrocko Developers, 21st Century 

4# ---|P------/S----------~Lg---------- 

5 

6''' 

7 

8 

9 

10.. _coordinate-system-names: 

11 

12Coordinate systems 

13.................. 

14 

15Coordinate system names commonly used in source models. 

16 

17================= ============================================ 

18Name Description 

19================= ============================================ 

20``'xyz'`` northing, easting, depth in [m] 

21``'xy'`` northing, easting in [m] 

22``'latlon'`` latitude, longitude in [deg] 

23``'lonlat'`` longitude, latitude in [deg] 

24``'latlondepth'`` latitude, longitude in [deg], depth in [m] 

25================= ============================================ 

26''' 

27 

28 

29from __future__ import absolute_import, division, print_function 

30 

31from collections import defaultdict 

32from functools import cmp_to_key 

33import time 

34import math 

35import os 

36import re 

37import logging 

38try: 

39 import resource 

40except ImportError: 

41 resource = None 

42from hashlib import sha1 

43 

44import numpy as num 

45from scipy.interpolate import RegularGridInterpolator 

46 

47from pyrocko.guts import (Object, Float, String, StringChoice, List, 

48 Timestamp, Int, SObject, ArgumentError, Dict, 

49 ValidationError, Bool) 

50from pyrocko.guts_array import Array 

51 

52from pyrocko import moment_tensor as pmt 

53from pyrocko import trace, util, config, model, eikonal_ext 

54from pyrocko.orthodrome import ne_to_latlon 

55from pyrocko.model import Location 

56from pyrocko.modelling import OkadaSource, make_okada_coefficient_matrix, \ 

57 okada_ext, invert_fault_dislocations_bem 

58 

59from . import meta, store, ws 

60from .tractions import TractionField, DirectedTractions 

61from .targets import Target, StaticTarget, SatelliteTarget 

62 

# Shortcut for joining filesystem paths.
pjoin = os.path.join

# Prefix used for guts YAML type tags of objects defined in this module.
guts_prefix = 'pf'

# Degree/radian conversion factors and kilometre unit shortcut.
d2r = math.pi / 180.
r2d = 180. / math.pi
km = 1e3

logger = logging.getLogger('pyrocko.gf.seismosizer')

72 

73 

def cmp_none_aware(a, b):
    '''
    Three-way comparison which tolerates ``None`` values.

    Tuples are compared element-wise and ``None`` sorts before any
    other value.

    :returns: ``-1``, ``0`` or ``1``
    '''
    if isinstance(a, tuple) and isinstance(b, tuple):
        for ea, eb in zip(a, b):
            result = cmp_none_aware(ea, eb)
            if result != 0:
                return result

        return 0

    if a is None and b is None:
        return 0

    if a is None:
        return -1

    if b is None:
        return 1

    return bool(a > b) - bool(a < b)

96 

97 

def xtime():
    '''
    Get the current system time (indirection point for testing).
    '''
    now = time.time()
    return now

100 

101 

class SeismosizerError(Exception):
    '''
    Base class for errors raised by the seismosizer engine.
    '''
    pass

104 

105 

class BadRequest(SeismosizerError):
    '''
    Raised when an invalid request is made to the seismosizer engine.
    '''
    pass

108 

109 

class DuplicateStoreId(Exception):
    '''
    Raised when a duplicate GF store id is encountered.
    '''
    pass

112 

113 

class NoDefaultStoreSet(Exception):
    '''
    Raised when a default GF store is needed but none has been configured.
    '''
    pass

116 

117 

class ConversionError(Exception):
    '''
    Raised when a source model cannot be converted to the requested form.
    '''
    pass

120 

121 

class NoSuchStore(BadRequest):
    '''
    Raised when a requested GF store cannot be found.
    '''

    def __init__(self, store_id=None, dirs=None):
        BadRequest.__init__(self)
        self.store_id = store_id
        self.dirs = dirs

    def __str__(self):
        # Build the message from the optional store id and search paths.
        if self.store_id is None:
            message = 'GF store not found.'
        else:
            message = 'no GF store with id "%s" found.' % self.store_id

        if self.dirs is not None:
            folders = '\n '.join(sorted(self.dirs))
            message += ' Searched folders:\n %s' % folders

        return message

138 

139 

def ufloat(s):
    '''
    Convert a string with optional unit suffix to a float.

    Supported suffixes: ``'k'`` (1e3) and ``'M'`` (1e6), e.g. ``'10k'``
    gives ``10000.0``.

    :raises ValueError: if the string consists of only a unit suffix or
        is not a valid number.
    '''
    units = {
        'k': 1e3,
        'M': 1e6,
    }

    factor = 1.0
    number = s
    if s and s[-1] in units:
        factor = units[s[-1]]
        number = s[:-1]
        if not number:
            # Report the original input: previously the message showed the
            # already-truncated (empty) string, which was useless.
            raise ValueError('unit without a number: \'%s\'' % s)

    return float(number) * factor

154 

155 

def ufloat_or_none(s):
    '''
    Like :py:func:`ufloat` but map empty or falsy input to ``None``.
    '''
    return ufloat(s) if s else None

161 

162 

def int_or_none(s):
    '''
    Convert to ``int``, mapping empty or falsy input to ``None``.
    '''
    return int(s) if s else None

168 

169 

def nonzero(x, eps=1e-15):
    '''
    Check whether ``x`` differs from zero by more than ``eps``.
    '''
    return eps < abs(x)

172 

173 

def permudef(ln, j=0):
    '''
    Generate all parameter combinations from a list of (key, values) pairs.

    ``ln`` is a list of ``(key, list_of_values)`` tuples. For each
    combination of the cartesian product, the list is temporarily rewritten
    in place so that every element is a single ``(key, value)`` pair and
    then yielded. The original ``(key, values)`` entries are restored after
    each sub-iteration, so ``ln`` is unchanged once the generator is
    exhausted.

    NOTE(review): the same (mutated) list object is yielded every time --
    callers must copy it to keep a combination.
    '''
    if j < len(ln):
        k, v = ln[j]
        for y in v:
            ln[j] = k, y  # substitute one concrete value at position j
            for s in permudef(ln, j + 1):
                yield s

        ln[j] = k, v  # restore the original (key, values) entry
        return
    else:
        yield ln

186 

187 

def arr(x):
    '''
    Coerce input to a NumPy array of at least one dimension.
    '''
    a = num.asarray(x)
    return num.atleast_1d(a)

190 

191 

def discretize_rect_source(deltas, deltat, time, north, east, depth,
                           strike, dip, length, width,
                           anchor, velocity=None, stf=None,
                           nucleation_x=None, nucleation_y=None,
                           decimation_factor=1, pointsonly=False,
                           plane_coords=False,
                           aggressive_oversampling=False):
    '''
    Discretize a rectangular fault plane into point sources.

    :param deltas: GF store grid spacings, used to choose the sub-source
        spacing
    :param deltat: sampling interval [s]
    :param time: reference (origin) time of the source
    :param north, east, depth: position of the anchor point [m]
    :param strike, dip: plane orientation [deg]
    :param length, width: plane extent [m]
    :param anchor: anchor name, looked up in ``map_anchor``
    :param velocity: rupture velocity [m/s], required unless
        ``pointsonly`` is set
    :param stf: source time function, defaults to a unit pulse
    :param nucleation_x, nucleation_y: rupture nucleation point in plane
        coordinates, used to compute rupture onset times
    :param decimation_factor: coarsen the discretization by this factor
    :param pointsonly: if set, return only points and spacings
    :param plane_coords: if set, return points in plane coordinates
        (before rotation and translation)
    :param aggressive_oversampling: double the sub-source density

    :returns: ``(points, dl, dw, nl, nw)`` in points-only/plane modes, or
        ``(points2, times2, amplitudes2, dl, dw, nl, nw)`` in time mode.
    '''

    if stf is None:
        stf = STF()

    if not velocity and not pointsonly:
        raise AttributeError('velocity is required in time mode')

    mindeltagf = float(num.min(deltas))
    if velocity:
        # In time mode the spacing must also resolve the rupture
        # propagation within one sample interval.
        mindeltagf = min(mindeltagf, deltat * velocity)

    ln = length
    wd = width

    if aggressive_oversampling:
        nl = int((2. / decimation_factor) * num.ceil(ln / mindeltagf)) + 1
        nw = int((2. / decimation_factor) * num.ceil(wd / mindeltagf)) + 1
    else:
        nl = int((1. / decimation_factor) * num.ceil(ln / mindeltagf)) + 1
        nw = int((1. / decimation_factor) * num.ceil(wd / mindeltagf)) + 1

    n = int(nl * nw)

    dl = ln / nl
    dw = wd / nw

    xl = num.linspace(-0.5 * (ln - dl), 0.5 * (ln - dl), nl)
    xw = num.linspace(-0.5 * (wd - dw), 0.5 * (wd - dw), nw)

    # dtype=float instead of the alias num.float, which was removed in
    # NumPy >= 1.24 and raised AttributeError.
    points = num.zeros((n, 3), dtype=float)
    points[:, 0] = num.tile(xl, nw)
    points[:, 1] = num.repeat(xw, nl)

    if nucleation_x is not None:
        dist_x = num.abs(nucleation_x - points[:, 0])
    else:
        dist_x = num.zeros(n)

    if nucleation_y is not None:
        dist_y = num.abs(nucleation_y - points[:, 1])
    else:
        dist_y = num.zeros(n)

    dist = num.sqrt(dist_x**2 + dist_y**2)

    # Rupture onset times are only defined in time mode. Computing them
    # unconditionally crashed with velocity=None and pointsonly=True,
    # a combination the guard above explicitly admits.
    times = dist / velocity if velocity else None

    anch_x, anch_y = map_anchor[anchor]

    points[:, 0] -= anch_x * 0.5 * length
    points[:, 1] -= anch_y * 0.5 * width

    if plane_coords:
        return points, dl, dw, nl, nw

    rotmat = num.asarray(
        pmt.euler_to_matrix(dip * d2r, strike * d2r, 0.0))
    points = num.dot(rotmat.T, points.T).T

    points[:, 0] += north
    points[:, 1] += east
    points[:, 2] += depth

    if pointsonly:
        return points, dl, dw, nl, nw

    # Convolve the rupture onset times with the source time function.
    xtau, amplitudes = stf.discretize_t(deltat, time)
    nt = xtau.size

    points2 = num.repeat(points, nt, axis=0)
    times2 = (times[:, num.newaxis] + xtau[num.newaxis, :]).ravel()
    amplitudes2 = num.tile(amplitudes, n)

    return points2, times2, amplitudes2, dl, dw, nl, nw

272 

273 

def check_rect_source_discretisation(points2, nl, nw, store):
    '''
    Check whether a RectangularSource discretisation is fine enough to
    avoid aliasing, given the shear-wave velocity profile of the store.

    :param points2: flat array of sub-source coordinates, shape (n, 3)
    :param nl: number of sub-sources along length
    :param nw: number of sub-sources along width
    :param store: GF store providing ``config.get_vs`` and ``config.deltat``

    :returns: ``True`` if acceptable, ``False`` otherwise.
    '''
    # We assume a non-rotated fault plane
    N_CRITICAL = 8
    points = points2.T.reshape((3, nl, nw))
    # NOTE(review): points.size counts coordinate components (3*nl*nw),
    # not sub-sources (nl*nw) -- the threshold and warning message may be
    # off by a factor of 3; confirm intent.
    if points.size <= N_CRITICAL:
        logger.warning('RectangularSource is defined by only %d sub-sources!'
                       % points.size)
        return True

    # Spacing between neighbouring rows of sub-sources.
    distances = num.sqrt(
        (points[0, 0, :] - points[0, 1, :])**2 +
        (points[1, 0, :] - points[1, 1, :])**2 +
        (points[2, 0, :] - points[2, 1, :])**2)

    depths = points[2, 0, :]
    vs_profile = store.config.get_vs(
        lat=0., lon=0.,
        points=num.repeat(depths[:, num.newaxis], 3, axis=1),
        interpolation='multilinear')

    # Shortest wavelength resolvable at the store's Nyquist frequency.
    min_wavelength = vs_profile * (store.config.deltat * 2)
    if not num.all(min_wavelength > distances / 2):
        return False
    return True

298 

299 

def outline_rect_source(strike, dip, length, width, anchor):
    '''
    Get the outline of a rectangular source as a closed polygon of five
    corner points, rotated into its true orientation.

    :returns: array of shape (5, 3) with the corner coordinates.
    '''
    hl = 0.5 * length
    hw = 0.5 * width

    # Closed rectangle in plane coordinates (x: along strike,
    # y: down dip); the last corner repeats the first.
    points = num.array(
        [[-hl, -hw, 0.],
         [hl, -hw, 0.],
         [hl, hw, 0.],
         [-hl, hw, 0.],
         [-hl, -hw, 0.]])

    # Shift so that the requested anchor point sits at the origin.
    anch_x, anch_y = map_anchor[anchor]
    points[:, 0] -= anch_x * hl
    points[:, 1] -= anch_y * hw

    rotmat = num.asarray(
        pmt.euler_to_matrix(dip * d2r, strike * d2r, 0.0))

    return num.dot(rotmat.T, points.T).T

318 

319 

def from_plane_coords(
        strike, dip, length, width, depth, x_plane_coords, y_plane_coords,
        lat=0., lon=0.,
        north_shift=0, east_shift=0,
        anchor='top', cs='xy'):
    '''
    Convert relative plane coordinates to absolute coordinates.

    :param x_plane_coords: fraction(s) of half-length, scalar or list
    :param y_plane_coords: fraction(s) of half-width, scalar or list
    :param cs: output coordinate system, one of ``'xy'``, ``'latlon'``,
        ``'lonlat'``

    :returns: two lists with the converted coordinates.
    '''

    ln = length
    wd = width
    x_abs = []
    y_abs = []
    # Normalize scalar input to one-element lists.
    if not isinstance(x_plane_coords, list):
        x_plane_coords = [x_plane_coords]
        y_plane_coords = [y_plane_coords]

    for x_plane, y_plane in zip(x_plane_coords, y_plane_coords):
        # Rectangle corners scaled by the requested plane coordinates.
        points = num.array(
            [[-0.5 * ln * x_plane, -0.5 * wd * y_plane, 0.],
             [0.5 * ln * x_plane, -0.5 * wd * y_plane, 0.],
             [0.5 * ln * x_plane, 0.5 * wd * y_plane, 0.],
             [-0.5 * ln * x_plane, 0.5 * wd * y_plane, 0.],
             [-0.5 * ln * x_plane, -0.5 * wd * y_plane, 0.]])

        anch_x, anch_y = map_anchor[anchor]
        points[:, 0] -= anch_x * 0.5 * length
        points[:, 1] -= anch_y * 0.5 * width

        # Rotate into the true orientation, then shift to absolute
        # position.
        rotmat = num.asarray(
            pmt.euler_to_matrix(dip * d2r, strike * d2r, 0.0))

        points = num.dot(rotmat.T, points.T).T
        points[:, 0] += north_shift
        points[:, 1] += east_shift
        points[:, 2] += depth
        if cs in ('latlon', 'lonlat'):
            latlon = ne_to_latlon(lat, lon,
                                  points[:, 0], points[:, 1])
            latlon = num.array(latlon).T
            # NOTE(review): only single corner rows (index 1 resp. 2) are
            # extracted here -- presumably selecting the corner matching
            # (x_plane, y_plane); confirm against callers.
            x_abs.append(latlon[1:2, 1])
            y_abs.append(latlon[2:3, 0])
        if cs == 'xy':
            x_abs.append(points[1:2, 1])
            y_abs.append(points[2:3, 0])

    if cs == 'lonlat':
        return y_abs, x_abs
    else:
        return x_abs, y_abs

367 

368 

def points_on_rect_source(
        strike, dip, length, width, anchor,
        discretized_basesource=None, points_x=None, points_y=None):
    '''
    Get points on a rectangular source plane in true 3D coordinates.

    Points are taken either from the patch-corner grid of a discretized
    base source or from normalized plane coordinates
    ``points_x``/``points_y`` (values in ``[-1, 1]``).

    :returns: array of shape (npoints, 3).
    '''

    ln = length
    wd = width

    # Normalize scalar/list input to arrays.
    if isinstance(points_x, list) or isinstance(points_x, float):
        points_x = num.array([points_x])
    if isinstance(points_y, list) or isinstance(points_y, float):
        points_y = num.array([points_y])

    if discretized_basesource:
        ds = discretized_basesource

        # The corner grid is one larger than the patch grid in each
        # direction.
        nl_patches = ds.nl + 1
        nw_patches = ds.nw + 1

        npoints = nl_patches * nw_patches
        points = num.zeros((npoints, 3))
        ln_patches = num.array([il for il in range(nl_patches)])
        wd_patches = num.array([iw for iw in range(nw_patches)])

        # Map corner indices to normalized coordinates in [-1, 1].
        points_ln =\
            2 * ((ln_patches - num.min(ln_patches)) / num.ptp(ln_patches)) - 1
        points_wd =\
            2 * ((wd_patches - num.min(wd_patches)) / num.ptp(wd_patches)) - 1

        for il in range(nl_patches):
            for iw in range(nw_patches):
                points[il * nw_patches + iw, :] = num.array([
                    points_ln[il] * ln * 0.5,
                    points_wd[iw] * wd * 0.5, 0.0])

    elif points_x.any() and points_y.any():
        # NOTE(review): raises AttributeError if points_x is None here, and
        # ``points`` stays undefined if both arrays are all-zero -- confirm
        # callers always supply either a basesource or non-zero points.
        points = num.zeros(shape=((len(points_x), 3)))
        for i, (x, y) in enumerate(zip(points_x, points_y)):
            points[i, :] = num.array(
                [x * 0.5 * ln, y * 0.5 * wd, 0.0])

    # Shift so the requested anchor sits at the origin, then rotate the
    # plane into its true orientation.
    anch_x, anch_y = map_anchor[anchor]

    points[:, 0] -= anch_x * 0.5 * ln
    points[:, 1] -= anch_y * 0.5 * wd

    rotmat = num.asarray(
        pmt.euler_to_matrix(dip * d2r, strike * d2r, 0.0))

    return num.dot(rotmat.T, points.T).T

418 

419 

class InvalidGridDef(Exception):
    '''
    Raised when a grid or range specification string cannot be parsed.
    '''
    pass

422 

423 

class Range(SObject):
    '''
    Convenient range specification.

    Equivalent ways to specify the range [ 0., 1000., ... 10000. ]::

      Range('0 .. 10k : 1k')
      Range(start=0., stop=10e3, step=1e3)
      Range(0, 10e3, 1e3)
      Range('0 .. 10k @ 11')
      Range(start=0., stop=10*km, n=11)

      Range(0, 10e3, n=11)
      Range(values=[x*1e3 for x in range(11)])

    Depending on the use context, it can be possible to omit any part of the
    specification. E.g. in the context of extracting a subset of an already
    existing range, the existing range's specification values would be filled
    in where missing.

    The values are distributed with equal spacing, unless the ``spacing``
    argument is modified. The values can be created offset or relative to an
    external base value with the ``relative`` argument if the use context
    supports this.

    The range specification can be expressed with a short string
    representation::

        'start .. stop @ num | spacing, relative'
        'start .. stop : step | spacing, relative'

    most parts of the expression can be omitted if not needed. Whitespace is
    allowed for readability but can also be omitted.
    '''

    start = Float.T(optional=True)
    stop = Float.T(optional=True)
    step = Float.T(optional=True)
    n = Int.T(optional=True)
    values = Array.T(optional=True, dtype=float, shape=(None,))

    spacing = StringChoice.T(
        choices=['lin', 'log', 'symlog'],
        default='lin',
        optional=True)

    relative = StringChoice.T(
        choices=['', 'add', 'mult'],
        default='',
        optional=True)

    # Matches 'start..stop', optionally followed by '@n' or ':step' and an
    # optional '|spacing,relative' tail (whitespace is removed beforehand).
    pattern = re.compile(r'^((?P<start>.*)\.\.(?P<stop>[^@|:]*))?'
                         r'(@(?P<n>[^|]+)|:(?P<step>[^|]+))?'
                         r'(\|(?P<stuff>.+))?$')

    def __init__(self, *args, **kwargs):
        # Positional forms: (spec_string,), (start, stop) or
        # (start, stop, step); everything else goes through kwargs.
        d = {}
        if len(args) == 1:
            d = self.parse(args[0])
        elif len(args) in (2, 3):
            d['start'], d['stop'] = [float(x) for x in args[:2]]
            if len(args) == 3:
                d['step'] = float(args[2])

        for k, v in kwargs.items():
            if k in d:
                raise ArgumentError('%s specified more than once' % k)

            d[k] = v

        SObject.__init__(self, **d)

    def __str__(self):
        # Reassemble the compact string form understood by parse().
        def sfloat(x):
            if x is not None:
                return '%g' % x
            else:
                return ''

        # NOTE(review): truthiness of a multi-element NumPy array raises
        # ValueError -- presumably ``values`` is None on this path; confirm.
        if self.values:
            return ','.join('%g' % x for x in self.values)

        if self.start is None and self.stop is None:
            s0 = ''
        else:
            s0 = '%s .. %s' % (sfloat(self.start), sfloat(self.stop))

        s1 = ''
        if self.step is not None:
            s1 = [' : %g', ':%g'][s0 == ''] % self.step
        elif self.n is not None:
            s1 = [' @ %i', '@%i'][s0 == ''] % self.n

        if self.spacing == 'lin' and self.relative == '':
            s2 = ''
        else:
            x = []
            if self.spacing != 'lin':
                x.append(self.spacing)

            if self.relative != '':
                x.append(self.relative)

            s2 = ' | %s' % ','.join(x)

        return s0 + s1 + s2

    @classmethod
    def parse(cls, s):
        '''
        Parse a compact range specification string into keyword values.

        :raises InvalidGridDef: if the string is not a valid range
            specification.
        '''
        s = re.sub(r'\s+', '', s)
        m = cls.pattern.match(s)
        if not m:
            # Fall back to an explicit comma-separated list of values.
            try:
                vals = [ufloat(x) for x in s.split(',')]
            except Exception:
                raise InvalidGridDef(
                    '"%s" is not a valid range specification' % s)

            return dict(values=num.array(vals, dtype=float))

        d = m.groupdict()
        try:
            start = ufloat_or_none(d['start'])
            stop = ufloat_or_none(d['stop'])
            step = ufloat_or_none(d['step'])
            n = int_or_none(d['n'])
        except Exception:
            raise InvalidGridDef(
                '"%s" is not a valid range specification' % s)

        spacing = 'lin'
        relative = ''

        # The '|' tail may carry a spacing keyword and/or a relative mode.
        if d['stuff'] is not None:
            t = d['stuff'].split(',')
            for x in t:
                if x in cls.spacing.choices:
                    spacing = x
                elif x and x in cls.relative.choices:
                    relative = x
                else:
                    raise InvalidGridDef(
                        '"%s" is not a valid range specification' % s)

        return dict(start=start, stop=stop, step=step, n=n, spacing=spacing,
                    relative=relative)

    def make(self, mi=None, ma=None, inc=None, base=None, eps=1e-5):
        '''
        Create the list of values described by this range.

        :param mi: fallback minimum used when ``start`` is unset
        :param ma: fallback maximum used when ``stop`` is unset
        :param inc: fallback increment used when ``step`` is unset
        :param base: base value for ``relative`` ranges
        :param eps: tolerance for step/endpoint consistency

        :raises InvalidGridDef: if the specification is incomplete or
            inconsistent in this context.
        '''
        if self.values:
            return self.values

        start = self.start
        stop = self.stop
        step = self.step
        n = self.n

        # A negative step means counting downwards; swap the fallback
        # bounds accordingly.
        swap = step is not None and step < 0.
        if start is None:
            start = [mi, ma][swap]
        if stop is None:
            stop = [ma, mi][swap]
        if step is None and inc is not None:
            step = [inc, -inc][ma < mi]

        if start is None or stop is None:
            raise InvalidGridDef(
                'Cannot use range specification "%s" without start '
                'and stop in this context' % self)

        if step is None and n is None:
            step = stop - start

        if n is None:
            if (step < 0) != (stop - start < 0):
                raise InvalidGridDef(
                    'Range specification "%s" has inconsistent ordering '
                    '(step < 0 => stop > start)' % self)

            n = int(round((stop - start) / step)) + 1
            stop2 = start + (n - 1) * step
            if abs(stop - stop2) > eps:
                # Step does not evenly divide the interval: truncate to
                # the last value not exceeding stop.
                n = int(math.floor((stop - start) / step)) + 1
                stop = start + (n - 1) * step
            else:
                stop = stop2

        if start == stop:
            n = 1

        if self.spacing == 'lin':
            vals = num.linspace(start, stop, n)

        elif self.spacing in ('log', 'symlog'):
            if start > 0. and stop > 0.:
                vals = num.exp(num.linspace(num.log(start),
                                            num.log(stop), n))
            elif start < 0. and stop < 0.:
                vals = -num.exp(num.linspace(num.log(-start),
                                             num.log(-stop), n))
            else:
                raise InvalidGridDef(
                    'Log ranges should not include or cross zero '
                    '(in range specification "%s").' % self)

            if self.spacing == 'symlog':
                # Mirror the values around zero for symmetric log spacing.
                nvals = - vals
                vals = num.concatenate((nvals[::-1], vals))

        if self.relative in ('add', 'mult') and base is None:
            raise InvalidGridDef(
                'Cannot use relative range specification in this context.')

        vals = self.make_relative(base, vals)

        return list(map(float, vals))

    def make_relative(self, base, vals):
        '''
        Offset (``add``) or scale (``mult``) the values by ``base``,
        in place.
        '''
        if self.relative == 'add':
            vals += base

        if self.relative == 'mult':
            vals *= base

        return vals

648 

649 

class GridDefElement(Object):
    '''
    Single parameter dimension of a grid definition.

    Couples a parameter name with a :py:class:`Range`, optionally parsed
    from a ``'param = range'`` shorthand string.
    '''

    param = meta.StringID.T()
    rs = Range.T()

    def __init__(self, shorthand=None, **kwargs):
        if shorthand is not None:
            parts = shorthand.split('=')
            if len(parts) != 2:
                raise InvalidGridDef(
                    'Invalid grid specification element: %s' % shorthand)

            name, spec = parts[0].strip(), parts[1].strip()

            kwargs['param'] = name
            kwargs['rs'] = Range(spec)

        Object.__init__(self, **kwargs)

    def shorthand(self):
        return self.param + ' = ' + str(self.rs)

671 

672 

class GridDef(Object):
    '''
    Grid definition: a collection of parameter dimensions.

    Can be parsed from a shorthand string with one ``'param = range'``
    element per line or separated by ``';'``.
    '''

    elements = List.T(GridDefElement.T())

    def __init__(self, shorthand=None, **kwargs):
        if shorthand is not None:
            t = shorthand.splitlines()
            tt = []
            for x in t:
                x = x.strip()
                if x:
                    tt.extend(x.split(';'))

            elements = []
            for se in tt:
                # Each shorthand element describes one GridDefElement.
                # Constructing GridDef here (as before) recursed infinitely
                # on any non-empty shorthand.
                elements.append(GridDefElement(se))

            kwargs['elements'] = elements

        Object.__init__(self, **kwargs)

    def shorthand(self):
        return '; '.join(str(x) for x in self.elements)

696 

697 

class Cloneable(object):
    '''
    Mix-in providing dict-like parameter access and cloning for guts
    objects.
    '''

    def __iter__(self):
        # Iterate over the guts property names declared on the class.
        return iter(self.T.propnames)

    def __getitem__(self, k):
        if k not in self.keys():
            raise KeyError(k)

        return getattr(self, k)

    def __setitem__(self, k, v):
        if k not in self.keys():
            raise KeyError(k)

        return setattr(self, k, v)

    def clone(self, **kwargs):
        '''
        Make a copy of the object.

        A new object of the same class is created and initialized with the
        parameters of the object on which this method is called on. If
        ``kwargs`` are given, these are used to override any of the
        initialization parameters.
        '''

        d = dict(self)
        for k in d:
            v = d[k]
            if isinstance(v, Cloneable):
                # Clone nested cloneable parameters so the copies do not
                # share mutable state.
                d[k] = v.clone()

        d.update(kwargs)
        return self.__class__(**d)

    @classmethod
    def keys(cls):
        '''
        Get list of the source model's parameter names.
        '''

        return cls.T.propnames

741 

742 

class STF(Object, Cloneable):

    '''
    Base class for source time functions.

    The base class itself acts as a unit pulse: all energy is released at
    the reference time (distributed over the two bracketing samples).
    '''

    def __init__(self, effective_duration=None, **kwargs):
        # ``effective_duration`` is an alternative way of specifying
        # ``duration``; it is converted with the subclass-specific factor.
        if effective_duration is not None:
            kwargs['duration'] = effective_duration / \
                self.factor_duration_to_effective()

        Object.__init__(self, **kwargs)

    @classmethod
    def factor_duration_to_effective(cls):
        '''
        Ratio between effective duration and nominal duration.
        '''
        return 1.0

    def centroid_time(self, tref):
        '''
        Centroid time of the STF for reference time ``tref``.
        '''
        return tref

    @property
    def effective_duration(self):
        # NOTE(review): ``duration`` is declared by subclasses, not here --
        # this property is only meaningful on subclasses; confirm.
        return self.duration * self.factor_duration_to_effective()

    def discretize_t(self, deltat, tref):
        '''
        Discretize the STF onto the sampling raster given by ``deltat``.

        For the base class this is a unit pulse, split between the two
        samples bracketing ``tref`` by linear interpolation.

        :returns: ``(times, amplitudes)`` arrays; amplitudes sum to 1.
        '''
        tl = math.floor(tref / deltat) * deltat
        th = math.ceil(tref / deltat) * deltat
        if tl == th:
            return num.array([tl], dtype=float), num.ones(1)
        else:
            return (
                num.array([tl, th], dtype=float),
                num.array([th - tref, tref - tl], dtype=float) / deltat)

    def base_key(self):
        # Key used to group sources whose STF discretization can be shared.
        return (type(self).__name__,)

779 

780 

# Default STF instance: an instantaneous (unit pulse) moment release.
g_unit_pulse = STF()

782 

783 

def sshift(times, amplitudes, tshift, deltat):
    '''
    Shift a sampled signal by ``tshift`` using linear interpolation.

    The shift is split between the two nearest multiples of ``deltat``;
    the output gains one extra sample to hold the spill-over.

    :returns: ``(times, amplitudes)`` of the shifted signal; the input
        arrays are returned unchanged when ``tshift`` is an exact multiple
        of ``deltat``.
    '''

    t_lo = math.floor(tshift / deltat) * deltat
    t_hi = math.ceil(tshift / deltat) * deltat
    if t_lo == t_hi:
        # Shift falls exactly on the raster -- nothing to interpolate.
        return times, amplitudes

    # Interpolation weights for the lower/upper raster time.
    w_lo = (t_hi - tshift) / deltat
    w_hi = (tshift - t_lo) / deltat

    shifted = num.zeros(amplitudes.size + 1, dtype=float)
    shifted[:-1] += w_lo * amplitudes
    shifted[1:] += w_hi * amplitudes

    new_times = num.arange(times.size + 1, dtype=float) * \
        deltat + times[0] + t_lo

    return new_times, shifted

800 

801 

class BoxcarSTF(STF):

    '''
    Boxcar type source time function.

    .. figure :: /static/stf-BoxcarSTF.svg
        :width: 40%
        :align: center
        :alt: boxcar source time function
    '''

    duration = Float.T(
        default=0.0,
        help='duration of the boxcar')

    anchor = Float.T(
        default=0.0,
        help='anchor point with respect to source.time: ('
             '-1.0: left -> source duration [0, T] ~ hypocenter time, '
             ' 0.0: center -> source duration [-T/2, T/2] ~ centroid time, '
             '+1.0: right -> source duration [-T, 0] ~ rupture end time)')

    @classmethod
    def factor_duration_to_effective(cls):
        return 1.0

    def centroid_time(self, tref):
        return tref - 0.5 * self.duration * self.anchor

    def discretize_t(self, deltat, tref):
        '''
        Discretize the boxcar onto the sampling raster.

        :returns: ``(times, amplitudes)``; amplitudes sum to 1.
        '''
        # Nominal extent of the boxcar, controlled by the anchor.
        tmin_stf = tref - self.duration * (self.anchor + 1.) * 0.5
        tmax_stf = tref + self.duration * (1. - self.anchor) * 0.5
        # Snap the extent to the sampling raster.
        tmin = round(tmin_stf / deltat) * deltat
        tmax = round(tmax_stf / deltat) * deltat
        nt = int(round((tmax - tmin) / deltat)) + 1
        times = num.linspace(tmin, tmax, nt)
        amplitudes = num.ones_like(times)
        if times.size > 1:
            # Integrate the piecewise-linear boxcar shape over each sample
            # cell to obtain normalized sample weights.
            t_edges = num.linspace(
                tmin - 0.5 * deltat, tmax + 0.5 * deltat, nt + 1)
            t = tmin_stf + self.duration * num.array(
                [0.0, 0.0, 1.0, 1.0], dtype=float)
            f = num.array([0., 1., 1., 0.], dtype=float)
            amplitudes = util.plf_integrate_piecewise(t_edges, t, f)
            amplitudes /= num.sum(amplitudes)

        # Compensate the discretization's centroid shift so the discrete
        # STF keeps the exact centroid time.
        tshift = (num.sum(amplitudes * times) - self.centroid_time(tref))

        return sshift(times, amplitudes, -tshift, deltat)

    def base_key(self):
        return (type(self).__name__, self.duration, self.anchor)

854 

855 

class TriangularSTF(STF):

    '''
    Triangular type source time function.

    .. figure :: /static/stf-TriangularSTF.svg
        :width: 40%
        :align: center
        :alt: triangular source time function
    '''

    duration = Float.T(
        default=0.0,
        help='baseline of the triangle')

    peak_ratio = Float.T(
        default=0.5,
        help='fraction of time compared to duration, '
             'when the maximum amplitude is reached')

    anchor = Float.T(
        default=0.0,
        help='anchor point with respect to source.time: ('
             '-1.0: left -> source duration [0, T] ~ hypocenter time, '
             ' 0.0: center -> source duration [-T/2, T/2] ~ centroid time, '
             '+1.0: right -> source duration [-T, 0] ~ rupture end time)')

    @classmethod
    def factor_duration_to_effective(cls, peak_ratio=None):
        '''
        Ratio between effective duration and baseline duration.
        '''
        if peak_ratio is None:
            peak_ratio = cls.peak_ratio.default()

        return math.sqrt((peak_ratio**2 - peak_ratio + 1.0) * 2.0 / 3.0)

    def __init__(self, effective_duration=None, **kwargs):
        # ``effective_duration`` is an alternative to ``duration``; the
        # conversion factor depends on the peak ratio.
        if effective_duration is not None:
            kwargs['duration'] = effective_duration / \
                self.factor_duration_to_effective(
                    kwargs.get('peak_ratio', None))

        STF.__init__(self, **kwargs)

    @property
    def centroid_ratio(self):
        # Centroid position of the triangle as a fraction of duration.
        ra = self.peak_ratio
        rb = 1.0 - ra
        return self.peak_ratio + (rb**2 / 3. - ra**2 / 3.) / (ra + rb)

    def centroid_time(self, tref):
        ca = self.centroid_ratio
        cb = 1.0 - ca
        if self.anchor <= 0.:
            return tref - ca * self.duration * self.anchor
        else:
            return tref - cb * self.duration * self.anchor

    @property
    def effective_duration(self):
        return self.duration * self.factor_duration_to_effective(
            self.peak_ratio)

    def tminmax_stf(self, tref):
        '''
        Nominal start and end time of the triangle for reference ``tref``.
        '''
        ca = self.centroid_ratio
        cb = 1.0 - ca
        if self.anchor <= 0.:
            tmin_stf = tref - ca * self.duration * (self.anchor + 1.)
            tmax_stf = tmin_stf + self.duration
        else:
            tmax_stf = tref + cb * self.duration * (1. - self.anchor)
            tmin_stf = tmax_stf - self.duration

        return tmin_stf, tmax_stf

    def discretize_t(self, deltat, tref):
        '''
        Discretize the triangle onto the sampling raster.

        :returns: ``(times, amplitudes)``; amplitudes sum to 1.
        '''
        tmin_stf, tmax_stf = self.tminmax_stf(tref)

        tmin = round(tmin_stf / deltat) * deltat
        tmax = round(tmax_stf / deltat) * deltat
        nt = int(round((tmax - tmin) / deltat)) + 1
        if nt > 1:
            # Integrate the piecewise-linear triangle over each sample
            # cell to obtain normalized sample weights.
            t_edges = num.linspace(
                tmin - 0.5 * deltat, tmax + 0.5 * deltat, nt + 1)
            t = tmin_stf + self.duration * num.array(
                [0.0, self.peak_ratio, 1.0], dtype=float)
            f = num.array([0., 1., 0.], dtype=float)
            amplitudes = util.plf_integrate_piecewise(t_edges, t, f)
            amplitudes /= num.sum(amplitudes)
        else:
            amplitudes = num.ones(1)

        times = num.linspace(tmin, tmax, nt)
        return times, amplitudes

    def base_key(self):
        return (
            type(self).__name__, self.duration, self.peak_ratio, self.anchor)

952 

953 

class HalfSinusoidSTF(STF):

    '''
    Half sinusoid type source time function.

    .. figure :: /static/stf-HalfSinusoidSTF.svg
        :width: 40%
        :align: center
        :alt: half-sinusouid source time function
    '''

    duration = Float.T(
        default=0.0,
        help='duration of the half-sinusoid (baseline)')

    anchor = Float.T(
        default=0.0,
        help='anchor point with respect to source.time: ('
             '-1.0: left -> source duration [0, T] ~ hypocenter time, '
             ' 0.0: center -> source duration [-T/2, T/2] ~ centroid time, '
             '+1.0: right -> source duration [-T, 0] ~ rupture end time)')

    exponent = Int.T(
        default=1,
        help='set to 2 to use square of the half-period sinusoidal function.')

    def __init__(self, effective_duration=None, **kwargs):
        # ``effective_duration`` is an alternative to ``duration``; the
        # conversion factor depends on the exponent.
        if effective_duration is not None:
            kwargs['duration'] = effective_duration / \
                self.factor_duration_to_effective(
                    kwargs.get('exponent', 1))

        STF.__init__(self, **kwargs)

    @classmethod
    def factor_duration_to_effective(cls, exponent):
        '''
        Ratio between effective duration and nominal duration.

        :raises ValueError: if ``exponent`` is not 1 or 2.
        '''
        if exponent == 1:
            return math.sqrt(3.0 * math.pi**2 - 24.0) / math.pi
        elif exponent == 2:
            return math.sqrt(math.pi**2 - 6) / math.pi
        else:
            raise ValueError('Exponent for HalfSinusoidSTF must be 1 or 2.')

    @property
    def effective_duration(self):
        return self.duration * self.factor_duration_to_effective(self.exponent)

    def centroid_time(self, tref):
        return tref - 0.5 * self.duration * self.anchor

    def discretize_t(self, deltat, tref):
        '''
        Discretize the half-sinusoid onto the sampling raster.

        Sample weights are obtained by exact integration of the analytic
        antiderivative over each sample cell, normalized to sum to 1.
        '''
        tmin_stf = tref - self.duration * (self.anchor + 1.) * 0.5
        tmax_stf = tref + self.duration * (1. - self.anchor) * 0.5
        tmin = round(tmin_stf / deltat) * deltat
        tmax = round(tmax_stf / deltat) * deltat
        nt = int(round((tmax - tmin) / deltat)) + 1
        if nt > 1:
            # Clip the cell edges to the STF support before evaluating the
            # antiderivative.
            t_edges = num.maximum(tmin_stf, num.minimum(tmax_stf, num.linspace(
                tmin - 0.5 * deltat, tmax + 0.5 * deltat, nt + 1)))

            if self.exponent == 1:
                fint = -num.cos(
                    (t_edges - tmin_stf) * (math.pi / self.duration))

            elif self.exponent == 2:
                fint = (t_edges - tmin_stf) / self.duration \
                    - 1.0 / (2.0 * math.pi) * num.sin(
                        (t_edges - tmin_stf) * (2.0 * math.pi / self.duration))
            else:
                raise ValueError(
                    'Exponent for HalfSinusoidSTF must be 1 or 2.')

            amplitudes = fint[1:] - fint[:-1]
            amplitudes /= num.sum(amplitudes)
        else:
            amplitudes = num.ones(1)

        times = num.linspace(tmin, tmax, nt)
        return times, amplitudes

    def base_key(self):
        # Include the exponent: it changes the discretized shape (see
        # discretize_t), so instances differing only in exponent must not
        # share a discretization key.
        return (type(self).__name__, self.duration, self.anchor,
                self.exponent)

1036 

1037 

class SmoothRampSTF(STF):
    '''
    Smooth-ramp type source time function for near-field displacement.
    Based on moment function of double-couple point source proposed by Bruestle
    and Mueller (PEPI, 1983).

    .. [1] W. Bruestle, G. Mueller (1983), Moment and duration of shallow
        earthquakes from Love-wave modelling for regional distances, PEPI 32,
        312-324.

    .. figure :: /static/stf-SmoothRampSTF.svg
        :width: 40%
        :alt: smooth ramp source time function
    '''
    duration = Float.T(
        default=0.0,
        help='duration of the ramp (baseline)')

    rise_ratio = Float.T(
        default=0.5,
        help='fraction of time compared to duration, '
             'when the maximum amplitude is reached')

    anchor = Float.T(
        default=0.0,
        help='anchor point with respect to source.time: ('
             '-1.0: left -> source duration ``[0, T]`` ~ hypocenter time, '
             '0.0: center -> source duration ``[-T/2, T/2]`` ~ centroid time, '
             '+1.0: right -> source duration ``[-T, 0]`` ~ rupture end time)')

    def discretize_t(self, deltat, tref):
        '''
        Discretize the smooth ramp onto the sampling raster.

        :returns: ``(times, amplitudes)``; amplitudes sum to 1.
        '''
        # Nominal extent of the STF, controlled by the anchor, snapped to
        # the sampling raster.
        tmin_stf = tref - self.duration * (self.anchor + 1.) * 0.5
        tmax_stf = tref + self.duration * (1. - self.anchor) * 0.5
        tmin = round(tmin_stf / deltat) * deltat
        tmax = round(tmax_stf / deltat) * deltat
        D = round((tmax - tmin) / deltat) * deltat
        nt = int(round(D / deltat)) + 1
        times = num.linspace(tmin, tmax, nt)
        if nt > 1:
            rise_time = self.rise_ratio * self.duration
            amplitudes = num.ones_like(times)
            tp = tmin + rise_time
            # Apply the smooth cosine ramp to samples within the rise
            # time; later samples keep constant amplitude.
            ii = num.where(times <= tp)
            t_inc = times[ii]
            a = num.cos(num.pi * (t_inc - tmin_stf) / rise_time)
            b = num.cos(3 * num.pi * (t_inc - tmin_stf) / rise_time) - 1.0
            amplitudes[ii] = (9. / 16.) * (1 - a + (1. / 9.) * b)

            amplitudes /= num.sum(amplitudes)
        else:
            amplitudes = num.ones(1)

        return times, amplitudes

    def base_key(self):
        return (type(self).__name__,
                self.duration, self.rise_ratio, self.anchor)

1095 

1096 

class ResonatorSTF(STF):
    '''
    Simple resonator like source time function.

    .. math ::

        f(t) = 0 for t < 0
        f(t) = e^{-t/tau} * sin(2 * pi * f * t)


    .. figure :: /static/stf-SmoothRampSTF.svg
        :width: 40%
        :alt: smooth ramp source time function

    '''

    duration = Float.T(
        default=0.0,
        help='decay time')

    frequency = Float.T(
        default=1.0,
        help='resonance frequency')

    def discretize_t(self, deltat, tref):
        '''
        Discretize the damped oscillation onto the sampling raster.

        The signal is sampled starting at ``tref`` over three decay times.

        NOTE(review): unlike the other STFs, the amplitudes are not
        normalized to sum to 1 here -- presumably intentional for a
        resonator; confirm.
        '''
        tmin_stf = tref
        tmax_stf = tref + self.duration * 3
        tmin = math.floor(tmin_stf / deltat) * deltat
        tmax = math.ceil(tmax_stf / deltat) * deltat
        times = util.arange2(tmin, tmax, deltat)
        # Exponentially decaying sinusoid relative to the reference time.
        amplitudes = num.exp(-(times - tref) / self.duration) \
            * num.sin(2.0 * num.pi * self.frequency * (times - tref))

        return times, amplitudes

    def base_key(self):
        return (type(self).__name__,
                self.duration, self.frequency)

1135 

1136 

class STFMode(StringChoice):
    '''
    Mode of source time function application: ``'pre'`` applies the STF
    during source discretization (before stacking of the Green's
    functions), ``'post'`` applies it in post-processing (after stacking).
    '''
    choices = ['pre', 'post']

1139 

1140 

class Source(Location, Cloneable):
    '''
    Base class for all source models.

    Provides the common attributes (origin time, source time function and
    its application mode) and the common machinery for discretization-key
    computation, STF dispatch and conversion from/to
    :py:class:`pyrocko.model.Event` objects.
    '''

    # short text label for the source (used e.g. as event name)
    name = String.T(optional=True, default='')

    time = Timestamp.T(
        default=Timestamp.D('1970-01-01 00:00:00'),
        help='source origin time.')

    stf = STF.T(
        optional=True,
        help='source time function.')

    stf_mode = STFMode.T(
        default='post',
        help='whether to apply source time function in pre or '
             'post-processing.')

    def __init__(self, **kwargs):
        # all attribute handling is delegated to the Location base class
        Location.__init__(self, **kwargs)

    def update(self, **kwargs):
        '''
        Change some of the source models parameters.

        Example::

            >>> from pyrocko import gf
            >>> s = gf.DCSource()
            >>> s.update(strike=66., dip=33.)
            >>> print(s)
            --- !pf.DCSource
            depth: 0.0
            time: 1970-01-01 00:00:00
            magnitude: 6.0
            strike: 66.0
            dip: 33.0
            rake: 0.0

        '''

        for (k, v) in kwargs.items():
            # item assignment sets the corresponding source attribute
            self[k] = v

    def grid(self, **variables):
        '''
        Create grid of source model variations.

        :returns: :py:class:`SourceGrid` instance.

        Example::

            >>> from pyrocko import gf
            >>> base = DCSource()
            >>> R = gf.Range
            >>> for s in base.grid(strike=R(0., 90., 45.)):
            ...     print(s.strike)

        '''
        return SourceGrid(base=self, variables=variables)

    def base_key(self):
        '''
        Get key to decide about source discretization / GF stack sharing.

        When two source models differ only in amplitude and origin time, the
        discretization and the GF stacking can be done only once for a unit
        amplitude and a zero origin time and the amplitude and origin times of
        the seismograms can be applied during post-processing of the synthetic
        seismogram.

        For any derived parameterized source model, this method is called to
        decide if discretization and stacking of the source should be shared.
        When two source models return an equal vector of values discretization
        is shared.
        '''
        return (self.depth, self.lat, self.north_shift,
                self.lon, self.east_shift, self.time, type(self).__name__) + \
            self.effective_stf_pre().base_key()

    def get_factor(self):
        '''
        Get the scaling factor to be applied during post-processing.

        Discretization of the base seismogram is usually done for a unit
        amplitude, because a common factor can be efficiently multiplied to
        final seismograms. This eliminates to do repeat the stacking when
        creating seismograms for a series of source models only differing in
        amplitude.

        This method should return the scaling factor to apply in the
        post-processing (often this is simply the scalar moment of the source).
        '''

        return 1.0

    def effective_stf_pre(self):
        '''
        Return the STF applied before stacking of the Green's functions.

        This STF is used during discretization of the parameterized source
        models, i.e. to produce a temporal distribution of point sources.

        Handling of the STF before stacking of the GFs is less efficient but
        allows to use different source time functions for different parts of
        the source.
        '''

        if self.stf is not None and self.stf_mode == 'pre':
            return self.stf
        else:
            # fall back to a unit pulse (no temporal spreading)
            return g_unit_pulse

    def effective_stf_post(self):
        '''
        Return the STF applied after stacking of the Green's functions.

        This STF is used in the post-processing of the synthetic seismograms.

        Handling of the STF after stacking of the GFs is usually more efficient
        but is only possible when a common STF is used for all subsources.
        '''

        if self.stf is not None and self.stf_mode == 'post':
            return self.stf
        else:
            return g_unit_pulse

    def _dparams_base(self):
        # common constructor keyword arguments for the discretized source:
        # a single point at the source position and origin time
        return dict(times=arr(self.time),
                    lat=self.lat, lon=self.lon,
                    north_shifts=arr(self.north_shift),
                    east_shifts=arr(self.east_shift),
                    depths=arr(self.depth))

    def _hash(self):
        # hex SHA1 digest over the stringified base_key entries
        sha = sha1()
        for k in self.base_key():
            sha.update(str(k).encode())
        return sha.hexdigest()

    def _dparams_base_repeated(self, times):
        '''
        Like :py:meth:`_dparams_base`, but with the source position repeated
        once per entry of ``times`` (for temporally discretized sources).
        Passing ``times=None`` falls back to the single-point variant.
        '''
        if times is None:
            return self._dparams_base()

        nt = times.size
        north_shifts = num.repeat(self.north_shift, nt)
        east_shifts = num.repeat(self.east_shift, nt)
        depths = num.repeat(self.depth, nt)
        return dict(times=times,
                    lat=self.lat, lon=self.lon,
                    north_shifts=north_shifts,
                    east_shifts=east_shifts,
                    depths=depths)

    def pyrocko_event(self, store=None, target=None, **kwargs):
        '''
        Convert the source into a :py:class:`pyrocko.model.Event` object.

        If an STF is set, its effective duration becomes the event duration.
        Extra keyword arguments are passed through to the event constructor.
        '''
        duration = None
        if self.stf:
            duration = self.stf.effective_duration

        return model.Event(
            lat=self.lat,
            lon=self.lon,
            north_shift=self.north_shift,
            east_shift=self.east_shift,
            time=self.time,
            name=self.name,
            depth=self.depth,
            duration=duration,
            **kwargs)

    def outline(self, cs='xyz'):
        '''
        Get the source outline (a single point for a point source).

        :param cs: output coordinate system; one of ``'xyz'``, ``'xy'``,
            ``'latlon'`` or ``'lonlat'`` (see
            :ref:`coordinate-system-names`).
        '''
        points = num.atleast_2d(num.zeros([1, 3]))

        points[:, 0] += self.north_shift
        points[:, 1] += self.east_shift
        points[:, 2] += self.depth
        if cs == 'xyz':
            return points
        elif cs == 'xy':
            return points[:, :2]
        elif cs in ('latlon', 'lonlat'):
            latlon = ne_to_latlon(
                self.lat, self.lon, points[:, 0], points[:, 1])

            latlon = num.array(latlon).T
            if cs == 'latlon':
                return latlon
            else:
                # lonlat: swap columns
                return latlon[:, ::-1]

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        '''
        Create a source from a :py:class:`pyrocko.model.Event` object.

        If the event has a duration, a :py:class:`HalfSinusoidSTF` with
        matching effective duration is attached.

        :raises ConversionError: if the event has no depth information.
        '''
        if ev.depth is None:
            raise ConversionError(
                'Cannot convert event object to source object: '
                'no depth information available')

        stf = None
        if ev.duration is not None:
            stf = HalfSinusoidSTF(effective_duration=ev.duration)

        d = dict(
            name=ev.name,
            time=ev.time,
            lat=ev.lat,
            lon=ev.lon,
            north_shift=ev.north_shift,
            east_shift=ev.east_shift,
            depth=ev.depth,
            stf=stf)
        d.update(kwargs)
        return cls(**d)

    def get_magnitude(self):
        '''
        Get the moment magnitude; must be implemented in subclasses.
        '''
        raise NotImplementedError(
            '%s does not implement get_magnitude()'
            % self.__class__.__name__)

1360 

1361 

class SourceWithMagnitude(Source):
    '''
    Base class for sources containing a moment magnitude.

    A ``moment`` keyword is accepted as a convenience alias at
    construction time and converted to ``magnitude``.
    '''

    magnitude = Float.T(
        default=6.0,
        help='Moment magnitude Mw as in [Hanks and Kanamori, 1979]')

    def __init__(self, **kwargs):
        # accept ``moment`` as an alternative to ``magnitude``; an
        # explicitly given magnitude takes precedence
        if 'moment' in kwargs:
            mom = kwargs.pop('moment')
            if 'magnitude' not in kwargs:
                kwargs['magnitude'] = float(pmt.moment_to_magnitude(mom))

        Source.__init__(self, **kwargs)

    @property
    def moment(self):
        # scalar moment [Nm], derived from the magnitude
        return float(pmt.magnitude_to_moment(self.magnitude))

    @moment.setter
    def moment(self, value):
        self.magnitude = float(pmt.moment_to_magnitude(value))

    def pyrocko_event(self, store=None, target=None, **kwargs):
        '''
        Convert into an event, carrying over the magnitude.
        '''
        return Source.pyrocko_event(
            self, store, target,
            magnitude=self.magnitude,
            **kwargs)

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        '''
        Create a source from an event, adopting its magnitude if set.
        '''
        d = {}
        # FIX: test against None instead of truthiness — a legitimate
        # magnitude of exactly 0.0 was previously dropped
        if ev.magnitude is not None:
            d.update(magnitude=ev.magnitude)

        d.update(kwargs)
        return super(SourceWithMagnitude, cls).from_pyrocko_event(ev, **d)

    def get_magnitude(self):
        return self.magnitude

1404 

1405 

class DerivedMagnitudeError(ValidationError):
    '''
    Raised when a source magnitude cannot be derived, or when mutually
    exclusive source strength parameters are set simultaneously.
    '''
    pass

1408 

1409 

class SourceWithDerivedMagnitude(Source):
    '''
    Base class for sources whose magnitude is derived from other
    parameters (e.g. volume change or slip).
    '''

    class __T(Source.T):
        # guts type wrapper: extends validation so that parameter
        # conflict checks run whenever the object is validated

        def validate_extra(self, val):
            Source.T.validate_extra(self, val)
            val.check_conflicts()

    def check_conflicts(self):
        '''
        Check for parameter conflicts.

        To be overloaded in subclasses. Raises :py:exc:`DerivedMagnitudeError`
        on conflicts.
        '''
        pass

    def get_magnitude(self, store=None, target=None):
        '''
        Get the derived moment magnitude; to be overloaded in subclasses.

        :raises DerivedMagnitudeError: if no magnitude can be derived.
        '''
        raise DerivedMagnitudeError('No magnitude set.')

    def get_moment(self, store=None, target=None):
        '''
        Get the scalar moment [Nm] corresponding to the derived magnitude.
        '''
        return float(pmt.magnitude_to_moment(
            self.get_magnitude(store, target)))

    def pyrocko_moment_tensor(self, store=None, target=None):
        '''
        Return the equivalent moment tensor; to be overloaded in subclasses.
        '''
        raise NotImplementedError(
            '%s does not implement pyrocko_moment_tensor()'
            % self.__class__.__name__)

    def pyrocko_event(self, store=None, target=None, **kwargs):
        '''
        Convert into an event; moment tensor and magnitude are left unset
        when they cannot be derived.
        '''
        try:
            mt = self.pyrocko_moment_tensor(store, target)
            magnitude = self.get_magnitude()
        except (DerivedMagnitudeError, NotImplementedError):
            # derivation not possible without store/target or not
            # implemented — emit the event without these attributes
            mt = None
            magnitude = None

        return Source.pyrocko_event(
            self, store, target,
            moment_tensor=mt,
            magnitude=magnitude,
            **kwargs)

1452 

1453 

class ExplosionSource(SourceWithDerivedMagnitude):
    '''
    An isotropic explosion point source.

    The source strength is given either as moment magnitude (``magnitude``)
    or as volume change (``volume_change``); the two are mutually
    exclusive. If neither is set, a unit moment of 1 Nm is assumed.
    '''

    magnitude = Float.T(
        optional=True,
        help='moment magnitude Mw as in [Hanks and Kanamori, 1979]')

    volume_change = Float.T(
        optional=True,
        help='volume change of the explosion/implosion or '
             'the contracting/extending magmatic source. [m^3]')

    discretized_source_class = meta.DiscretizedExplosionSource

    def __init__(self, **kwargs):
        # accept ``moment`` as a convenience alias for ``magnitude``
        if 'moment' in kwargs:
            mom = kwargs.pop('moment')
            if 'magnitude' not in kwargs:
                kwargs['magnitude'] = float(pmt.moment_to_magnitude(mom))

        SourceWithDerivedMagnitude.__init__(self, **kwargs)

    def base_key(self):
        '''
        Extend the inherited key with ``volume_change``.
        '''
        return SourceWithDerivedMagnitude.base_key(self) + \
            (self.volume_change,)

    def check_conflicts(self):
        '''
        :raises DerivedMagnitudeError: if both ``magnitude`` and
            ``volume_change`` are set.
        '''
        if self.magnitude is not None and self.volume_change is not None:
            raise DerivedMagnitudeError(
                'Magnitude and volume_change are both defined.')

    def get_magnitude(self, store=None, target=None):
        '''
        Get the moment magnitude, deriving it from ``volume_change`` if
        necessary (this requires ``store`` for the shear modulus lookup).
        '''
        self.check_conflicts()

        if self.magnitude is not None:
            return self.magnitude

        elif self.volume_change is not None:
            moment = self.volume_change * \
                self.get_moment_to_volume_change_ratio(store, target)

            return float(pmt.moment_to_magnitude(abs(moment)))
        else:
            # neither set: magnitude of a unit moment
            return float(pmt.moment_to_magnitude(1.0))

    def get_volume_change(self, store=None, target=None):
        '''
        Get the volume change, deriving it from ``magnitude`` if necessary.
        '''
        self.check_conflicts()

        if self.volume_change is not None:
            return self.volume_change

        elif self.magnitude is not None:
            moment = float(pmt.magnitude_to_moment(self.magnitude))
            return moment / self.get_moment_to_volume_change_ratio(
                store, target)

        else:
            # FIX: also forward ``target`` here, so the same interpolation
            # method is used as in the branch above (previously the
            # default interpolation was silently used).
            return 1.0 / self.get_moment_to_volume_change_ratio(
                store, target)

    def get_moment_to_volume_change_ratio(self, store, target=None):
        '''
        Conversion factor between scalar moment and volume change.

        The factor is ``3 * mu`` with the shear modulus ``mu`` taken from
        the store's earth model at the source position.

        :raises DerivedMagnitudeError: if no store is given or the source
            position is outside the model bounds.
        '''
        if store is None:
            raise DerivedMagnitudeError(
                'Need earth model to convert between volume change and '
                'magnitude.')

        points = num.array(
            [[self.north_shift, self.east_shift, self.depth]], dtype=float)

        interpolation = target.interpolation if target else 'multilinear'
        try:
            shear_moduli = store.config.get_shear_moduli(
                self.lat, self.lon,
                points=points,
                interpolation=interpolation)[0]
        except meta.OutOfBounds:
            raise DerivedMagnitudeError(
                'Could not get shear modulus at source position.')

        return float(3. * shear_moduli)

    def get_factor(self):
        # scaling is already baked into the discretized amplitudes
        return 1.0

    def discretize_basesource(self, store, target=None):
        '''
        Discretize into a temporal distribution of isotropic point sources.
        '''
        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)

        # sqrt(2/3) converts the scalar moment to the diagonal entries of
        # the isotropic moment tensor (same factor as in
        # pyrocko_moment_tensor below)
        amplitudes *= self.get_moment(store, target) * math.sqrt(2. / 3.)

        if self.volume_change is not None:
            if self.volume_change < 0.:
                # implosion: flip the sign of the source strength
                amplitudes *= -1

        return meta.DiscretizedExplosionSource(
            m0s=amplitudes,
            **self._dparams_base_repeated(times))

    def pyrocko_moment_tensor(self, store=None, target=None):
        '''
        Return the equivalent isotropic moment tensor.
        '''
        a = self.get_moment(store, target) * math.sqrt(2. / 3.)
        return pmt.MomentTensor(m=pmt.symmat6(a, a, a, 0., 0., 0.))

1556 

1557 

class RectangularExplosionSource(ExplosionSource):
    '''
    Rectangular or line explosion source.
    '''

    discretized_source_class = meta.DiscretizedExplosionSource

    strike = Float.T(
        default=0.0,
        help='strike direction in [deg], measured clockwise from north')

    dip = Float.T(
        default=90.0,
        help='dip angle in [deg], measured downward from horizontal')

    length = Float.T(
        default=0.,
        help='length of rectangular source area [m]')

    width = Float.T(
        default=0.,
        help='width of rectangular source area [m]')

    # FIX: the help text previously advertised ``center_left`` and
    # ``center_right`` which are not valid choices — text now matches the
    # actual choices list.
    anchor = StringChoice.T(
        choices=['top', 'top_left', 'top_right', 'center', 'bottom',
                 'bottom_left', 'bottom_right'],
        default='center',
        optional=True,
        help='Anchor point for positioning the plane, can be: ``top, '
             'center, bottom, top_left, top_right, bottom_left, '
             'bottom_right``.')

    nucleation_x = Float.T(
        optional=True,
        help='horizontal position of rupture nucleation in normalized fault '
             'plane coordinates (-1 = left edge, +1 = right edge)')

    nucleation_y = Float.T(
        optional=True,
        help='down-dip position of rupture nucleation in normalized fault '
             'plane coordinates (-1 = upper edge, +1 = lower edge)')

    velocity = Float.T(
        default=3500.,
        help='speed of explosion front [m/s]')

    aggressive_oversampling = Bool.T(
        default=False,
        help='Aggressive oversampling for basesource discretization. '
             'When using \'multilinear\' interpolation oversampling has'
             ' practically no effect.')

    def base_key(self):
        '''
        Extend the inherited key with the plane geometry parameters.
        '''
        return Source.base_key(self) + (self.strike, self.dip, self.length,
                                        self.width, self.nucleation_x,
                                        self.nucleation_y, self.velocity,
                                        self.anchor)

    def discretize_basesource(self, store, target=None):
        '''
        Discretize the rectangle into a spatio-temporal distribution of
        point explosions.
        '''
        # convert normalized nucleation coordinates to offsets in [m]
        if self.nucleation_x is not None:
            nucx = self.nucleation_x * 0.5 * self.length
        else:
            nucx = None

        if self.nucleation_y is not None:
            nucy = self.nucleation_y * 0.5 * self.width
        else:
            nucy = None

        stf = self.effective_stf_pre()

        points, times, amplitudes, dl, dw, nl, nw = discretize_rect_source(
            store.config.deltas, store.config.deltat,
            self.time, self.north_shift, self.east_shift, self.depth,
            self.strike, self.dip, self.length, self.width, self.anchor,
            self.velocity, stf=stf, nucleation_x=nucx, nucleation_y=nucy)

        # normalize sub-source weights, then scale by the total moment
        amplitudes /= num.sum(amplitudes)
        amplitudes *= self.get_moment(store, target)

        return meta.DiscretizedExplosionSource(
            lat=self.lat,
            lon=self.lon,
            times=times,
            north_shifts=points[:, 0],
            east_shifts=points[:, 1],
            depths=points[:, 2],
            m0s=amplitudes)

    def outline(self, cs='xyz'):
        '''
        Get the corners of the source plane in the requested coordinate
        system (see :ref:`coordinate-system-names`).
        '''
        points = outline_rect_source(self.strike, self.dip, self.length,
                                     self.width, self.anchor)

        points[:, 0] += self.north_shift
        points[:, 1] += self.east_shift
        points[:, 2] += self.depth
        if cs == 'xyz':
            return points
        elif cs == 'xy':
            return points[:, :2]
        elif cs in ('latlon', 'lonlat'):
            latlon = ne_to_latlon(
                self.lat, self.lon, points[:, 0], points[:, 1])

            latlon = num.array(latlon).T
            if cs == 'latlon':
                return latlon
            else:
                return latlon[:, ::-1]

    def get_nucleation_abs_coord(self, cs='xy'):
        '''
        Absolute coordinates of the rupture nucleation point, or
        ``(None, None)`` when no nucleation position is set.
        '''
        if self.nucleation_x is None:
            return None, None

        coords = from_plane_coords(self.strike, self.dip, self.length,
                                   self.width, self.depth, self.nucleation_x,
                                   self.nucleation_y, lat=self.lat,
                                   lon=self.lon, north_shift=self.north_shift,
                                   east_shift=self.east_shift, cs=cs)
        return coords

1680 

1681 

class DCSource(SourceWithMagnitude):
    '''
    A double-couple point source.
    '''

    strike = Float.T(
        default=0.0,
        help='strike direction in [deg], measured clockwise from north')

    dip = Float.T(
        default=90.0,
        help='dip angle in [deg], measured downward from horizontal')

    rake = Float.T(
        default=0.0,
        help='rake angle in [deg], '
             'measured counter-clockwise from right-horizontal '
             'in on-plane view')

    discretized_source_class = meta.DiscretizedMTSource

    def base_key(self):
        '''
        Extend the inherited key with the mechanism angles.
        '''
        mechanism = (self.strike, self.dip, self.rake)
        return Source.base_key(self) + mechanism

    def get_factor(self):
        '''
        Scalar moment, applied during post-processing.
        '''
        scalar_moment = pmt.magnitude_to_moment(self.magnitude)
        return float(scalar_moment)

    def discretize_basesource(self, store, target=None):
        '''
        Discretize into a temporal distribution of unit moment tensors.
        '''
        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)

        mechanism = pmt.MomentTensor(
            strike=self.strike, dip=self.dip, rake=self.rake)

        m6s = mechanism.m6()[num.newaxis, :] * amplitudes[:, num.newaxis]
        return meta.DiscretizedMTSource(
            m6s=m6s,
            **self._dparams_base_repeated(times))

    def pyrocko_moment_tensor(self, store=None, target=None):
        '''
        Return the moment tensor for this mechanism and moment.
        '''
        return pmt.MomentTensor(
            strike=self.strike,
            dip=self.dip,
            rake=self.rake,
            scalar_moment=self.moment)

    def pyrocko_event(self, store=None, target=None, **kwargs):
        '''
        Convert into an event, attaching the moment tensor.
        '''
        return SourceWithMagnitude.pyrocko_event(
            self, store, target,
            moment_tensor=self.pyrocko_moment_tensor(store, target),
            **kwargs)

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        '''
        Create from an event, adopting its mechanism if a moment tensor is
        available.
        '''
        params = {}
        mt = ev.moment_tensor
        if mt:
            (strike, dip, rake), _ = mt.both_strike_dip_rake()
            params['strike'] = float(strike)
            params['dip'] = float(dip)
            params['rake'] = float(rake)
            params['magnitude'] = float(mt.moment_magnitude())

        params.update(kwargs)
        return super(DCSource, cls).from_pyrocko_event(ev, **params)

1746 

1747 

class CLVDSource(SourceWithMagnitude):
    '''
    A pure CLVD point source.
    '''

    discretized_source_class = meta.DiscretizedMTSource

    azimuth = Float.T(
        default=0.0,
        help='azimuth direction of largest dipole, clockwise from north [deg]')

    dip = Float.T(
        default=90.,
        help='dip direction of largest dipole, downward from horizontal [deg]')

    def base_key(self):
        '''
        Extend the inherited key with the CLVD orientation.
        '''
        return Source.base_key(self) + (self.azimuth, self.dip)

    def get_factor(self):
        # scalar moment, applied in post-processing
        return float(pmt.magnitude_to_moment(self.magnitude))

    @property
    def m6(self):
        '''
        Moment tensor (6 components) of the oriented CLVD.
        '''
        # unrotated CLVD with the largest eigenvalue on the vertical axis;
        # the sqrt(4/3) factor presumably normalizes the scalar moment to
        # get_factor() — confirm against pmt conventions
        a = math.sqrt(4. / 3.) * self.get_factor()
        m = pmt.symmat6(-0.5 * a, -0.5 * a, a, 0., 0., 0.)
        # rotate symmetry axis to the configured dip and azimuth
        rotmat1 = pmt.euler_to_matrix(
            d2r * (self.dip - 90.),
            d2r * (self.azimuth - 90.),
            0.)
        m = rotmat1.T * m * rotmat1
        return pmt.to6(m)

    @property
    def m6_astuple(self):
        # moment tensor components as a plain tuple
        return tuple(self.m6.tolist())

    def discretize_basesource(self, store, target=None):
        '''
        Discretize into a temporal distribution of unit CLVD tensors
        (the factor is applied in post-processing, see get_factor).
        '''
        factor = self.get_factor()
        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)
        return meta.DiscretizedMTSource(
            m6s=self.m6[num.newaxis, :] * amplitudes[:, num.newaxis] / factor,
            **self._dparams_base_repeated(times))

    def pyrocko_moment_tensor(self, store=None, target=None):
        '''
        Return the CLVD moment tensor.
        '''
        return pmt.MomentTensor(m=pmt.symmat6(*self.m6_astuple))

    def pyrocko_event(self, store=None, target=None, **kwargs):
        '''
        Convert into an event with moment tensor and derived magnitude.
        '''
        # FIX: compute the moment tensor once and reuse it — it was
        # previously computed twice (once for the local variable, once
        # again inline).
        mt = self.pyrocko_moment_tensor(store, target)
        return Source.pyrocko_event(
            self, store, target,
            moment_tensor=mt,
            magnitude=float(mt.moment_magnitude()),
            **kwargs)

1802 

1803 

class VLVDSource(SourceWithDerivedMagnitude):
    '''
    Volumetric linear vector dipole source.

    This source is a parameterization for a restricted moment tensor point
    source, useful to represent dyke or sill like inflation or deflation
    sources. The restriction is such that the moment tensor is rotational
    symmetric. It can be represented by a superposition of a linear vector
    dipole (here we use a CLVD for convenience) and an isotropic component.
    The restricted moment tensor has 4 degrees of freedom: 2 independent
    eigenvalues and 2 rotation angles orienting the symmetry axis.

    In this parameterization, the isotropic component is controlled by
    ``volume_change``. To define the moment tensor, it must be converted to
    the scalar moment of the MT's isotropic component. For the conversion,
    the shear modulus at the source's position must be known. This value is
    extracted from the earth model defined in the GF store in use.

    The CLVD part is controlled by its scalar moment :math:`M_0`:
    ``clvd_moment``. The sign of ``clvd_moment`` is used to switch between a
    positive or negative CLVD (the sign of the largest eigenvalue).
    '''

    discretized_source_class = meta.DiscretizedMTSource

    azimuth = Float.T(
        default=0.0,
        help='azimuth direction of symmetry axis, clockwise from north [deg].')

    dip = Float.T(
        default=90.,
        help='dip direction of symmetry axis, downward from horizontal [deg].')

    volume_change = Float.T(
        default=0.,
        help='volume change of the inflation/deflation [m^3].')

    clvd_moment = Float.T(
        default=0.,
        help='scalar moment :math:`M_0` of the CLVD component [Nm]. The sign '
             'controls the sign of the CLVD (the sign of its largest '
             'eigenvalue).')

    def get_moment_to_volume_change_ratio(self, store, target):
        '''
        Conversion factor (``3 * mu``) between scalar moment and volume
        change; requires both ``store`` and ``target``.

        :raises DerivedMagnitudeError: if store or target is missing, or
            the source position is outside the model bounds.
        '''
        if store is None or target is None:
            raise DerivedMagnitudeError(
                'Need earth model to convert between volume change and '
                'magnitude.')

        points = num.array(
            [[self.north_shift, self.east_shift, self.depth]], dtype=float)

        try:
            shear_moduli = store.config.get_shear_moduli(
                self.lat, self.lon,
                points=points,
                interpolation=target.interpolation)[0]
        except meta.OutOfBounds:
            raise DerivedMagnitudeError(
                'Could not get shear modulus at source position.')

        return float(3. * shear_moduli)

    def base_key(self):
        '''
        Extend the inherited key with orientation and strength parameters.
        '''
        return Source.base_key(self) + \
            (self.azimuth, self.dip, self.volume_change, self.clvd_moment)

    def get_magnitude(self, store=None, target=None):
        '''
        Moment magnitude of the combined (CLVD + isotropic) tensor.
        '''
        mt = self.pyrocko_moment_tensor(store, target)
        return float(pmt.moment_to_magnitude(mt.moment))

    def get_m6(self, store, target):
        '''
        Build the 6-component moment tensor: oriented CLVD part plus
        isotropic part derived from the volume change.
        '''
        a = math.sqrt(4. / 3.) * self.clvd_moment
        m_clvd = pmt.symmat6(-0.5 * a, -0.5 * a, a, 0., 0., 0.)

        # rotate the CLVD symmetry axis to the configured dip and azimuth
        rotmat1 = pmt.euler_to_matrix(
            d2r * (self.dip - 90.),
            d2r * (self.azimuth - 90.),
            0.)
        m_clvd = rotmat1.T * m_clvd * rotmat1

        m_iso = self.volume_change * \
            self.get_moment_to_volume_change_ratio(store, target)

        m_iso = pmt.symmat6(m_iso, m_iso, m_iso, 0.,
                            0., 0.,) * math.sqrt(2. / 3)

        m = pmt.to6(m_clvd) + pmt.to6(m_iso)
        return m

    def get_moment(self, store=None, target=None):
        '''
        Scalar moment [Nm] of the combined tensor.
        '''
        return float(pmt.magnitude_to_moment(
            self.get_magnitude(store, target)))

    def get_m6_astuple(self, store, target):
        # moment tensor components as a plain tuple
        m6 = self.get_m6(store, target)
        return tuple(m6.tolist())

    def discretize_basesource(self, store, target=None):
        '''
        Discretize into a temporal distribution of moment tensors.
        '''
        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)

        m6 = self.get_m6(store, target)

        # FIX: broadcast the 6-component tensor over the STF samples.
        # Previously ``m6 *= amplitudes`` failed with a shape mismatch
        # whenever the STF discretized to more than one time sample;
        # for a single sample the result is unchanged.
        m6s = m6[num.newaxis, :] * (
            amplitudes / self.get_factor())[:, num.newaxis]

        return meta.DiscretizedMTSource(
            m6s=m6s,
            **self._dparams_base_repeated(times))

    def pyrocko_moment_tensor(self, store=None, target=None):
        '''
        Return the combined moment tensor.
        '''
        m6_astuple = self.get_m6_astuple(store, target)
        return pmt.MomentTensor(m=pmt.symmat6(*m6_astuple))

1916 

1917 

class MTSource(Source):
    '''
    A moment tensor point source.
    '''

    discretized_source_class = meta.DiscretizedMTSource

    mnn = Float.T(
        default=1.,
        help='north-north component of moment tensor in [Nm]')

    mee = Float.T(
        default=1.,
        help='east-east component of moment tensor in [Nm]')

    mdd = Float.T(
        default=1.,
        help='down-down component of moment tensor in [Nm]')

    mne = Float.T(
        default=0.,
        help='north-east component of moment tensor in [Nm]')

    mnd = Float.T(
        default=0.,
        help='north-down component of moment tensor in [Nm]')

    med = Float.T(
        default=0.,
        help='east-down component of moment tensor in [Nm]')

    def __init__(self, **kwargs):
        # accept ``m6`` (sequence of the six components) as a shortcut
        if 'm6' in kwargs:
            for (k, v) in zip('mnn mee mdd mne mnd med'.split(),
                              kwargs.pop('m6')):
                kwargs[k] = float(v)

        Source.__init__(self, **kwargs)

    @property
    def m6(self):
        # tensor components as a 6-element array
        return num.array(self.m6_astuple)

    @property
    def m6_astuple(self):
        # tensor components as a plain tuple (mnn, mee, mdd, mne, mnd, med)
        return (self.mnn, self.mee, self.mdd, self.mne, self.mnd, self.med)

    @m6.setter
    def m6(self, value):
        self.mnn, self.mee, self.mdd, self.mne, self.mnd, self.med = value

    def base_key(self):
        '''
        Extend the inherited key with the tensor components.
        '''
        return Source.base_key(self) + self.m6_astuple

    def discretize_basesource(self, store, target=None):
        '''
        Discretize into a temporal distribution of moment tensors.
        '''
        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)
        return meta.DiscretizedMTSource(
            m6s=self.m6[num.newaxis, :] * amplitudes[:, num.newaxis],
            **self._dparams_base_repeated(times))

    def get_magnitude(self, store=None, target=None):
        '''
        Moment magnitude derived from the tensor components
        (``sqrt(sum(diag^2) + 2*sum(offdiag^2)) / sqrt(2)`` converted via
        ``moment_to_magnitude``).
        '''
        m6 = self.m6
        return pmt.moment_to_magnitude(
            math.sqrt(num.sum(m6[0:3]**2) + 2.0 * num.sum(m6[3:6]**2)) /
            math.sqrt(2.))

    def pyrocko_moment_tensor(self, store=None, target=None):
        '''
        Return the moment tensor object for the stored components.
        '''
        return pmt.MomentTensor(m=pmt.symmat6(*self.m6_astuple))

    def pyrocko_event(self, store=None, target=None, **kwargs):
        '''
        Convert into an event with moment tensor and derived magnitude.
        '''
        # FIX: compute the moment tensor once and reuse it — it was
        # previously computed twice.
        mt = self.pyrocko_moment_tensor(store, target)
        return Source.pyrocko_event(
            self, store, target,
            moment_tensor=mt,
            magnitude=float(mt.moment_magnitude()),
            **kwargs)

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        '''
        Create from an event, using its moment tensor when available, or
        an isotropic tensor matching the event magnitude otherwise.
        '''
        d = {}
        mt = ev.moment_tensor
        if mt:
            d.update(m6=tuple(map(float, mt.m6())))
        else:
            if ev.magnitude is not None:
                mom = pmt.magnitude_to_moment(ev.magnitude)
                v = math.sqrt(2. / 3.) * mom
                d.update(m6=(v, v, v, 0., 0., 0.))

        d.update(kwargs)
        return super(MTSource, cls).from_pyrocko_event(ev, **d)

2010 

2011 

# Mapping of textual anchor names to normalized fault plane coordinates
# (x, y): x runs along strike (-1.0 = left edge, +1.0 = right edge),
# y runs down dip (-1.0 = upper edge, +1.0 = lower edge), matching the
# conventions of the ``nucleation_x``/``nucleation_y`` source attributes.
map_anchor = {
    'center': (0.0, 0.0),
    'center_left': (-1.0, 0.0),
    'center_right': (1.0, 0.0),
    'top': (0.0, -1.0),
    'top_left': (-1.0, -1.0),
    'top_right': (1.0, -1.0),
    'bottom': (0.0, 1.0),
    'bottom_left': (-1.0, 1.0),
    'bottom_right': (1.0, 1.0)}

2022 

2023 

2024class RectangularSource(SourceWithDerivedMagnitude): 

2025 ''' 

2026 Classical Haskell source model modified for bilateral rupture. 

2027 ''' 

2028 

2029 discretized_source_class = meta.DiscretizedMTSource 

2030 

2031 magnitude = Float.T( 

2032 optional=True, 

2033 help='moment magnitude Mw as in [Hanks and Kanamori, 1979]') 

2034 

2035 strike = Float.T( 

2036 default=0.0, 

2037 help='strike direction in [deg], measured clockwise from north') 

2038 

2039 dip = Float.T( 

2040 default=90.0, 

2041 help='dip angle in [deg], measured downward from horizontal') 

2042 

2043 rake = Float.T( 

2044 default=0.0, 

2045 help='rake angle in [deg], ' 

2046 'measured counter-clockwise from right-horizontal ' 

2047 'in on-plane view') 

2048 

2049 length = Float.T( 

2050 default=0., 

2051 help='length of rectangular source area [m]') 

2052 

2053 width = Float.T( 

2054 default=0., 

2055 help='width of rectangular source area [m]') 

2056 

2057 anchor = StringChoice.T( 

2058 choices=['top', 'top_left', 'top_right', 'center', 'bottom', 

2059 'bottom_left', 'bottom_right'], 

2060 default='center', 

2061 optional=True, 

2062 help='Anchor point for positioning the plane, can be: ``top, center ' 

2063 'bottom, top_left, top_right,bottom_left,' 

2064 'bottom_right, center_left, center right``.') 

2065 

2066 nucleation_x = Float.T( 

2067 optional=True, 

2068 help='horizontal position of rupture nucleation in normalized fault ' 

2069 'plane coordinates (``-1.`` = left edge, ``+1.`` = right edge)') 

2070 

2071 nucleation_y = Float.T( 

2072 optional=True, 

2073 help='down-dip position of rupture nucleation in normalized fault ' 

2074 'plane coordinates (``-1.`` = upper edge, ``+1.`` = lower edge)') 

2075 

2076 velocity = Float.T( 

2077 default=3500., 

2078 help='speed of rupture front [m/s]') 

2079 

2080 slip = Float.T( 

2081 optional=True, 

2082 help='Slip on the rectangular source area [m]') 

2083 

2084 opening_fraction = Float.T( 

2085 default=0., 

2086 help='Determines fraction of slip related to opening. ' 

2087 '(``-1``: pure tensile closing, ' 

2088 '``0``: pure shear, ' 

2089 '``1``: pure tensile opening)') 

2090 

2091 decimation_factor = Int.T( 

2092 optional=True, 

2093 default=1, 

2094 help='Sub-source decimation factor, a larger decimation will' 

2095 ' make the result inaccurate but shorten the necessary' 

2096 ' computation time (use for testing puposes only).') 

2097 

2098 aggressive_oversampling = Bool.T( 

2099 default=False, 

2100 help='Aggressive oversampling for basesource discretization. ' 

2101 'When using \'multilinear\' interpolation oversampling has' 

2102 ' practically no effect.') 

2103 

2104 def __init__(self, **kwargs): 

2105 if 'moment' in kwargs: 

2106 mom = kwargs.pop('moment') 

2107 if 'magnitude' not in kwargs: 

2108 kwargs['magnitude'] = float(pmt.moment_to_magnitude(mom)) 

2109 

2110 SourceWithDerivedMagnitude.__init__(self, **kwargs) 

2111 

2112 def base_key(self): 

2113 return SourceWithDerivedMagnitude.base_key(self) + ( 

2114 self.magnitude, 

2115 self.slip, 

2116 self.strike, 

2117 self.dip, 

2118 self.rake, 

2119 self.length, 

2120 self.width, 

2121 self.nucleation_x, 

2122 self.nucleation_y, 

2123 self.velocity, 

2124 self.decimation_factor, 

2125 self.anchor) 

2126 

2127 def check_conflicts(self): 

2128 if self.magnitude is not None and self.slip is not None: 

2129 raise DerivedMagnitudeError( 

2130 'Magnitude and slip are both defined.') 

2131 

    def get_magnitude(self, store=None, target=None):
        '''
        Get the moment magnitude of the source.

        If :py:attr:`magnitude` is set, it is returned directly. If
        :py:attr:`slip` is set instead, the magnitude is derived from the
        discretized moment amplitudes, which requires both ``store`` and
        ``target`` to obtain material parameters. Otherwise a reference
        magnitude for unit moment (1 Nm) is returned.

        :param store: Green's function store providing the earth model.
        :param target: target providing the interpolation method.

        :raises DerivedMagnitudeError: if slip is set but ``store`` or
            ``target`` is missing.
        '''
        self.check_conflicts()
        if self.magnitude is not None:
            return self.magnitude

        elif self.slip is not None:
            if None in (store, target):
                raise DerivedMagnitudeError(
                    'Magnitude for a rectangular source with slip defined '
                    'can only be derived when earth model and target '
                    'interpolation method are available.')

            # Index 2 of _discretize's return tuple holds the amplitudes.
            amplitudes = self._discretize(store, target)[2]
            if amplitudes.ndim == 2:
                # CLVD component has no net moment, leave out
                return float(pmt.moment_to_magnitude(
                    num.sum(num.abs(amplitudes[0:2, :]).sum())))
            else:
                return float(pmt.moment_to_magnitude(num.sum(amplitudes)))

        else:
            # Neither magnitude nor slip given: magnitude of unit moment.
            return float(pmt.moment_to_magnitude(1.0))

2154 

2155 def get_factor(self): 

2156 return 1.0 

2157 

2158 def get_slip_tensile(self): 

2159 return self.slip * self.opening_fraction 

2160 

2161 def get_slip_shear(self): 

2162 return self.slip - abs(self.get_slip_tensile) 

2163 

2164 def _discretize(self, store, target): 

2165 if self.nucleation_x is not None: 

2166 nucx = self.nucleation_x * 0.5 * self.length 

2167 else: 

2168 nucx = None 

2169 

2170 if self.nucleation_y is not None: 

2171 nucy = self.nucleation_y * 0.5 * self.width 

2172 else: 

2173 nucy = None 

2174 

2175 stf = self.effective_stf_pre() 

2176 

2177 points, times, amplitudes, dl, dw, nl, nw = discretize_rect_source( 

2178 store.config.deltas, store.config.deltat, 

2179 self.time, self.north_shift, self.east_shift, self.depth, 

2180 self.strike, self.dip, self.length, self.width, self.anchor, 

2181 self.velocity, stf=stf, nucleation_x=nucx, nucleation_y=nucy, 

2182 decimation_factor=self.decimation_factor, 

2183 aggressive_oversampling=self.aggressive_oversampling) 

2184 

2185 if self.slip is not None: 

2186 if target is not None: 

2187 interpolation = target.interpolation 

2188 else: 

2189 interpolation = 'nearest_neighbor' 

2190 logger.warn( 

2191 'no target information available, will use ' 

2192 '"nearest_neighbor" interpolation when extracting shear ' 

2193 'modulus from earth model') 

2194 

2195 shear_moduli = store.config.get_shear_moduli( 

2196 self.lat, self.lon, 

2197 points=points, 

2198 interpolation=interpolation) 

2199 

2200 tensile_slip = self.get_slip_tensile() 

2201 shear_slip = self.slip - abs(tensile_slip) 

2202 

2203 amplitudes_total = [shear_moduli * shear_slip] 

2204 if tensile_slip != 0: 

2205 bulk_moduli = store.config.get_bulk_moduli( 

2206 self.lat, self.lon, 

2207 points=points, 

2208 interpolation=interpolation) 

2209 

2210 tensile_iso = bulk_moduli * tensile_slip 

2211 tensile_clvd = (2. / 3.) * shear_moduli * tensile_slip 

2212 

2213 amplitudes_total.extend([tensile_iso, tensile_clvd]) 

2214 

2215 amplitudes_total = num.vstack(amplitudes_total).squeeze() * \ 

2216 amplitudes * dl * dw 

2217 

2218 else: 

2219 # normalization to retain total moment 

2220 amplitudes_norm = amplitudes / num.sum(amplitudes) 

2221 moment = self.get_moment(store, target) 

2222 

2223 amplitudes_total = [ 

2224 amplitudes_norm * moment * (1 - abs(self.opening_fraction))] 

2225 if self.opening_fraction != 0.: 

2226 amplitudes_total.append( 

2227 amplitudes_norm * self.opening_fraction * moment) 

2228 

2229 amplitudes_total = num.vstack(amplitudes_total).squeeze() 

2230 

2231 return points, times, num.atleast_1d(amplitudes_total), dl, dw, nl, nw 

2232 

2233 def discretize_basesource(self, store, target=None): 

2234 

2235 points, times, amplitudes, dl, dw, nl, nw = self._discretize( 

2236 store, target) 

2237 

2238 mot = pmt.MomentTensor( 

2239 strike=self.strike, dip=self.dip, rake=self.rake) 

2240 m6s = num.repeat(mot.m6()[num.newaxis, :], times.size, axis=0) 

2241 

2242 if amplitudes.ndim == 1: 

2243 m6s[:, :] *= amplitudes[:, num.newaxis] 

2244 elif amplitudes.ndim == 2: 

2245 # shear MT components 

2246 rotmat1 = pmt.euler_to_matrix( 

2247 d2r * self.dip, d2r * self.strike, d2r * -self.rake) 

2248 m6s[:, :] *= amplitudes[0, :][:, num.newaxis] 

2249 

2250 if amplitudes.shape[0] == 2: 

2251 # tensile MT components - moment/magnitude input 

2252 tensile = pmt.symmat6(1., 1., 3., 0., 0., 0.) 

2253 rot_tensile = pmt.to6(rotmat1.T * tensile * rotmat1) 

2254 

2255 m6s_tensile = rot_tensile[ 

2256 num.newaxis, :] * amplitudes[1, :][:, num.newaxis] 

2257 m6s += m6s_tensile 

2258 

2259 elif amplitudes.shape[0] == 3: 

2260 # tensile MT components - slip input 

2261 iso = pmt.symmat6(1., 1., 1., 0., 0., 0.) 

2262 clvd = pmt.symmat6(-1., -1., 2., 0., 0., 0.) 

2263 

2264 rot_iso = pmt.to6(rotmat1.T * iso * rotmat1) 

2265 rot_clvd = pmt.to6(rotmat1.T * clvd * rotmat1) 

2266 

2267 m6s_iso = rot_iso[ 

2268 num.newaxis, :] * amplitudes[1, :][:, num.newaxis] 

2269 m6s_clvd = rot_clvd[ 

2270 num.newaxis, :] * amplitudes[2, :][:, num.newaxis] 

2271 m6s += m6s_iso + m6s_clvd 

2272 else: 

2273 raise ValueError('Unknwown amplitudes shape!') 

2274 else: 

2275 raise ValueError( 

2276 'Unexpected dimension of {}'.format(amplitudes.ndim)) 

2277 

2278 ds = meta.DiscretizedMTSource( 

2279 lat=self.lat, 

2280 lon=self.lon, 

2281 times=times, 

2282 north_shifts=points[:, 0], 

2283 east_shifts=points[:, 1], 

2284 depths=points[:, 2], 

2285 m6s=m6s, 

2286 dl=dl, 

2287 dw=dw, 

2288 nl=nl, 

2289 nw=nw) 

2290 

2291 return ds 

2292 

2293 def outline(self, cs='xyz'): 

2294 points = outline_rect_source(self.strike, self.dip, self.length, 

2295 self.width, self.anchor) 

2296 

2297 points[:, 0] += self.north_shift 

2298 points[:, 1] += self.east_shift 

2299 points[:, 2] += self.depth 

2300 if cs == 'xyz': 

2301 return points 

2302 elif cs == 'xy': 

2303 return points[:, :2] 

2304 elif cs in ('latlon', 'lonlat', 'latlondepth'): 

2305 latlon = ne_to_latlon( 

2306 self.lat, self.lon, points[:, 0], points[:, 1]) 

2307 

2308 latlon = num.array(latlon).T 

2309 if cs == 'latlon': 

2310 return latlon 

2311 elif cs == 'lonlat': 

2312 return latlon[:, ::-1] 

2313 else: 

2314 return num.concatenate( 

2315 (latlon, points[:, 2].reshape((len(points), 1))), 

2316 axis=1) 

2317 

2318 def points_on_source(self, cs='xyz', **kwargs): 

2319 

2320 points = points_on_rect_source( 

2321 self.strike, self.dip, self.length, self.width, 

2322 self.anchor, **kwargs) 

2323 

2324 points[:, 0] += self.north_shift 

2325 points[:, 1] += self.east_shift 

2326 points[:, 2] += self.depth 

2327 if cs == 'xyz': 

2328 return points 

2329 elif cs == 'xy': 

2330 return points[:, :2] 

2331 elif cs in ('latlon', 'lonlat', 'latlondepth'): 

2332 latlon = ne_to_latlon( 

2333 self.lat, self.lon, points[:, 0], points[:, 1]) 

2334 

2335 latlon = num.array(latlon).T 

2336 if cs == 'latlon': 

2337 return latlon 

2338 elif cs == 'lonlat': 

2339 return latlon[:, ::-1] 

2340 else: 

2341 return num.concatenate( 

2342 (latlon, points[:, 2].reshape((len(points), 1))), 

2343 axis=1) 

2344 

2345 def get_nucleation_abs_coord(self, cs='xy'): 

2346 

2347 if self.nucleation_x is None: 

2348 return None, None 

2349 

2350 coords = from_plane_coords(self.strike, self.dip, self.length, 

2351 self.width, self.depth, self.nucleation_x, 

2352 self.nucleation_y, lat=self.lat, 

2353 lon=self.lon, north_shift=self.north_shift, 

2354 east_shift=self.east_shift, cs=cs) 

2355 return coords 

2356 

2357 def pyrocko_moment_tensor(self, store=None, target=None): 

2358 return pmt.MomentTensor( 

2359 strike=self.strike, 

2360 dip=self.dip, 

2361 rake=self.rake, 

2362 scalar_moment=self.get_moment(store, target)) 

2363 

2364 def pyrocko_event(self, store=None, target=None, **kwargs): 

2365 return SourceWithDerivedMagnitude.pyrocko_event( 

2366 self, store, target, 

2367 **kwargs) 

2368 

2369 @classmethod 

2370 def from_pyrocko_event(cls, ev, **kwargs): 

2371 d = {} 

2372 mt = ev.moment_tensor 

2373 if mt: 

2374 (strike, dip, rake), _ = mt.both_strike_dip_rake() 

2375 d.update( 

2376 strike=float(strike), 

2377 dip=float(dip), 

2378 rake=float(rake), 

2379 magnitude=float(mt.moment_magnitude())) 

2380 

2381 d.update(kwargs) 

2382 return super(RectangularSource, cls).from_pyrocko_event(ev, **d) 

2383 

2384 

class PseudoDynamicRupture(SourceWithDerivedMagnitude):
    '''
    Combined Eikonal and Okada quasi-dynamic rupture model.

    Details are described in :doc:`/topics/pseudo-dynamic-rupture`.
    Note: attribute `stf` is not used so far, but kept for future
    applications.
    '''

    discretized_source_class = meta.DiscretizedMTSource

    strike = Float.T(
        default=0.0,
        help='Strike direction in [deg], measured clockwise from north.')

    dip = Float.T(
        default=0.0,
        help='Dip angle in [deg], measured downward from horizontal.')

    length = Float.T(
        default=10. * km,
        help='Length of rectangular source area in [m].')

    width = Float.T(
        default=5. * km,
        help='Width of rectangular source area in [m].')

    anchor = StringChoice.T(
        choices=['top', 'top_left', 'top_right', 'center', 'bottom',
                 'bottom_left', 'bottom_right'],
        default='center',
        optional=True,
        # Help text fixed to agree with ``choices`` (previously mentioned
        # ``center_left``/``center_right`` which are not valid options).
        help='Anchor point for positioning the plane, can be: ``top, '
             'center, bottom, top_left, top_right, bottom_left, '
             'bottom_right``.')

    # NOTE: ``num.float``/``num.bool`` aliases were removed in numpy >= 1.24;
    # the builtin ``float``/``bool`` are the documented replacements.
    nucleation_x__ = Array.T(
        default=num.array([0.]),
        dtype=float,
        serialize_as='list',
        help='Horizontal position of rupture nucleation in normalized fault '
             'plane coordinates (``-1.`` = left edge, ``+1.`` = right edge).')

    nucleation_y__ = Array.T(
        default=num.array([0.]),
        dtype=float,
        serialize_as='list',
        help='Down-dip position of rupture nucleation in normalized fault '
             'plane coordinates (``-1.`` = upper edge, ``+1.`` = lower edge).')

    nucleation_time__ = Array.T(
        optional=True,
        help='Time in [s] after origin, when nucleation points defined by '
             '``nucleation_x`` and ``nucleation_y`` rupture.',
        dtype=float,
        serialize_as='list')

    gamma = Float.T(
        default=0.8,
        help='Scaling factor between rupture velocity and S-wave velocity: '
             r':math:`v_r = \gamma * v_s`.')

    nx = Int.T(
        default=2,
        help='Number of discrete source patches in x direction (along '
             'strike).')

    ny = Int.T(
        default=2,
        help='Number of discrete source patches in y direction (down dip).')

    slip = Float.T(
        optional=True,
        help='Maximum slip of the rectangular source [m]. '
             'Setting the slip the tractions/stress field '
             'will be normalized to accommodate the desired maximum slip.')

    rake = Float.T(
        optional=True,
        help='Rake angle in [deg], '
             'measured counter-clockwise from right-horizontal '
             'in on-plane view. Rake is translated into homogeneous '
             'tractions in strike and up-dip direction. ``rake`` is mutually '
             'exclusive with tractions parameter.')

    patches = List.T(
        OkadaSource.T(),
        optional=True,
        help='List of all boundary elements/sub faults/fault patches.')

    patch_mask__ = Array.T(
        dtype=bool,
        serialize_as='list',
        shape=(None,),
        optional=True,
        help='Mask for all boundary elements/sub faults/fault patches. True '
             'leaves the patch in the calculation, False excludes the patch.')

    tractions = TractionField.T(
        optional=True,
        help='Traction field the rupture plane is exposed to. See the'
             ':py:mod:`pyrocko.gf.tractions` module for more details. '
             'If ``tractions=None`` and ``rake`` is given'
             ' :py:class:`~pyrocko.gf.tractions.DirectedTractions` will'
             ' be used.')

    coef_mat = Array.T(
        optional=True,
        help='Coefficient matrix linking traction and dislocation field.',
        dtype=float,
        shape=(None, None))

    eikonal_decimation = Int.T(
        optional=True,
        default=1,
        help='Sub-source eikonal factor, a smaller eikonal factor will'
             ' increase the accuracy of rupture front calculation but'
             ' increases also the computation time.')

    decimation_factor = Int.T(
        optional=True,
        default=1,
        help='Sub-source decimation factor, a larger decimation will'
             ' make the result inaccurate but shorten the necessary'
             ' computation time (use for testing purposes only).')

    nthreads = Int.T(
        optional=True,
        default=1,
        help='Number of threads for Okada forward modelling, '
             'matrix inversion and calculation of point subsources. '
             'Note: for small/medium matrices 1 thread is most efficient.')

    pure_shear = Bool.T(
        optional=True,
        default=False,
        help='Calculate only shear tractions and omit tensile tractions.')

    smooth_rupture = Bool.T(
        default=True,
        help='Smooth the tractions by weighting partially ruptured'
             ' fault patches.')

    aggressive_oversampling = Bool.T(
        default=False,
        help='Aggressive oversampling for basesource discretization. '
             "When using 'multilinear' interpolation oversampling has"
             ' practically no effect.')

2532 

2533 def __init__(self, **kwargs): 

2534 SourceWithDerivedMagnitude.__init__(self, **kwargs) 

2535 self._interpolators = {} 

2536 self.check_conflicts() 

2537 

2538 @property 

2539 def nucleation_x(self): 

2540 return self.nucleation_x__ 

2541 

2542 @nucleation_x.setter 

2543 def nucleation_x(self, nucleation_x): 

2544 if isinstance(nucleation_x, list): 

2545 nucleation_x = num.array(nucleation_x) 

2546 

2547 elif not isinstance( 

2548 nucleation_x, num.ndarray) and nucleation_x is not None: 

2549 

2550 nucleation_x = num.array([nucleation_x]) 

2551 self.nucleation_x__ = nucleation_x 

2552 

2553 @property 

2554 def nucleation_y(self): 

2555 return self.nucleation_y__ 

2556 

2557 @nucleation_y.setter 

2558 def nucleation_y(self, nucleation_y): 

2559 if isinstance(nucleation_y, list): 

2560 nucleation_y = num.array(nucleation_y) 

2561 

2562 elif not isinstance(nucleation_y, num.ndarray) \ 

2563 and nucleation_y is not None: 

2564 nucleation_y = num.array([nucleation_y]) 

2565 

2566 self.nucleation_y__ = nucleation_y 

2567 

2568 @property 

2569 def nucleation(self): 

2570 nucl_x, nucl_y = self.nucleation_x, self.nucleation_y 

2571 

2572 if (nucl_x is None) or (nucl_y is None): 

2573 return None 

2574 

2575 assert nucl_x.shape[0] == nucl_y.shape[0] 

2576 

2577 return num.concatenate( 

2578 (nucl_x[:, num.newaxis], nucl_y[:, num.newaxis]), axis=1) 

2579 

2580 @nucleation.setter 

2581 def nucleation(self, nucleation): 

2582 if isinstance(nucleation, list): 

2583 nucleation = num.array(nucleation) 

2584 

2585 assert nucleation.shape[1] == 2 

2586 

2587 self.nucleation_x = nucleation[:, 0] 

2588 self.nucleation_y = nucleation[:, 1] 

2589 

2590 @property 

2591 def nucleation_time(self): 

2592 return self.nucleation_time__ 

2593 

2594 @nucleation_time.setter 

2595 def nucleation_time(self, nucleation_time): 

2596 if not isinstance(nucleation_time, num.ndarray) \ 

2597 and nucleation_time is not None: 

2598 nucleation_time = num.array([nucleation_time]) 

2599 

2600 self.nucleation_time__ = nucleation_time 

2601 

2602 @property 

2603 def patch_mask(self): 

2604 if (self.patch_mask__ is not None and 

2605 self.patch_mask__.shape == (self.nx * self.ny,)): 

2606 

2607 return self.patch_mask__ 

2608 else: 

2609 return num.ones(self.nx * self.ny, dtype=bool) 

2610 

2611 @patch_mask.setter 

2612 def patch_mask(self, patch_mask): 

2613 if isinstance(patch_mask, list): 

2614 patch_mask = num.array(patch_mask) 

2615 

2616 self.patch_mask__ = patch_mask 

2617 

2618 def get_tractions(self): 

2619 ''' 

2620 Get source traction vectors. 

2621 

2622 If :py:attr:`rake` is given, unit length directed traction vectors 

2623 (:py:class:`~pyrocko.gf.tractions.DirectedTractions`) are returned, 

2624 else the given :py:attr:`tractions` are used. 

2625 

2626 :returns: 

2627 Traction vectors per patch. 

2628 :rtype: 

2629 :py:class:`~numpy.ndarray`: ``(n_patches, 3)``. 

2630 ''' 

2631 

2632 if self.rake is not None: 

2633 if num.isnan(self.rake): 

2634 raise ValueError('Rake must be a real number, not NaN.') 

2635 

2636 logger.warning( 

2637 'Tractions are derived based on the given source rake.') 

2638 tractions = DirectedTractions(rake=self.rake) 

2639 else: 

2640 tractions = self.tractions 

2641 return tractions.get_tractions(self.nx, self.ny, self.patches) 

2642 

2643 def base_key(self): 

2644 return SourceWithDerivedMagnitude.base_key(self) + ( 

2645 self.slip, 

2646 self.strike, 

2647 self.dip, 

2648 self.rake, 

2649 self.length, 

2650 self.width, 

2651 float(self.nucleation_x.mean()), 

2652 float(self.nucleation_y.mean()), 

2653 self.decimation_factor, 

2654 self.anchor, 

2655 self.pure_shear, 

2656 self.gamma, 

2657 tuple(self.patch_mask)) 

2658 

2659 def check_conflicts(self): 

2660 if self.tractions and self.rake: 

2661 raise AttributeError( 

2662 'Tractions and rake are mutually exclusive.') 

2663 if self.tractions is None and self.rake is None: 

2664 self.rake = 0. 

2665 

    def get_magnitude(self, store=None, target=None):
        '''
        Get the moment magnitude of the rupture.

        If ``slip`` or ``tractions`` are set, the magnitude is derived by
        integrating the moment rate of the discretized source over time,
        which requires ``store``. Otherwise the magnitude of a unit moment
        (1 Nm) is returned.

        :param store: Green's function store providing the earth model.
        :param target: optional target passed on to discretization.

        :raises DerivedMagnitudeError: if slip/tractions are set but no
            store is given.
        '''
        self.check_conflicts()
        if self.slip is not None or self.tractions is not None:
            if store is None:
                raise DerivedMagnitudeError(
                    'Magnitude for a rectangular source with slip or '
                    'tractions defined can only be derived when earth model '
                    'is set.')

            moment_rate, calc_times = self.discretize_basesource(
                store, target=target).get_moment_rate(store.config.deltat)

            # Time step per sample; the first interval is duplicated so
            # that deltat has the same length as calc_times.
            deltat = num.concatenate((
                (num.diff(calc_times)[0],),
                num.diff(calc_times)))

            # Integrate moment rate over time to obtain the total moment.
            return float(pmt.moment_to_magnitude(
                num.sum(moment_rate * deltat)))

        else:
            return float(pmt.moment_to_magnitude(1.0))

2687 

2688 def get_factor(self): 

2689 return 1.0 

2690 

2691 def outline(self, cs='xyz'): 

2692 ''' 

2693 Get source outline corner coordinates. 

2694 

2695 :param cs: 

2696 :ref:`Output coordinate system <coordinate-system-names>`. 

2697 :type cs: 

2698 optional, str 

2699 

2700 :returns: 

2701 Corner points in desired coordinate system. 

2702 :rtype: 

2703 :py:class:`~numpy.ndarray`: ``(5, [2, 3])``. 

2704 ''' 

2705 points = outline_rect_source(self.strike, self.dip, self.length, 

2706 self.width, self.anchor) 

2707 

2708 points[:, 0] += self.north_shift 

2709 points[:, 1] += self.east_shift 

2710 points[:, 2] += self.depth 

2711 if cs == 'xyz': 

2712 return points 

2713 elif cs == 'xy': 

2714 return points[:, :2] 

2715 elif cs in ('latlon', 'lonlat', 'latlondepth'): 

2716 latlon = ne_to_latlon( 

2717 self.lat, self.lon, points[:, 0], points[:, 1]) 

2718 

2719 latlon = num.array(latlon).T 

2720 if cs == 'latlon': 

2721 return latlon 

2722 elif cs == 'lonlat': 

2723 return latlon[:, ::-1] 

2724 else: 

2725 return num.concatenate( 

2726 (latlon, points[:, 2].reshape((len(points), 1))), 

2727 axis=1) 

2728 

2729 def points_on_source(self, cs='xyz', **kwargs): 

2730 ''' 

2731 Convert relative plane coordinates to geographical coordinates. 

2732 

2733 Given x and y coordinates (relative source coordinates between -1. 

2734 and 1.) are converted to desired geographical coordinates. Coordinates 

2735 need to be given as :py:class:`~numpy.ndarray` arguments ``points_x`` 

2736 and ``points_y``. 

2737 

2738 :param cs: 

2739 :ref:`Output coordinate system <coordinate-system-names>`. 

2740 :type cs: 

2741 optional, str 

2742 

2743 :returns: 

2744 Point coordinates in desired coordinate system. 

2745 :rtype: 

2746 :py:class:`~numpy.ndarray`: ``(n_points, [2, 3])``. 

2747 ''' 

2748 points = points_on_rect_source( 

2749 self.strike, self.dip, self.length, self.width, 

2750 self.anchor, **kwargs) 

2751 

2752 points[:, 0] += self.north_shift 

2753 points[:, 1] += self.east_shift 

2754 points[:, 2] += self.depth 

2755 if cs == 'xyz': 

2756 return points 

2757 elif cs == 'xy': 

2758 return points[:, :2] 

2759 elif cs in ('latlon', 'lonlat', 'latlondepth'): 

2760 latlon = ne_to_latlon( 

2761 self.lat, self.lon, points[:, 0], points[:, 1]) 

2762 

2763 latlon = num.array(latlon).T 

2764 if cs == 'latlon': 

2765 return latlon 

2766 elif cs == 'lonlat': 

2767 return latlon[:, ::-1] 

2768 else: 

2769 return num.concatenate( 

2770 (latlon, points[:, 2].reshape((len(points), 1))), 

2771 axis=1) 

2772 

2773 def pyrocko_moment_tensor(self, store=None, target=None): 

2774 if store is not None: 

2775 if not self.patches: 

2776 self.discretize_patches(store) 

2777 

2778 data = self.get_slip() 

2779 else: 

2780 data = self.get_tractions() 

2781 

2782 weights = num.linalg.norm(data, axis=1) 

2783 weights /= weights.sum() 

2784 

2785 rakes = num.arctan2(data[:, 1], data[:, 0]) * r2d 

2786 rake = num.average(rakes, weights=weights) 

2787 

2788 return pmt.MomentTensor( 

2789 strike=self.strike, 

2790 dip=self.dip, 

2791 rake=rake, 

2792 scalar_moment=self.get_moment(store, target)) 

2793 

2794 def pyrocko_event(self, store=None, target=None, **kwargs): 

2795 return SourceWithDerivedMagnitude.pyrocko_event( 

2796 self, store, target, 

2797 **kwargs) 

2798 

2799 @classmethod 

2800 def from_pyrocko_event(cls, ev, **kwargs): 

2801 d = {} 

2802 mt = ev.moment_tensor 

2803 if mt: 

2804 (strike, dip, rake), _ = mt.both_strike_dip_rake() 

2805 d.update( 

2806 strike=float(strike), 

2807 dip=float(dip), 

2808 rake=float(rake)) 

2809 

2810 d.update(kwargs) 

2811 return super(PseudoDynamicRupture, cls).from_pyrocko_event(ev, **d) 

2812 

    def _discretize_points(self, store, *args, **kwargs):
        '''
        Discretize source plane with equal vertical and horizontal spacing.

        Additional ``*args`` and ``**kwargs`` are passed to
        :py:meth:`points_on_source`.

        :param store:
            Green's function database (needs to cover whole region of the
            source).
        :type store:
            :py:class:`~pyrocko.gf.store.Store`

        :returns:
            Number of points in strike and dip direction, distance
            between adjacent points, coordinates (latlondepth) and coordinates
            (xy on fault) for discrete points.
        :rtype:
            (int, int, float, :py:class:`~numpy.ndarray`,
            :py:class:`~numpy.ndarray`).
        '''
        anch_x, anch_y = map_anchor[self.anchor]

        # Sample a down-dip profile roughly every kilometre to probe the
        # velocity model along the plane's depth extent.
        npoints = int(self.width // km) + 1
        points = num.zeros((npoints, 3))
        points[:, 1] = num.linspace(-1., 1., npoints)
        points[:, 1] = (points[:, 1] - anch_y) * self.width/2

        # Rotate profile into the dipping plane and shift to source depth.
        rotmat = num.asarray(
            pmt.euler_to_matrix(self.dip*d2r, self.strike*d2r, 0.0))
        points = num.dot(rotmat.T, points.T).T
        points[:, 2] += self.depth

        # Slowest rupture velocity along the profile; floored at 0.5 km/s
        # times gamma to avoid degenerate spacing.
        vs_min = store.config.get_vs(
            self.lat, self.lon, points,
            interpolation='nearest_neighbor')
        vr_min = max(vs_min.min(), .5*km) * self.gamma

        oversampling = 10.
        delta_l = self.length / (self.nx * oversampling)
        delta_w = self.width / (self.ny * oversampling)

        # Spacing is the most restrictive of: wave sampling, patch
        # oversampling and the GF store's spatial sampling.
        delta = self.eikonal_decimation * num.min([
            store.config.deltat * vr_min / oversampling,
            delta_l, delta_w] + [
            deltas for deltas in store.config.deltas])

        # Snap spacing so the width is an integer number of cells.
        delta = delta_w / num.ceil(delta_w / delta)

        nx = int(num.ceil(self.length / delta)) + 1
        ny = int(num.ceil(self.width / delta)) + 1

        # The along-strike extent is slightly enlarged to the next grid
        # multiple; lim_x is the normalized overshoot.
        rem_l = (nx-1)*delta - self.length
        lim_x = rem_l / self.length

        points_xy = num.zeros((nx * ny, 2))
        points_xy[:, 0] = num.repeat(
            num.linspace(-1.-lim_x, 1.+lim_x, nx), ny)
        points_xy[:, 1] = num.tile(
            num.linspace(-1., 1., ny), nx)

        points = self.points_on_source(
            points_x=points_xy[:, 0],
            points_y=points_xy[:, 1],
            **kwargs)

        return nx, ny, delta, points, points_xy

2880 

2881 def _discretize_rupture_v(self, store, interpolation='nearest_neighbor', 

2882 points=None): 

2883 ''' 

2884 Get rupture velocity for discrete points on source plane. 

2885 

2886 :param store: 

2887 Green's function database (needs to cover the whole region of the 

2888 source) 

2889 :type store: 

2890 optional, :py:class:`~pyrocko.gf.store.Store` 

2891 

2892 :param interpolation: 

2893 Interpolation method to use (choose between ``'nearest_neighbor'`` 

2894 and ``'multilinear'``). 

2895 :type interpolation: 

2896 optional, str 

2897 

2898 :param points: 

2899 Coordinates on fault (-1.:1.) of discrete points. 

2900 :type points: 

2901 optional, :py:class:`~numpy.ndarray`: ``(n_points, 2)`` 

2902 

2903 :returns: 

2904 Rupture velocity assumed as :math:`v_s * \\gamma` for discrete 

2905 points. 

2906 :rtype: 

2907 :py:class:`~numpy.ndarray`: ``(n_points, )``. 

2908 ''' 

2909 

2910 if points is None: 

2911 _, _, _, points, _ = self._discretize_points(store, cs='xyz') 

2912 

2913 return store.config.get_vs( 

2914 self.lat, self.lon, 

2915 points=points, 

2916 interpolation=interpolation) * self.gamma 

2917 

2918 def discretize_time( 

2919 self, store, interpolation='nearest_neighbor', 

2920 vr=None, times=None, *args, **kwargs): 

2921 ''' 

2922 Get rupture start time for discrete points on source plane. 

2923 

2924 :param store: 

2925 Green's function database (needs to cover whole region of the 

2926 source) 

2927 :type store: 

2928 :py:class:`~pyrocko.gf.store.Store` 

2929 

2930 :param interpolation: 

2931 Interpolation method to use (choose between ``'nearest_neighbor'`` 

2932 and ``'multilinear'``). 

2933 :type interpolation: 

2934 optional, str 

2935 

2936 :param vr: 

2937 Array, containing rupture user defined rupture velocity values. 

2938 :type vr: 

2939 optional, :py:class:`~numpy.ndarray` 

2940 

2941 :param times: 

2942 Array, containing zeros, where rupture is starting, real positive 

2943 numbers at later secondary nucleation points and -1, where time 

2944 will be calculated. If not given, rupture starts at nucleation_x, 

2945 nucleation_y. Times are given for discrete points with equal 

2946 horizontal and vertical spacing. 

2947 :type times: 

2948 optional, :py:class:`~numpy.ndarray` 

2949 

2950 :returns: 

2951 Coordinates (latlondepth), coordinates (xy), rupture velocity, 

2952 rupture propagation time of discrete points. 

2953 :rtype: 

2954 :py:class:`~numpy.ndarray`: ``(n_points, 3)``, 

2955 :py:class:`~numpy.ndarray`: ``(n_points, 2)``, 

2956 :py:class:`~numpy.ndarray`: ``(n_points_dip, n_points_strike)``, 

2957 :py:class:`~numpy.ndarray`: ``(n_points_dip, n_points_strike)``. 

2958 ''' 

2959 nx, ny, delta, points, points_xy = self._discretize_points( 

2960 store, cs='xyz') 

2961 

2962 if vr is None or vr.shape != tuple((nx, ny)): 

2963 if vr: 

2964 logger.warning( 

2965 'Given rupture velocities are not in right shape: ' 

2966 '(%i, %i), but needed is (%i, %i).', *vr.shape + (nx, ny)) 

2967 vr = self._discretize_rupture_v(store, interpolation, points)\ 

2968 .reshape(nx, ny) 

2969 

2970 if vr.shape != tuple((nx, ny)): 

2971 logger.warning( 

2972 'Given rupture velocities are not in right shape. Therefore' 

2973 ' standard rupture velocity array is used.') 

2974 

2975 def initialize_times(): 

2976 nucl_x, nucl_y = self.nucleation_x, self.nucleation_y 

2977 

2978 if nucl_x.shape != nucl_y.shape: 

2979 raise ValueError( 

2980 'Nucleation coordinates have different shape.') 

2981 

2982 dist_points = num.array([ 

2983 num.linalg.norm(points_xy - num.array([x, y]).ravel(), axis=1) 

2984 for x, y in zip(nucl_x, nucl_y)]) 

2985 nucl_indices = num.argmin(dist_points, axis=1) 

2986 

2987 if self.nucleation_time is None: 

2988 nucl_times = num.zeros_like(nucl_indices) 

2989 else: 

2990 if self.nucleation_time.shape == nucl_x.shape: 

2991 nucl_times = self.nucleation_time 

2992 else: 

2993 raise ValueError( 

2994 'Nucleation coordinates and times have different ' 

2995 'shapes') 

2996 

2997 t = num.full(nx * ny, -1.) 

2998 t[nucl_indices] = nucl_times 

2999 return t.reshape(nx, ny) 

3000 

3001 if times is None: 

3002 times = initialize_times() 

3003 elif times.shape != tuple((nx, ny)): 

3004 times = initialize_times() 

3005 logger.warning( 

3006 'Given times are not in right shape. Therefore standard time' 

3007 ' array is used.') 

3008 

3009 eikonal_ext.eikonal_solver_fmm_cartesian( 

3010 speeds=vr, times=times, delta=delta) 

3011 

3012 return points, points_xy, vr, times 

3013 

3014 def get_vr_time_interpolators( 

3015 self, store, interpolation='nearest_neighbor', force=False, 

3016 **kwargs): 

3017 ''' 

3018 Get interpolators for rupture velocity and rupture time. 

3019 

3020 Additional ``**kwargs`` are passed to :py:meth:`discretize_time`. 

3021 

3022 :param store: 

3023 Green's function database (needs to cover whole region of the 

3024 source). 

3025 :type store: 

3026 :py:class:`~pyrocko.gf.store.Store` 

3027 

3028 :param interpolation: 

3029 Interpolation method to use (choose between ``'nearest_neighbor'`` 

3030 and ``'multilinear'``). 

3031 :type interpolation: 

3032 optional, str 

3033 

3034 :param force: 

3035 Force recalculation of the interpolators (e.g. after change of 

3036 nucleation point locations/times). Default is ``False``. 

3037 :type force: 

3038 optional, bool 

3039 ''' 

3040 interp_map = {'multilinear': 'linear', 'nearest_neighbor': 'nearest'} 

3041 if interpolation not in interp_map: 

3042 raise TypeError( 

3043 'Interpolation method %s not available' % interpolation) 

3044 

3045 if not self._interpolators.get(interpolation, False) or force: 

3046 _, points_xy, vr, times = self.discretize_time( 

3047 store, **kwargs) 

3048 

3049 if self.length <= 0.: 

3050 raise ValueError( 

3051 'length must be larger then 0. not %g' % self.length) 

3052 

3053 if self.width <= 0.: 

3054 raise ValueError( 

3055 'width must be larger then 0. not %g' % self.width) 

3056 

3057 nx, ny = times.shape 

3058 anch_x, anch_y = map_anchor[self.anchor] 

3059 

3060 points_xy[:, 0] = (points_xy[:, 0] - anch_x) * self.length / 2. 

3061 points_xy[:, 1] = (points_xy[:, 1] - anch_y) * self.width / 2. 

3062 

3063 self._interpolators[interpolation] = ( 

3064 nx, ny, times, vr, 

3065 RegularGridInterpolator( 

3066 (points_xy[::ny, 0], points_xy[:ny, 1]), times, 

3067 method=interp_map[interpolation]), 

3068 RegularGridInterpolator( 

3069 (points_xy[::ny, 0], points_xy[:ny, 1]), vr, 

3070 method=interp_map[interpolation])) 

3071 return self._interpolators[interpolation] 

3072 

    def discretize_patches(
            self, store, interpolation='nearest_neighbor', force=False,
            grid_shape=(),
            **kwargs):
        '''
        Get rupture start time and OkadaSource elements for points on rupture.

        All source elements and their corresponding center points are
        calculated and stored in the :py:attr:`patches` attribute.

        Additional ``**kwargs`` are passed to :py:meth:`discretize_time`.

        :param store:
            Green's function database (needs to cover whole region of the
            source).
        :type store:
            :py:class:`~pyrocko.gf.store.Store`

        :param interpolation:
            Interpolation method to use (choose between ``'nearest_neighbor'``
            and ``'multilinear'``).
        :type interpolation:
            optional, str

        :param force:
            Force recalculation of the vr and time interpolators ( e.g. after
            change of nucleation point locations/times). Default is ``False``.
        :type force:
            optional, bool

        :param grid_shape:
            Desired sub fault patch grid size (nlength, nwidth). Either factor
            or grid_shape should be set.
        :type grid_shape:
            optional, tuple of int
        '''
        nx, ny, times, vr, time_interpolator, vr_interpolator = \
            self.get_vr_time_interpolators(
                store,
                interpolation=interpolation, force=force, **kwargs)
        anch_x, anch_y = map_anchor[self.anchor]

        # Fault half extensions and signed edge offsets relative to the
        # anchor point (al*: along strike, aw*: down dip) in [m].
        al = self.length / 2.
        aw = self.width / 2.
        al1 = -(al + anch_x * al)
        al2 = al - anch_x * al
        aw1 = -aw + anch_y * aw
        aw2 = aw + anch_y * aw
        assert num.abs([al1, al2]).sum() == self.length
        assert num.abs([aw1, aw2]).sum() == self.width

        def get_lame(*a, **kw):
            # Shear modulus mu and Poisson's ratio at the given coordinates.
            # lamb is the first Lame parameter: lambda = vp**2 * rho - 2*mu,
            # Poisson's ratio = lambda / (2 * (lambda + mu)).
            shear_mod = store.config.get_shear_moduli(*a, **kw)
            lamb = store.config.get_vp(*a, **kw)**2 \
                * store.config.get_rho(*a, **kw) - 2. * shear_mod
            return shear_mod, lamb / (2. * (lamb + shear_mod))

        shear_mod, poisson = get_lame(
            self.lat, self.lon,
            num.array([[self.north_shift, self.east_shift, self.depth]]),
            interpolation=interpolation)

        # Single OkadaSource covering the whole rupture plane; only used as
        # a template to derive the sub-fault (patch) discretization below.
        okada_src = OkadaSource(
            lat=self.lat, lon=self.lon,
            strike=self.strike, dip=self.dip,
            north_shift=self.north_shift, east_shift=self.east_shift,
            depth=self.depth,
            al1=al1, al2=al2, aw1=aw1, aw2=aw2,
            poisson=poisson.mean(),
            shearmod=shear_mod.mean(),
            opening=kwargs.get('opening', 0.))

        if not (self.nx and self.ny):
            if grid_shape:
                self.nx, self.ny = grid_shape
            else:
                # Fall back to the grid size of the eikonal solver output.
                self.nx = nx
                self.ny = ny

        source_disc, source_points = okada_src.discretize(self.nx, self.ny)

        # Re-evaluate elastic parameters at the individual patch centers.
        shear_mod, poisson = get_lame(
            self.lat, self.lon,
            num.array([src.source_patch()[:3] for src in source_disc]),
            interpolation=interpolation)

        if (self.nx, self.ny) != (nx, ny):
            # Patch grid differs from the eikonal grid: interpolate rupture
            # times and velocities onto the patch center points.
            times_interp = time_interpolator(source_points[:, :2])
            vr_interp = vr_interpolator(source_points[:, :2])
        else:
            times_interp = times.T.ravel()
            vr_interp = vr.T.ravel()

        # Attach rupture velocity and absolute onset time to each patch.
        for isrc, src in enumerate(source_disc):
            src.vr = vr_interp[isrc]
            src.time = times_interp[isrc] + self.time

        self.patches = source_disc

3171 

3172 def discretize_basesource(self, store, target=None): 

3173 ''' 

3174 Prepare source for synthetic waveform calculation. 

3175 

3176 :param store: 

3177 Green's function database (needs to cover whole region of the 

3178 source). 

3179 :type store: 

3180 :py:class:`~pyrocko.gf.store.Store` 

3181 

3182 :param target: 

3183 Target information. 

3184 :type target: 

3185 optional, :py:class:`~pyrocko.gf.targets.Target` 

3186 

3187 :returns: 

3188 Source discretized by a set of moment tensors and times. 

3189 :rtype: 

3190 :py:class:`~pyrocko.gf.meta.DiscretizedMTSource` 

3191 ''' 

3192 if not target: 

3193 interpolation = 'nearest_neighbor' 

3194 else: 

3195 interpolation = target.interpolation 

3196 

3197 if not self.patches: 

3198 self.discretize_patches(store, interpolation) 

3199 

3200 if self.coef_mat is None: 

3201 self.calc_coef_mat() 

3202 

3203 delta_slip, slip_times = self.get_delta_slip(store) 

3204 npatches = self.nx * self.ny 

3205 ntimes = slip_times.size 

3206 

3207 anch_x, anch_y = map_anchor[self.anchor] 

3208 

3209 pln = self.length / self.nx 

3210 pwd = self.width / self.ny 

3211 

3212 patch_coords = num.array([ 

3213 (p.ix, p.iy) 

3214 for p in self.patches]).reshape(self.nx, self.ny, 2) 

3215 

3216 # boundary condition is zero-slip 

3217 # is not valid to avoid unwished interpolation effects 

3218 slip_grid = num.zeros((self.nx + 2, self.ny + 2, ntimes, 3)) 

3219 slip_grid[1:-1, 1:-1, :, :] = \ 

3220 delta_slip.reshape(self.nx, self.ny, ntimes, 3) 

3221 

3222 slip_grid[0, 0, :, :] = slip_grid[1, 1, :, :] 

3223 slip_grid[0, -1, :, :] = slip_grid[1, -2, :, :] 

3224 slip_grid[-1, 0, :, :] = slip_grid[-2, 1, :, :] 

3225 slip_grid[-1, -1, :, :] = slip_grid[-2, -2, :, :] 

3226 

3227 slip_grid[1:-1, 0, :, :] = slip_grid[1:-1, 1, :, :] 

3228 slip_grid[1:-1, -1, :, :] = slip_grid[1:-1, -2, :, :] 

3229 slip_grid[0, 1:-1, :, :] = slip_grid[1, 1:-1, :, :] 

3230 slip_grid[-1, 1:-1, :, :] = slip_grid[-2, 1:-1, :, :] 

3231 

3232 def make_grid(patch_parameter): 

3233 grid = num.zeros((self.nx + 2, self.ny + 2)) 

3234 grid[1:-1, 1:-1] = patch_parameter.reshape(self.nx, self.ny) 

3235 

3236 grid[0, 0] = grid[1, 1] 

3237 grid[0, -1] = grid[1, -2] 

3238 grid[-1, 0] = grid[-2, 1] 

3239 grid[-1, -1] = grid[-2, -2] 

3240 

3241 grid[1:-1, 0] = grid[1:-1, 1] 

3242 grid[1:-1, -1] = grid[1:-1, -2] 

3243 grid[0, 1:-1] = grid[1, 1:-1] 

3244 grid[-1, 1:-1] = grid[-2, 1:-1] 

3245 

3246 return grid 

3247 

3248 lamb = self.get_patch_attribute('lamb') 

3249 mu = self.get_patch_attribute('shearmod') 

3250 

3251 lamb_grid = make_grid(lamb) 

3252 mu_grid = make_grid(mu) 

3253 

3254 coords_x = num.zeros(self.nx + 2) 

3255 coords_x[1:-1] = patch_coords[:, 0, 0] 

3256 coords_x[0] = coords_x[1] - pln / 2 

3257 coords_x[-1] = coords_x[-2] + pln / 2 

3258 

3259 coords_y = num.zeros(self.ny + 2) 

3260 coords_y[1:-1] = patch_coords[0, :, 1] 

3261 coords_y[0] = coords_y[1] - pwd / 2 

3262 coords_y[-1] = coords_y[-2] + pwd / 2 

3263 

3264 slip_interp = RegularGridInterpolator( 

3265 (coords_x, coords_y, slip_times), 

3266 slip_grid, method='nearest') 

3267 

3268 lamb_interp = RegularGridInterpolator( 

3269 (coords_x, coords_y), 

3270 lamb_grid, method='nearest') 

3271 

3272 mu_interp = RegularGridInterpolator( 

3273 (coords_x, coords_y), 

3274 mu_grid, method='nearest') 

3275 

3276 # discretize basesources 

3277 mindeltagf = min(tuple( 

3278 (self.length / self.nx, self.width / self.ny) + 

3279 tuple(store.config.deltas))) 

3280 

3281 nl = int((1. / self.decimation_factor) * 

3282 num.ceil(pln / mindeltagf)) + 1 

3283 nw = int((1. / self.decimation_factor) * 

3284 num.ceil(pwd / mindeltagf)) + 1 

3285 nsrc_patch = int(nl * nw) 

3286 dl = pln / nl 

3287 dw = pwd / nw 

3288 

3289 patch_area = dl * dw 

3290 

3291 xl = num.linspace(-0.5 * (pln - dl), 0.5 * (pln - dl), nl) 

3292 xw = num.linspace(-0.5 * (pwd - dw), 0.5 * (pwd - dw), nw) 

3293 

3294 base_coords = num.zeros((nsrc_patch, 3), dtype=num.float) 

3295 base_coords[:, 0] = num.tile(xl, nw) 

3296 base_coords[:, 1] = num.repeat(xw, nl) 

3297 base_coords = num.tile(base_coords, (npatches, 1)) 

3298 

3299 center_coords = num.zeros((npatches, 3)) 

3300 center_coords[:, 0] = num.repeat( 

3301 num.arange(self.nx) * pln + pln / 2, self.ny) - self.length / 2 

3302 center_coords[:, 1] = num.tile( 

3303 num.arange(self.ny) * pwd + pwd / 2, self.nx) - self.width / 2 

3304 

3305 base_coords -= center_coords.repeat(nsrc_patch, axis=0) 

3306 nbaselocs = base_coords.shape[0] 

3307 

3308 base_interp = base_coords.repeat(ntimes, axis=0) 

3309 

3310 base_times = num.tile(slip_times, nbaselocs) 

3311 base_interp[:, 0] -= anch_x * self.length / 2 

3312 base_interp[:, 1] -= anch_y * self.width / 2 

3313 base_interp[:, 2] = base_times 

3314 

3315 _, _, _, _, time_interpolator, _ = self.get_vr_time_interpolators( 

3316 store, interpolation=interpolation) 

3317 

3318 time_eikonal_max = time_interpolator.values.max() 

3319 

3320 nbasesrcs = base_interp.shape[0] 

3321 delta_slip = slip_interp(base_interp).reshape(nbaselocs, ntimes, 3) 

3322 lamb = lamb_interp(base_interp[:, :2]).ravel() 

3323 mu = mu_interp(base_interp[:, :2]).ravel() 

3324 

3325 if False: 

3326 try: 

3327 import matplotlib.pyplot as plt 

3328 coords = base_coords.copy() 

3329 norm = num.sum(num.linalg.norm(delta_slip, axis=2), axis=1) 

3330 plt.scatter(coords[:, 0], coords[:, 1], c=norm) 

3331 plt.show() 

3332 except AttributeError: 

3333 pass 

3334 

3335 base_interp[:, 2] = 0. 

3336 rotmat = num.asarray( 

3337 pmt.euler_to_matrix(self.dip * d2r, self.strike * d2r, 0.0)) 

3338 base_interp = num.dot(rotmat.T, base_interp.T).T 

3339 base_interp[:, 0] += self.north_shift 

3340 base_interp[:, 1] += self.east_shift 

3341 base_interp[:, 2] += self.depth 

3342 

3343 slip_strike = delta_slip[:, :, 0].ravel() 

3344 slip_dip = delta_slip[:, :, 1].ravel() 

3345 slip_norm = delta_slip[:, :, 2].ravel() 

3346 

3347 slip_shear = num.linalg.norm([slip_strike, slip_dip], axis=0) 

3348 slip_rake = r2d * num.arctan2(slip_dip, slip_strike) 

3349 

3350 m6s = okada_ext.patch2m6( 

3351 strikes=num.full(nbasesrcs, self.strike, dtype=num.float), 

3352 dips=num.full(nbasesrcs, self.dip, dtype=num.float), 

3353 rakes=slip_rake, 

3354 disl_shear=slip_shear, 

3355 disl_norm=slip_norm, 

3356 lamb=lamb, 

3357 mu=mu, 

3358 nthreads=self.nthreads) 

3359 

3360 m6s *= patch_area 

3361 

3362 dl = -self.patches[0].al1 + self.patches[0].al2 

3363 dw = -self.patches[0].aw1 + self.patches[0].aw2 

3364 

3365 base_times[base_times > time_eikonal_max] = time_eikonal_max 

3366 

3367 ds = meta.DiscretizedMTSource( 

3368 lat=self.lat, 

3369 lon=self.lon, 

3370 times=base_times + self.time, 

3371 north_shifts=base_interp[:, 0], 

3372 east_shifts=base_interp[:, 1], 

3373 depths=base_interp[:, 2], 

3374 m6s=m6s, 

3375 dl=dl, 

3376 dw=dw, 

3377 nl=self.nx, 

3378 nw=self.ny) 

3379 

3380 return ds 

3381 

3382 def calc_coef_mat(self): 

3383 ''' 

3384 Calculate coefficients connecting tractions and dislocations. 

3385 ''' 

3386 if not self.patches: 

3387 raise ValueError( 

3388 'Patches are needed. Please calculate them first.') 

3389 

3390 self.coef_mat = make_okada_coefficient_matrix( 

3391 self.patches, nthreads=self.nthreads, pure_shear=self.pure_shear) 

3392 

3393 def get_patch_attribute(self, attr): 

3394 ''' 

3395 Get patch attributes. 

3396 

3397 :param attr: 

3398 Name of selected attribute (see 

3399 :py:class`pyrocko.modelling.okada.OkadaSource`). 

3400 :type attr: 

3401 str 

3402 

3403 :returns: 

3404 Array with attribute value for each fault patch. 

3405 :rtype: 

3406 :py:class:`~numpy.ndarray` 

3407 

3408 ''' 

3409 if not self.patches: 

3410 raise ValueError( 

3411 'Patches are needed. Please calculate them first.') 

3412 return num.array([getattr(p, attr) for p in self.patches]) 

3413 

3414 def get_slip( 

3415 self, 

3416 time=None, 

3417 scale_slip=True, 

3418 interpolation='nearest_neighbor', 

3419 **kwargs): 

3420 ''' 

3421 Get slip per subfault patch for given time after rupture start. 

3422 

3423 :param time: 

3424 Time after origin [s], for which slip is computed. If not 

3425 given, final static slip is returned. 

3426 :type time: 

3427 optional, float > 0. 

3428 

3429 :param scale_slip: 

3430 If ``True`` and :py:attr:`slip` given, all slip values are scaled 

3431 to fit the given maximum slip. 

3432 :type scale_slip: 

3433 optional, bool 

3434 

3435 :param interpolation: 

3436 Interpolation method to use (choose between ``'nearest_neighbor'`` 

3437 and ``'multilinear'``). 

3438 :type interpolation: 

3439 optional, str 

3440 

3441 :returns: 

3442 Inverted dislocations (:math:`u_{strike}, u_{dip}, u_{tensile}`) 

3443 for each source patch. 

3444 :rtype: 

3445 :py:class:`~numpy.ndarray`: ``(n_sources, 3)`` 

3446 ''' 

3447 

3448 if self.patches is None: 

3449 raise ValueError( 

3450 'Please discretize the source first (discretize_patches())') 

3451 npatches = len(self.patches) 

3452 tractions = self.get_tractions() 

3453 time_patch_max = self.get_patch_attribute('time').max() - self.time 

3454 

3455 time_patch = time 

3456 if time is None: 

3457 time_patch = time_patch_max 

3458 

3459 if self.coef_mat is None: 

3460 self.calc_coef_mat() 

3461 

3462 if tractions.shape != (npatches, 3): 

3463 raise AttributeError( 

3464 'The traction vector is of invalid shape.' 

3465 ' Required shape is (npatches, 3)') 

3466 

3467 patch_mask = num.ones(npatches, dtype=num.bool) 

3468 if self.patch_mask is not None: 

3469 patch_mask = self.patch_mask 

3470 

3471 times = self.get_patch_attribute('time') - self.time 

3472 times[~patch_mask] = time_patch + 1. # exlcude unmasked patches 

3473 relevant_sources = num.nonzero(times <= time_patch)[0] 

3474 disloc_est = num.zeros_like(tractions) 

3475 

3476 if self.smooth_rupture: 

3477 patch_activation = num.zeros(npatches) 

3478 

3479 nx, ny, times, vr, time_interpolator, vr_interpolator = \ 

3480 self.get_vr_time_interpolators( 

3481 store, interpolation=interpolation) 

3482 

3483 # Getting the native Eikonal grid, bit hackish 

3484 points_x = num.round(time_interpolator.grid[0], decimals=2) 

3485 points_y = num.round(time_interpolator.grid[1], decimals=2) 

3486 times_eikonal = time_interpolator.values 

3487 

3488 time_max = time 

3489 if time is None: 

3490 time_max = times_eikonal.max() 

3491 

3492 for ip, p in enumerate(self.patches): 

3493 ul = num.round((p.ix + p.al1, p.iy + p.aw1), decimals=2) 

3494 lr = num.round((p.ix + p.al2, p.iy + p.aw2), decimals=2) 

3495 

3496 idx_length = num.logical_and( 

3497 points_x >= ul[0], points_x <= lr[0]) 

3498 idx_width = num.logical_and( 

3499 points_y >= ul[1], points_y <= lr[1]) 

3500 

3501 times_patch = times_eikonal[num.ix_(idx_length, idx_width)] 

3502 if times_patch.size == 0: 

3503 raise AttributeError('could not use smooth_rupture') 

3504 

3505 patch_activation[ip] = \ 

3506 (times_patch <= time_max).sum() / times_patch.size 

3507 

3508 if time_patch == 0 and time_patch != time_patch_max: 

3509 patch_activation[ip] = 0. 

3510 

3511 patch_activation[~patch_mask] = 0. # exlcude unmasked patches 

3512 

3513 relevant_sources = num.nonzero(patch_activation > 0.)[0] 

3514 

3515 if relevant_sources.size == 0: 

3516 return disloc_est 

3517 

3518 indices_disl = num.repeat(relevant_sources * 3, 3) 

3519 indices_disl[1::3] += 1 

3520 indices_disl[2::3] += 2 

3521 

3522 disloc_est[relevant_sources] = invert_fault_dislocations_bem( 

3523 stress_field=tractions[relevant_sources, :].ravel(), 

3524 coef_mat=self.coef_mat[indices_disl, :][:, indices_disl], 

3525 pure_shear=self.pure_shear, nthreads=self.nthreads, 

3526 epsilon=None, 

3527 **kwargs) 

3528 

3529 if self.smooth_rupture: 

3530 disloc_est *= patch_activation[:, num.newaxis] 

3531 

3532 if scale_slip and self.slip is not None: 

3533 disloc_tmax = num.zeros(npatches) 

3534 

3535 indices_disl = num.repeat(num.nonzero(patch_mask)[0] * 3, 3) 

3536 indices_disl[1::3] += 1 

3537 indices_disl[2::3] += 2 

3538 

3539 disloc_tmax[patch_mask] = num.linalg.norm( 

3540 invert_fault_dislocations_bem( 

3541 stress_field=tractions[patch_mask, :].ravel(), 

3542 coef_mat=self.coef_mat[indices_disl, :][:, indices_disl], 

3543 pure_shear=self.pure_shear, nthreads=self.nthreads, 

3544 epsilon=None, 

3545 **kwargs), axis=1) 

3546 

3547 disloc_tmax_max = disloc_tmax.max() 

3548 if disloc_tmax_max == 0.: 

3549 logger.warning( 

3550 'slip scaling not performed. Maximum slip is 0.') 

3551 

3552 disloc_est *= self.slip / disloc_tmax_max 

3553 

3554 return disloc_est 

3555 

3556 def get_delta_slip( 

3557 self, 

3558 store=None, 

3559 deltat=None, 

3560 delta=True, 

3561 interpolation='nearest_neighbor', 

3562 **kwargs): 

3563 ''' 

3564 Get slip change snapshots. 

3565 

3566 The time interval, within which the slip changes are computed is 

3567 determined by the sampling rate of the Green's function database or 

3568 ``deltat``. Additional ``**kwargs`` are passed to :py:meth:`get_slip`. 

3569 

3570 :param store: 

3571 Green's function database (needs to cover whole region of of the 

3572 source). Its sampling interval is used as time increment for slip 

3573 difference calculation. Either ``deltat`` or ``store`` should be 

3574 given. 

3575 :type store: 

3576 optional, :py:class:`~pyrocko.gf.store.Store` 

3577 

3578 :param deltat: 

3579 Time interval for slip difference calculation [s]. Either 

3580 ``deltat`` or ``store`` should be given. 

3581 :type deltat: 

3582 optional, float 

3583 

3584 :param delta: 

3585 If ``True``, slip differences between two time steps are given. If 

3586 ``False``, cumulative slip for all time steps. 

3587 :type delta: 

3588 optional, bool 

3589 

3590 :param interpolation: 

3591 Interpolation method to use (choose between ``'nearest_neighbor'`` 

3592 and ``'multilinear'``). 

3593 :type interpolation: 

3594 optional, str 

3595 

3596 :returns: 

3597 Displacement changes(:math:`\\Delta u_{strike}, 

3598 \\Delta u_{dip} , \\Delta u_{tensile}`) for each source patch and 

3599 time; corner times, for which delta slip is computed. The order of 

3600 displacement changes array is: 

3601 

3602 .. math:: 

3603 

3604 &[[\\\\ 

3605 &[\\Delta u_{strike, patch1, t1}, 

3606 \\Delta u_{dip, patch1, t1}, 

3607 \\Delta u_{tensile, patch1, t1}],\\\\ 

3608 &[\\Delta u_{strike, patch1, t2}, 

3609 \\Delta u_{dip, patch1, t2}, 

3610 \\Delta u_{tensile, patch1, t2}]\\\\ 

3611 &], [\\\\ 

3612 &[\\Delta u_{strike, patch2, t1}, ...],\\\\ 

3613 &[\\Delta u_{strike, patch2, t2}, ...]]]\\\\ 

3614 

3615 :rtype: :py:class:`~numpy.ndarray`: ``(n_sources, n_times, 3)``, 

3616 :py:class:`~numpy.ndarray`: ``(n_times, )`` 

3617 ''' 

3618 if store and deltat: 

3619 raise AttributeError( 

3620 'Argument collision. ' 

3621 'Please define only the store or the deltat argument.') 

3622 

3623 if store: 

3624 deltat = store.config.deltat 

3625 

3626 if not deltat: 

3627 raise AttributeError('Please give a GF store or set deltat.') 

3628 

3629 npatches = len(self.patches) 

3630 

3631 _, _, _, _, time_interpolator, _ = self.get_vr_time_interpolators( 

3632 store, interpolation=interpolation) 

3633 tmax = time_interpolator.values.max() 

3634 

3635 calc_times = num.arange(0., tmax + deltat, deltat) 

3636 calc_times[calc_times > tmax] = tmax 

3637 

3638 disloc_est = num.zeros((npatches, calc_times.size, 3)) 

3639 

3640 for itime, t in enumerate(calc_times): 

3641 disloc_est[:, itime, :] = self.get_slip( 

3642 time=t, scale_slip=False, **kwargs) 

3643 

3644 if self.slip: 

3645 disloc_tmax = num.linalg.norm( 

3646 self.get_slip(scale_slip=False, time=tmax), 

3647 axis=1) 

3648 

3649 disloc_tmax_max = disloc_tmax.max() 

3650 if disloc_tmax_max == 0.: 

3651 logger.warning( 

3652 'Slip scaling not performed. Maximum slip is 0.') 

3653 else: 

3654 disloc_est *= self.slip / disloc_tmax_max 

3655 

3656 if not delta: 

3657 return disloc_est, calc_times 

3658 

3659 # if we have only one timestep there is no gradient 

3660 if calc_times.size > 1: 

3661 disloc_init = disloc_est[:, 0, :] 

3662 disloc_est = num.diff(disloc_est, axis=1) 

3663 disloc_est = num.concatenate(( 

3664 disloc_init[:, num.newaxis, :], disloc_est), axis=1) 

3665 

3666 calc_times = calc_times 

3667 

3668 return disloc_est, calc_times 

3669 

3670 def get_slip_rate(self, *args, **kwargs): 

3671 ''' 

3672 Get slip rate inverted from patches. 

3673 

3674 The time interval, within which the slip rates are computed is 

3675 determined by the sampling rate of the Green's function database or 

3676 ``deltat``. Additional ``*args`` and ``**kwargs`` are passed to 

3677 :py:meth:`get_delta_slip`. 

3678 

3679 :returns: 

3680 Slip rates (:math:`\\Delta u_{strike}/\\Delta t`, 

3681 :math:`\\Delta u_{dip}/\\Delta t, \\Delta u_{tensile}/\\Delta t`) 

3682 for each source patch and time; corner times, for which slip rate 

3683 is computed. The order of sliprate array is: 

3684 

3685 .. math:: 

3686 

3687 &[[\\\\ 

3688 &[\\Delta u_{strike, patch1, t1}/\\Delta t, 

3689 \\Delta u_{dip, patch1, t1}/\\Delta t, 

3690 \\Delta u_{tensile, patch1, t1}/\\Delta t],\\\\ 

3691 &[\\Delta u_{strike, patch1, t2}/\\Delta t, 

3692 \\Delta u_{dip, patch1, t2}/\\Delta t, 

3693 \\Delta u_{tensile, patch1, t2}/\\Delta t]], [\\\\ 

3694 &[\\Delta u_{strike, patch2, t1}/\\Delta t, ...],\\\\ 

3695 &[\\Delta u_{strike, patch2, t2}/\\Delta t, ...]]]\\\\ 

3696 

3697 :rtype: :py:class:`~numpy.ndarray`: ``(n_sources, n_times, 3)``, 

3698 :py:class:`~numpy.ndarray`: ``(n_times, )`` 

3699 ''' 

3700 ddisloc_est, calc_times = self.get_delta_slip( 

3701 *args, delta=True, **kwargs) 

3702 

3703 dt = num.concatenate( 

3704 [(num.diff(calc_times)[0], ), num.diff(calc_times)]) 

3705 slip_rate = num.linalg.norm(ddisloc_est, axis=2) / dt 

3706 

3707 return slip_rate, calc_times 

3708 

3709 def get_moment_rate_patches(self, *args, **kwargs): 

3710 ''' 

3711 Get scalar seismic moment rate for each patch individually. 

3712 

3713 Additional ``*args`` and ``**kwargs`` are passed to 

3714 :py:meth:`get_slip_rate`. 

3715 

3716 :returns: 

3717 Seismic moment rate for each source patch and time; corner times, 

3718 for which patch moment rate is computed based on slip rate. The 

3719 order of the moment rate array is: 

3720 

3721 .. math:: 

3722 

3723 &[\\\\ 

3724 &[(\\Delta M / \\Delta t)_{patch1, t1}, 

3725 (\\Delta M / \\Delta t)_{patch1, t2}, ...],\\\\ 

3726 &[(\\Delta M / \\Delta t)_{patch2, t1}, 

3727 (\\Delta M / \\Delta t)_{patch, t2}, ...],\\\\ 

3728 &[...]]\\\\ 

3729 

3730 :rtype: :py:class:`~numpy.ndarray`: ``(n_sources, n_times)``, 

3731 :py:class:`~numpy.ndarray`: ``(n_times, )`` 

3732 ''' 

3733 slip_rate, calc_times = self.get_slip_rate(*args, **kwargs) 

3734 

3735 shear_mod = self.get_patch_attribute('shearmod') 

3736 p_length = self.get_patch_attribute('length') 

3737 p_width = self.get_patch_attribute('width') 

3738 

3739 dA = p_length * p_width 

3740 

3741 mom_rate = shear_mod[:, num.newaxis] * slip_rate * dA[:, num.newaxis] 

3742 

3743 return mom_rate, calc_times 

3744 

3745 def get_moment_rate(self, store, target=None, deltat=None): 

3746 ''' 

3747 Get seismic source moment rate for the total source (STF). 

3748 

3749 :param store: 

3750 Green's function database (needs to cover whole region of of the 

3751 source). Its ``deltat`` [s] is used as time increment for slip 

3752 difference calculation. Either ``deltat`` or ``store`` should be 

3753 given. 

3754 :type store: 

3755 :py:class:`~pyrocko.gf.store.Store` 

3756 

3757 :param target: 

3758 Target information, needed for interpolation method. 

3759 :type target: 

3760 optional, :py:class:`~pyrocko.gf.targets.Target` 

3761 

3762 :param deltat: 

3763 Time increment for slip difference calculation [s]. If not given 

3764 ``store.deltat`` is used. 

3765 :type deltat: 

3766 optional, float 

3767 

3768 :return: 

3769 Seismic moment rate [Nm/s] for each time; corner times, for which 

3770 moment rate is computed. The order of the moment rate array is: 

3771 

3772 .. math:: 

3773 

3774 &[\\\\ 

3775 &(\\Delta M / \\Delta t)_{t1},\\\\ 

3776 &(\\Delta M / \\Delta t)_{t2},\\\\ 

3777 &...]\\\\ 

3778 

3779 :rtype: 

3780 :py:class:`~numpy.ndarray`: ``(n_times, )``, 

3781 :py:class:`~numpy.ndarray`: ``(n_times, )`` 

3782 ''' 

3783 if not deltat: 

3784 deltat = store.config.deltat 

3785 return self.discretize_basesource( 

3786 store, target=target).get_moment_rate(deltat) 

3787 

3788 def get_moment(self, *args, **kwargs): 

3789 ''' 

3790 Get seismic cumulative moment. 

3791 

3792 Additional ``*args`` and ``**kwargs`` are passed to 

3793 :py:meth:`get_magnitude`. 

3794 

3795 :returns: 

3796 Cumulative seismic moment in [Nm]. 

3797 :rtype: 

3798 float 

3799 ''' 

3800 return float(pmt.magnitude_to_moment(self.get_magnitude( 

3801 *args, **kwargs))) 

3802 

3803 def rescale_slip(self, magnitude=None, moment=None, **kwargs): 

3804 ''' 

3805 Rescale source slip based on given target magnitude or seismic moment. 

3806 

3807 Rescale the maximum source slip to fit the source moment magnitude or 

3808 seismic moment to the given target values. Either ``magnitude`` or 

3809 ``moment`` need to be given. Additional ``**kwargs`` are passed to 

3810 :py:meth:`get_moment`. 

3811 

3812 :param magnitude: 

3813 Target moment magnitude :math:`M_\\mathrm{w}` as in 

3814 [Hanks and Kanamori, 1979] 

3815 :type magnitude: 

3816 optional, float 

3817 

3818 :param moment: 

3819 Target seismic moment :math:`M_0` [Nm]. 

3820 :type moment: 

3821 optional, float 

3822 ''' 

3823 if self.slip is None: 

3824 self.slip = 1. 

3825 logger.warning('No slip found for rescaling. ' 

3826 'An initial slip of 1 m is assumed.') 

3827 

3828 if magnitude is None and moment is None: 

3829 raise ValueError( 

3830 'Either target magnitude or moment need to be given.') 

3831 

3832 moment_init = self.get_moment(**kwargs) 

3833 

3834 if magnitude is not None: 

3835 moment = pmt.magnitude_to_moment(magnitude) 

3836 

3837 self.slip *= moment / moment_init 

3838 

3839 def get_centroid(self, store, *args, **kwargs): 

3840 ''' 

3841 Centroid of the pseudo dynamic rupture model. 

3842 

3843 The centroid location and time are derived from the locations and times 

3844 of the individual patches weighted with their moment contribution. 

3845 Additional ``**kwargs`` are passed to :py:meth:`pyrocko_moment_tensor`. 

3846 

3847 :param store: 

3848 Green's function database (needs to cover whole region of of the 

3849 source). Its ``deltat`` [s] is used as time increment for slip 

3850 difference calculation. Either ``deltat`` or ``store`` should be 

3851 given. 

3852 :type store: 

3853 :py:class:`~pyrocko.gf.store.Store` 

3854 

3855 :returns: 

3856 The centroid location and associated moment tensor. 

3857 :rtype: 

3858 :py:class:`pyrocko.model.Event` 

3859 ''' 

3860 _, _, _, _, time, _ = self.get_vr_time_interpolators(store) 

3861 t_max = time.values.max() 

3862 

3863 moment_rate, times = self.get_moment_rate_patches(deltat=t_max) 

3864 

3865 moment = num.sum(moment_rate * times, axis=1) 

3866 weights = moment / moment.sum() 

3867 

3868 norths = self.get_patch_attribute('north_shift') 

3869 easts = self.get_patch_attribute('east_shift') 

3870 depths = self.get_patch_attribute('depth') 

3871 

3872 centroid_n = num.sum(weights * norths) 

3873 centroid_e = num.sum(weights * easts) 

3874 centroid_d = num.sum(weights * depths) 

3875 

3876 centroid_lat, centroid_lon = ne_to_latlon( 

3877 self.lat, self.lon, centroid_n, centroid_e) 

3878 

3879 moment_rate_, times = self.get_moment_rate(store) 

3880 delta_times = num.concatenate(( 

3881 [times[1] - times[0]], 

3882 num.diff(times))) 

3883 moment_src = delta_times * moment_rate 

3884 

3885 centroid_t = num.sum( 

3886 moment_src / num.sum(moment_src) * times) + self.time 

3887 

3888 mt = self.pyrocko_moment_tensor(store, *args, **kwargs) 

3889 

3890 return model.Event( 

3891 lat=centroid_lat, 

3892 lon=centroid_lon, 

3893 depth=centroid_d, 

3894 time=centroid_t, 

3895 moment_tensor=mt, 

3896 magnitude=mt.magnitude, 

3897 duration=t_max) 

3898 

3899 

3900class DoubleDCSource(SourceWithMagnitude): 

3901 ''' 

3902 Two double-couple point sources separated in space and time. 

3903 Moment share between the sub-sources is controlled by the 

3904 parameter mix. 

3905 The position of the subsources is dependent on the moment 

3906 distribution between the two sources. Depth, east and north 

3907 shift are given for the centroid between the two double-couples. 

3908 The subsources will positioned according to their moment shares 

3909 around this centroid position. 

3910 This is done according to their delta parameters, which are 

3911 therefore in relation to that centroid. 

3912 Note that depth of the subsources therefore can be 

3913 depth+/-delta_depth. For shallow earthquakes therefore 

3914 the depth has to be chosen deeper to avoid sampling 

3915 above surface. 

3916 ''' 

3917 

3918 strike1 = Float.T( 

3919 default=0.0, 

3920 help='strike direction in [deg], measured clockwise from north') 

3921 

3922 dip1 = Float.T( 

3923 default=90.0, 

3924 help='dip angle in [deg], measured downward from horizontal') 

3925 

3926 azimuth = Float.T( 

3927 default=0.0, 

3928 help='azimuth to second double-couple [deg], ' 

3929 'measured at first, clockwise from north') 

3930 

3931 rake1 = Float.T( 

3932 default=0.0, 

3933 help='rake angle in [deg], ' 

3934 'measured counter-clockwise from right-horizontal ' 

3935 'in on-plane view') 

3936 

3937 strike2 = Float.T( 

3938 default=0.0, 

3939 help='strike direction in [deg], measured clockwise from north') 

3940 

3941 dip2 = Float.T( 

3942 default=90.0, 

3943 help='dip angle in [deg], measured downward from horizontal') 

3944 

3945 rake2 = Float.T( 

3946 default=0.0, 

3947 help='rake angle in [deg], ' 

3948 'measured counter-clockwise from right-horizontal ' 

3949 'in on-plane view') 

3950 

3951 delta_time = Float.T( 

3952 default=0.0, 

3953 help='separation of double-couples in time (t2-t1) [s]') 

3954 

3955 delta_depth = Float.T( 

3956 default=0.0, 

3957 help='difference in depth (z2-z1) [m]') 

3958 

3959 distance = Float.T( 

3960 default=0.0, 

3961 help='distance between the two double-couples [m]') 

3962 

3963 mix = Float.T( 

3964 default=0.5, 

3965 help='how to distribute the moment to the two doublecouples ' 

3966 'mix=0 -> m1=1 and m2=0; mix=1 -> m1=0, m2=1') 

3967 

3968 stf1 = STF.T( 

3969 optional=True, 

3970 help='Source time function of subsource 1 ' 

3971 '(if given, overrides STF from attribute :py:gattr:`Source.stf`)') 

3972 

3973 stf2 = STF.T( 

3974 optional=True, 

3975 help='Source time function of subsource 2 ' 

3976 '(if given, overrides STF from attribute :py:gattr:`Source.stf`)') 

3977 

3978 discretized_source_class = meta.DiscretizedMTSource 

3979 

    def base_key(self):
        # Tuple of all parameters (location, both sub-source mechanisms,
        # their STFs and the relative offsets) that determine the Green's
        # function superposition for this source.
        return (
            self.time, self.depth, self.lat, self.north_shift,
            self.lon, self.east_shift, type(self).__name__) + \
            self.effective_stf1_pre().base_key() + \
            self.effective_stf2_pre().base_key() + (
                self.strike1, self.dip1, self.rake1,
                self.strike2, self.dip2, self.rake2,
                self.delta_time, self.delta_depth,
                self.azimuth, self.distance, self.mix)

3990 

    def get_factor(self):
        # Scalar moment acts as the overall amplitude factor; the moment
        # split between the sub-sources is handled via 'mix'.
        return self.moment

3993 

    def effective_stf1_pre(self):
        # STF for sub-source 1: explicit stf1 wins, then the common stf,
        # otherwise a unit pulse.
        return self.stf1 or self.stf or g_unit_pulse

3996 

    def effective_stf2_pre(self):
        # STF for sub-source 2: explicit stf2 wins, then the common stf,
        # otherwise a unit pulse.
        return self.stf2 or self.stf or g_unit_pulse

3999 

    def effective_stf_post(self):
        # The sub-source STFs are applied before discretization
        # (effective_stf*_pre), so no post-convolution is needed.
        return g_unit_pulse

4002 

4003 def split(self): 

4004 a1 = 1.0 - self.mix 

4005 a2 = self.mix 

4006 delta_north = math.cos(self.azimuth * d2r) * self.distance 

4007 delta_east = math.sin(self.azimuth * d2r) * self.distance 

4008 

4009 dc1 = DCSource( 

4010 lat=self.lat, 

4011 lon=self.lon, 

4012 time=self.time - self.delta_time * a2, 

4013 north_shift=self.north_shift - delta_north * a2, 

4014 east_shift=self.east_shift - delta_east * a2, 

4015 depth=self.depth - self.delta_depth * a2, 

4016 moment=self.moment * a1, 

4017 strike=self.strike1, 

4018 dip=self.dip1, 

4019 rake=self.rake1, 

4020 stf=self.stf1 or self.stf) 

4021 

4022 dc2 = DCSource( 

4023 lat=self.lat, 

4024 lon=self.lon, 

4025 time=self.time + self.delta_time * a1, 

4026 north_shift=self.north_shift + delta_north * a1, 

4027 east_shift=self.east_shift + delta_east * a1, 

4028 depth=self.depth + self.delta_depth * a1, 

4029 moment=self.moment * a2, 

4030 strike=self.strike2, 

4031 dip=self.dip2, 

4032 rake=self.rake2, 

4033 stf=self.stf2 or self.stf) 

4034 

4035 return [dc1, dc2] 

4036 

    def discretize_basesource(self, store, target=None):
        '''
        Discretize both double-couples into one
        :py:class:`meta.DiscretizedMTSource`.

        Each subsource is expanded over its own STF samples; positions and
        times are offset symmetrically around the reference point according
        to ``mix``, ``azimuth``, ``distance``, ``delta_time`` and
        ``delta_depth``.
        '''
        # Moment weights of the two subsources (mix=0 -> all on DC1).
        a1 = 1.0 - self.mix
        a2 = self.mix
        mot1 = pmt.MomentTensor(strike=self.strike1, dip=self.dip1,
                                rake=self.rake1, scalar_moment=a1)
        mot2 = pmt.MomentTensor(strike=self.strike2, dip=self.dip2,
                                rake=self.rake2, scalar_moment=a2)

        # Horizontal offset vector between the two subsources.
        delta_north = math.cos(self.azimuth * d2r) * self.distance
        delta_east = math.sin(self.azimuth * d2r) * self.distance

        times1, amplitudes1 = self.effective_stf1_pre().discretize_t(
            store.config.deltat, self.time - self.delta_time * a2)

        times2, amplitudes2 = self.effective_stf2_pre().discretize_t(
            store.config.deltat, self.time + self.delta_time * a1)

        nt1 = times1.size
        nt2 = times2.size

        # Repeat each subsource's position once per STF sample and scale
        # the 6-component moment tensor by the STF amplitudes.
        ds = meta.DiscretizedMTSource(
            lat=self.lat,
            lon=self.lon,
            times=num.concatenate((times1, times2)),
            north_shifts=num.concatenate((
                num.repeat(self.north_shift - delta_north * a2, nt1),
                num.repeat(self.north_shift + delta_north * a1, nt2))),
            east_shifts=num.concatenate((
                num.repeat(self.east_shift - delta_east * a2, nt1),
                num.repeat(self.east_shift + delta_east * a1, nt2))),
            depths=num.concatenate((
                num.repeat(self.depth - self.delta_depth * a2, nt1),
                num.repeat(self.depth + self.delta_depth * a1, nt2))),
            m6s=num.vstack((
                mot1.m6()[num.newaxis, :] * amplitudes1[:, num.newaxis],
                mot2.m6()[num.newaxis, :] * amplitudes2[:, num.newaxis])))

        return ds

4075 

4076 def pyrocko_moment_tensor(self, store=None, target=None): 

4077 a1 = 1.0 - self.mix 

4078 a2 = self.mix 

4079 mot1 = pmt.MomentTensor(strike=self.strike1, dip=self.dip1, 

4080 rake=self.rake1, 

4081 scalar_moment=a1 * self.moment) 

4082 mot2 = pmt.MomentTensor(strike=self.strike2, dip=self.dip2, 

4083 rake=self.rake2, 

4084 scalar_moment=a2 * self.moment) 

4085 return pmt.MomentTensor(m=mot1.m() + mot2.m()) 

4086 

    def pyrocko_event(self, store=None, target=None, **kwargs):
        '''
        Convert to a Pyrocko event object, attaching the summed moment
        tensor of the two double-couples.
        '''
        return SourceWithMagnitude.pyrocko_event(
            self, store, target,
            moment_tensor=self.pyrocko_moment_tensor(store, target),
            **kwargs)

4092 

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        '''
        Construct from a Pyrocko event.

        If the event carries a moment tensor, both subsources are
        initialized with the same (first) nodal plane and ``mix=0.0``, so
        that all moment is assigned to subsource 1.
        '''
        d = {}
        mt = ev.moment_tensor
        if mt:
            (strike, dip, rake), _ = mt.both_strike_dip_rake()
            d.update(
                strike1=float(strike),
                dip1=float(dip),
                rake1=float(rake),
                strike2=float(strike),
                dip2=float(dip),
                rake2=float(rake),
                mix=0.0,
                magnitude=float(mt.moment_magnitude()))

        d.update(kwargs)
        source = super(DoubleDCSource, cls).from_pyrocko_event(ev, **d)
        source.stf1 = source.stf
        # Subsource 2 carries no moment (mix=0.0); give it a zero-duration
        # STF placeholder.
        source.stf2 = HalfSinusoidSTF(effective_duration=0.)
        source.stf = None
        return source

4115 

4116 

class RingfaultSource(SourceWithMagnitude):
    '''
    A ring fault with vertical doublecouples.

    The ring is discretized into ``npointsources`` vertical double-couple
    point sources placed on its circumference, each rotated to its local
    tangent direction and to the ring plane's strike/dip.
    '''

    diameter = Float.T(
        default=1.0,
        help='diameter of the ring in [m]')

    sign = Float.T(
        default=1.0,
        help='inside of the ring moves up (+1) or down (-1)')

    strike = Float.T(
        default=0.0,
        help='strike direction of the ring plane, clockwise from north,'
             ' in [deg]')

    dip = Float.T(
        default=0.0,
        help='dip angle of the ring plane from horizontal in [deg]')

    npointsources = Int.T(
        default=360,
        help='number of point sources to use')

    discretized_source_class = meta.DiscretizedMTSource

    def base_key(self):
        return Source.base_key(self) + (
            self.strike, self.dip, self.diameter, self.npointsources)

    def get_factor(self):
        # Overall scale: sign selects uplift (+1) / subsidence (-1) of the
        # ring interior.
        return self.sign * self.moment

    def discretize_basesource(self, store=None, target=None):
        '''
        Discretize the ring into point sources.

        NOTE(review): despite the ``store=None`` default, ``store.config``
        is accessed below, so a store is effectively required -- confirm
        callers always pass one.
        '''
        n = self.npointsources
        phi = num.linspace(0, 2.0 * num.pi, n, endpoint=False)

        # Points on the ring circumference, in the (horizontal) ring plane.
        points = num.zeros((n, 3))
        points[:, 0] = num.cos(phi) * 0.5 * self.diameter
        points[:, 1] = num.sin(phi) * 0.5 * self.diameter

        # Rotate the ring plane by dip and strike.
        rotmat = num.array(pmt.euler_to_matrix(
            self.dip * d2r, self.strike * d2r, 0.0))
        points = num.dot(rotmat.T, points.T).T  # !!! ?

        points[:, 0] += self.north_shift
        points[:, 1] += self.east_shift
        points[:, 2] += self.depth

        # Base vertical double-couple; total moment is shared equally among
        # the n point sources.
        m = num.array(pmt.MomentTensor(strike=90., dip=90., rake=-90.,
                                       scalar_moment=1.0 / n).m())

        # Per-point rotation matrices about the vertical axis by angle phi.
        rotmats = num.transpose(
            [[num.cos(phi), num.sin(phi), num.zeros(n)],
             [-num.sin(phi), num.cos(phi), num.zeros(n)],
             [num.zeros(n), num.zeros(n), num.ones(n)]], (2, 0, 1))

        # Rotate the base tensor to each point's tangent direction, then
        # into the (strike, dip) ring plane orientation.
        ms = num.zeros((n, 3, 3))
        for i in range(n):
            mtemp = num.dot(rotmats[i].T, num.dot(m, rotmats[i]))
            ms[i, :, :] = num.dot(rotmat.T, num.dot(mtemp, rotmat))

        # Pack full tensors into 6-component (mnn, mee, mdd, mne, mnd, med)
        # form.
        m6s = num.vstack((ms[:, 0, 0], ms[:, 1, 1], ms[:, 2, 2],
                          ms[:, 0, 1], ms[:, 0, 2], ms[:, 1, 2])).T

        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)

        nt = times.size

        # Repeat each point once per STF sample; scale tensors by the STF
        # amplitudes.
        return meta.DiscretizedMTSource(
            times=num.tile(times, n),
            lat=self.lat,
            lon=self.lon,
            north_shifts=num.repeat(points[:, 0], nt),
            east_shifts=num.repeat(points[:, 1], nt),
            depths=num.repeat(points[:, 2], nt),
            m6s=num.repeat(m6s, nt, axis=0) * num.tile(
                amplitudes, n)[:, num.newaxis])

4198 

4199 

class CombiSource(Source):
    '''
    Composite source model.

    The location attributes of the combined source (origin, time, depth
    and horizontal shifts) are set to the mean of the subsource values.
    If the subsources do not all share a common origin, they are cloned
    and relocated to the origin of the first subsource.
    '''

    discretized_source_class = meta.DiscretizedMTSource

    subsources = List.T(Source.T())

    def __init__(self, subsources=[], **kwargs):
        # The mutable default is harmless here: it is validated and
        # reassigned, never mutated.
        if not subsources:
            raise BadRequest(
                'Need at least one sub-source to create a CombiSource object.')

        lats = num.array(
            [subsource.lat for subsource in subsources], dtype=float)
        lons = num.array(
            [subsource.lon for subsource in subsources], dtype=float)

        lat, lon = lats[0], lons[0]

        # Fix: the original condition
        # ``not num.all(lats == lat) and num.all(lons == lon)``
        # was mis-parenthesized -- due to operator precedence the
        # relocation was skipped whenever longitudes differed. Relocate
        # whenever the subsources do not all share the same origin.
        if not (num.all(lats == lat) and num.all(lons == lon)):
            subsources = [s.clone() for s in subsources]
            for subsource in subsources[1:]:
                subsource.set_origin(lat, lon)

        # Combined reference location/time: mean over subsources.
        depth = float(num.mean([p.depth for p in subsources]))
        time = float(num.mean([p.time for p in subsources]))
        north_shift = float(num.mean([p.north_shift for p in subsources]))
        east_shift = float(num.mean([p.east_shift for p in subsources]))
        kwargs.update(
            time=time,
            lat=float(lat),
            lon=float(lon),
            north_shift=north_shift,
            east_shift=east_shift,
            depth=depth)

        Source.__init__(self, subsources=subsources, **kwargs)

    def get_factor(self):
        # Subsources are individually scaled in discretize_basesource.
        return 1.0

    def discretize_basesource(self, store, target=None):
        '''
        Discretize all subsources and combine them into a single
        discretized source, applying each subsource's own factor.
        '''
        dsources = []
        for sf in self.subsources:
            ds = sf.discretize_basesource(store, target)
            ds.m6s *= sf.get_factor()
            dsources.append(ds)

        return meta.DiscretizedMTSource.combine(dsources)

4250 

4251 

class SFSource(Source):
    '''
    A single force point source.

    Supported GF schemes: `'elastic5'`.
    '''

    discretized_source_class = meta.DiscretizedSFSource

    fn = Float.T(
        default=0.,
        help='northward component of single force [N]')

    fe = Float.T(
        default=0.,
        help='eastward component of single force [N]')

    fd = Float.T(
        default=0.,
        help='downward component of single force [N]')

    def __init__(self, **kwargs):
        Source.__init__(self, **kwargs)

    def base_key(self):
        # Force components participate in the GF stacking cache key.
        return Source.base_key(self) + (self.fn, self.fe, self.fd)

    def get_factor(self):
        return 1.0

    def discretize_basesource(self, store, target=None):
        '''
        Spread the force vector over the discretized source time function.
        '''
        stf_times, stf_amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)

        force_ned = num.array(
            [[self.fn, self.fe, self.fd]], dtype=float)
        forces = stf_amplitudes[:, num.newaxis] * force_ned

        return meta.DiscretizedSFSource(
            forces=forces,
            **self._dparams_base_repeated(stf_times))

    def pyrocko_event(self, store=None, target=None, **kwargs):
        return Source.pyrocko_event(self, store, target, **kwargs)

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        return super(SFSource, cls).from_pyrocko_event(ev, **kwargs)

4301 

4302 

class PorePressurePointSource(Source):
    '''
    Excess pore pressure point source.

    For poro-elastic initial value problem where an excess pore pressure is
    brought into a small source volume.
    '''

    discretized_source_class = meta.DiscretizedPorePressureSource

    pp = Float.T(
        default=1.0,
        help='initial excess pore pressure in [Pa]')

    def base_key(self):
        # pp acts only as a scale factor, so it does not enter the cache
        # key.
        return Source.base_key(self)

    def get_factor(self):
        return self.pp

    def discretize_basesource(self, store, target=None):
        # Unit pore pressure; the actual value is applied via get_factor().
        # ``arr`` is a helper presumably defined elsewhere in this module.
        return meta.DiscretizedPorePressureSource(pp=arr(1.0),
                                                  **self._dparams_base())

4326 

4327 

class PorePressureLineSource(Source):
    '''
    Excess pore pressure line source.

    The line source is centered at (north_shift, east_shift, depth).
    '''

    discretized_source_class = meta.DiscretizedPorePressureSource

    pp = Float.T(
        default=1.0,
        help='initial excess pore pressure in [Pa]')

    length = Float.T(
        default=0.0,
        help='length of the line source [m]')

    azimuth = Float.T(
        default=0.0,
        help='azimuth direction, clockwise from north [deg]')

    dip = Float.T(
        default=90.,
        help='dip direction, downward from horizontal [deg]')

    def base_key(self):
        return Source.base_key(self) + (self.azimuth, self.dip, self.length)

    def get_factor(self):
        return self.pp

    def discretize_basesource(self, store, target=None):
        '''
        Discretize the line into point sources spaced at (half) the
        store's smallest grid spacing, sharing unit pore pressure equally.
        '''

        # Odd number of points so the center point lies on the reference
        # location.
        n = 2 * int(math.ceil(self.length / num.min(store.config.deltas))) + 1

        # Signed positions along the line, centered at zero.
        a = num.linspace(-0.5 * self.length, 0.5 * self.length, n)

        sa = math.sin(self.azimuth * d2r)
        ca = math.cos(self.azimuth * d2r)
        sd = math.sin(self.dip * d2r)
        cd = math.cos(self.dip * d2r)

        # Project line coordinates into NED space.
        points = num.zeros((n, 3))
        points[:, 0] = self.north_shift + a * ca * cd
        points[:, 1] = self.east_shift + a * sa * cd
        points[:, 2] = self.depth + a * sd

        return meta.DiscretizedPorePressureSource(
            times=util.num_full(n, self.time),
            lat=self.lat,
            lon=self.lon,
            north_shifts=points[:, 0],
            east_shifts=points[:, 1],
            depths=points[:, 2],
            pp=num.ones(n) / n)

4383 

4384 

class Request(Object):
    '''
    Synthetic seismogram computation request.

    ::

        Request(**kwargs)
        Request(sources, targets, **kwargs)
    '''

    sources = List.T(
        Source.T(),
        help='list of sources for which to produce synthetics.')

    targets = List.T(
        Target.T(),
        help='list of targets for which to produce synthetics.')

    @classmethod
    def args2kwargs(cls, args):
        '''
        Map positional constructor arguments to keyword arguments.

        NOTE(review): three positional arguments are accepted but silently
        ignored -- presumably a leftover from an older signature; confirm
        before tightening.
        '''
        if len(args) not in (0, 2, 3):
            raise BadRequest('Invalid arguments.')

        if len(args) == 2:
            return dict(sources=args[0], targets=args[1])
        else:
            return {}

    def __init__(self, *args, **kwargs):
        kwargs.update(self.args2kwargs(args))
        sources = kwargs.pop('sources', [])
        targets = kwargs.pop('targets', [])

        # Allow a single source / target instead of a list.
        if isinstance(sources, Source):
            sources = [sources]

        if isinstance(targets, (Target, StaticTarget)):
            targets = [targets]

        Object.__init__(self, sources=sources, targets=targets, **kwargs)

    @property
    def targets_dynamic(self):
        '''
        Subset of :py:attr:`targets` which are dynamic (waveform) targets.
        '''
        return [t for t in self.targets if isinstance(t, Target)]

    @property
    def targets_static(self):
        '''
        Subset of :py:attr:`targets` which are static targets.
        '''
        return [t for t in self.targets if isinstance(t, StaticTarget)]

    @property
    def has_dynamic(self):
        return bool(self.targets_dynamic)

    @property
    def has_statics(self):
        return bool(self.targets_static)

    def subsources_map(self):
        '''
        Group sources by their ``base_key()``.
        '''
        m = defaultdict(list)
        for source in self.sources:
            m[source.base_key()].append(source)

        return m

    def subtargets_map(self):
        '''
        Group targets by their ``base_key()``.
        '''
        m = defaultdict(list)
        for target in self.targets:
            m[target.base_key()].append(target)

        return m

    def subrequest_map(self):
        '''
        Cartesian product of grouped sources and grouped targets.
        '''
        ms = self.subsources_map()
        mt = self.subtargets_map()
        m = {}
        for (ks, ls) in ms.items():
            for (kt, lt) in mt.items():
                m[ks, kt] = (ls, lt)

        return m

4465 

4466 

class ProcessingStats(Object):
    '''
    Timing and counting statistics of a seismogram processing run.

    The ``t_perc_*`` attributes presumably hold per-stage shares of the
    total processing time, ``t_wallclock``/``t_cpu`` absolute times --
    TODO confirm units against the code filling this object.
    '''
    t_perc_get_store_and_receiver = Float.T(default=0.)
    t_perc_discretize_source = Float.T(default=0.)
    t_perc_make_base_seismogram = Float.T(default=0.)
    t_perc_make_same_span = Float.T(default=0.)
    t_perc_post_process = Float.T(default=0.)
    t_perc_optimize = Float.T(default=0.)
    t_perc_stack = Float.T(default=0.)
    t_perc_static_get_store = Float.T(default=0.)
    t_perc_static_discretize_basesource = Float.T(default=0.)
    t_perc_static_sum_statics = Float.T(default=0.)
    t_perc_static_post_process = Float.T(default=0.)
    t_wallclock = Float.T(default=0.)
    t_cpu = Float.T(default=0.)
    n_read_blocks = Int.T(default=0)
    n_results = Int.T(default=0)
    n_subrequests = Int.T(default=0)
    n_stores = Int.T(default=0)
    n_records_stacked = Int.T(default=0)

4486 

4487 

class Response(Object):
    '''
    Response object to a synthetic seismogram computation request.
    '''

    request = Request.T()
    # results_list[isource][itarget] holds the result for each
    # source/target combination of the request.
    results_list = List.T(List.T(meta.SeismosizerResult.T()))
    stats = ProcessingStats.T()

    def pyrocko_traces(self):
        '''
        Return a list of requested
        :class:`~pyrocko.trace.Trace` instances.
        '''

        traces = []
        for results in self.results_list:
            for result in results:
                # Skip error results and non-trace results.
                if not isinstance(result, meta.Result):
                    continue
                traces.append(result.trace.pyrocko_trace())

        return traces

    def kite_scenes(self):
        '''
        Return a list of requested
        :class:`~kite.scenes` instances.
        '''
        kite_scenes = []
        for results in self.results_list:
            for result in results:
                if isinstance(result, meta.KiteSceneResult):
                    sc = result.get_scene()
                    kite_scenes.append(sc)

        return kite_scenes

    def static_results(self):
        '''
        Return a list of requested
        :class:`~pyrocko.gf.meta.StaticResult` instances.
        '''
        statics = []
        for results in self.results_list:
            for result in results:
                if not isinstance(result, meta.StaticResult):
                    continue
                statics.append(result)

        return statics

    def iter_results(self, get='pyrocko_traces'):
        '''
        Generator function to iterate over results of request.

        :param get: ``'pyrocko_traces'`` to yield converted traces,
            ``'results'`` to yield raw result objects.

        Yields associated :py:class:`Source`,
        :class:`~pyrocko.gf.targets.Target`,
        :class:`~pyrocko.trace.Trace` instances in each iteration.
        '''

        for isource, source in enumerate(self.request.sources):
            for itarget, target in enumerate(self.request.targets):
                result = self.results_list[isource][itarget]
                if get == 'pyrocko_traces':
                    yield source, target, result.trace.pyrocko_trace()
                elif get == 'results':
                    yield source, target, result

    def snuffle(self, **kwargs):
        '''
        Open *snuffler* with requested traces.
        '''

        trace.snuffle(self.pyrocko_traces(), **kwargs)

4563 

4564 

class Engine(Object):
    '''
    Base class for synthetic seismogram calculators.
    '''

    def get_store_ids(self):
        '''
        Get list of available GF store IDs
        '''

        # Base implementation knows no stores; subclasses override this.
        return []

4576 

4577 

class Rule(object):
    '''
    Base class for rules mapping GF store components to target quantities.

    Subclasses provide ``required_components(target)`` and
    ``apply_(target, base_seismogram)``.
    '''
    pass

4580 

4581 

class VectorRule(Rule):
    # Projects the three NED GF components of a vector quantity onto the
    # target's measurement direction, optionally differentiating.

    def __init__(self, quantity, differentiate=0, integrate=0):
        # Component names, e.g. 'displacement.n', 'displacement.e', '...d'.
        self.components = [quantity + '.' + c for c in 'ned']
        self.differentiate = differentiate
        self.integrate = integrate

    def required_components(self, target):
        '''
        Return the subset of NED components whose projection factor is
        nonzero for this target.
        '''
        n, e, d = self.components
        sa, ca, sd, cd = target.get_sin_cos_factors()

        comps = []
        if nonzero(ca * cd):
            comps.append(n)

        if nonzero(sa * cd):
            comps.append(e)

        if nonzero(sd):
            comps.append(d)

        return tuple(comps)

    def apply_(self, target, base_seismogram):
        '''
        Project the component traces onto the target's measurement
        direction; optionally differentiate the result.
        '''
        n, e, d = self.components
        sa, ca, sd, cd = target.get_sin_cos_factors()

        if nonzero(ca * cd):
            data = base_seismogram[n].data * (ca * cd)
            deltat = base_seismogram[n].deltat
        else:
            data = 0.0

        if nonzero(sa * cd):
            data = data + base_seismogram[e].data * (sa * cd)
            deltat = base_seismogram[e].deltat

        if nonzero(sd):
            data = data + base_seismogram[d].data * sd
            deltat = base_seismogram[d].deltat

        if self.differentiate:
            # deltat is bound above as long as at least one projection
            # factor is nonzero -- TODO confirm for degenerate targets.
            data = util.diff_fd(self.differentiate, 4, deltat, data)

        if self.integrate:
            raise NotImplementedError('Integration is not implemented yet.')

        return data

4630 

4631 

class HorizontalVectorRule(Rule):
    '''
    Rule mapping the two horizontal GF components of a vector quantity
    onto the target's measurement azimuth.
    '''

    def __init__(self, quantity, differentiate=0, integrate=0):
        # Component names, e.g. 'vertical_tilt.n', 'vertical_tilt.e'.
        self.components = [quantity + '.' + c for c in 'ne']
        self.differentiate = differentiate
        self.integrate = integrate

    def required_components(self, target):
        '''
        Return the subset of N/E components whose projection factor is
        nonzero for this target.
        '''
        n, e = self.components
        sa, ca, _, _ = target.get_sin_cos_factors()

        comps = []
        if nonzero(ca):
            comps.append(n)

        if nonzero(sa):
            comps.append(e)

        return tuple(comps)

    def apply_(self, target, base_seismogram):
        '''
        Project the component traces onto the target's azimuth; optionally
        differentiate the result.
        '''
        n, e = self.components
        sa, ca, _, _ = target.get_sin_cos_factors()

        deltat = None
        if nonzero(ca):
            data = base_seismogram[n].data * ca
            deltat = base_seismogram[n].deltat

        else:
            data = 0.0

        if nonzero(sa):
            data = data + base_seismogram[e].data * sa
            deltat = base_seismogram[e].deltat

        if self.differentiate:
            # Fix: deltat was previously read unconditionally from the east
            # component, raising KeyError when only the north component was
            # required (target azimuth due north). Now it is taken from
            # whichever component actually contributed.
            data = util.diff_fd(self.differentiate, 4, deltat, data)

        if self.integrate:
            raise NotImplementedError('Integration is not implemented yet.')

        return data

4672 

4673 

class ScalarRule(Rule):
    '''
    Rule mapping a single scalar GF component directly to the target
    quantity.
    '''

    def __init__(self, quantity, differentiate=0):
        self.c = quantity
        # Fix: ``differentiate`` was accepted but never stored, so
        # ``apply_`` raised AttributeError on ``self.differentiate``.
        self.differentiate = differentiate

    def required_components(self, target):
        return (self.c, )

    def apply_(self, target, base_seismogram):
        data = base_seismogram[self.c].data.copy()
        deltat = base_seismogram[self.c].deltat
        if self.differentiate:
            data = util.diff_fd(self.differentiate, 4, deltat, data)

        return data

4689 

4690 

class StaticDisplacement(Rule):
    # Rule for static displacement targets; adds a line-of-sight component
    # for satellite targets.

    def required_components(self, target):
        return tuple(['displacement.%s' % c for c in list('ned')])

    def apply_(self, target, base_statics):
        if isinstance(target, SatelliteTarget):
            # Project NED displacements onto the satellite line-of-sight
            # using the per-pixel LOS factors (note the sign flip on the
            # downward component).
            los_fac = target.get_los_factors()
            base_statics['displacement.los'] =\
                (los_fac[:, 0] * -base_statics['displacement.d'] +
                 los_fac[:, 1] * base_statics['displacement.e'] +
                 los_fac[:, 2] * base_statics['displacement.n'])
        return base_statics

4704 

4705 

# Mapping of dynamic target quantities to the rules able to produce them.
# Rules are tried in order; the first rule whose required GF components are
# all provided by the store is used (e.g. velocity may come directly from
# velocity GFs or from differentiated displacement GFs).
channel_rules = {
    'displacement': [VectorRule('displacement')],
    'rotation': [VectorRule('rotation')],
    'velocity': [
        VectorRule('velocity'),
        VectorRule('displacement', differentiate=1)],
    'acceleration': [
        VectorRule('acceleration'),
        VectorRule('velocity', differentiate=1),
        VectorRule('displacement', differentiate=2)],
    'pore_pressure': [ScalarRule('pore_pressure')],
    'vertical_tilt': [HorizontalVectorRule('vertical_tilt')],
    'darcy_velocity': [VectorRule('darcy_velocity')],
}

# Mapping of static target quantities to their rules.
static_rules = {
    'displacement': [StaticDisplacement()]
}

4724 

4725 

class OutOfBoundsContext(Object):
    '''
    Context information attached to out-of-bounds errors for reporting
    which source/target combination failed.
    '''
    source = Source.T()
    target = Target.T()
    distance = Float.T()
    components = List.T(String.T())

4731 

4732 

def process_dynamic_timeseries(work, psources, ptargets, engine, nthreads=0):
    '''
    Worker for dynamic sub-requests using batched base-seismogram
    computation per store / sample rate / interpolation.

    Yields ``(isource, target._id, result), tcounters`` tuples.
    '''
    dsource_cache = {}
    tcounters = list(range(6))

    store_ids = set()
    sources = set()
    targets = set()

    # Tag each target with its original index so results can be associated
    # after set-based deduplication.
    for itarget, target in enumerate(ptargets):
        target._id = itarget

    for w in work:
        _, _, isources, itargets = w

        sources.update([psources[isource] for isource in isources])
        targets.update([ptargets[itarget] for itarget in itargets])

    store_ids = set([t.store_id for t in targets])

    for isource, source in enumerate(psources):

        # Union of GF components needed by any target for this source.
        components = set()
        for itarget, target in enumerate(targets):
            rule = engine.get_rule(source, target)
            components.update(rule.required_components(target))

        for store_id in store_ids:
            store_targets = [t for t in targets if t.store_id == store_id]

            sample_rates = set([t.sample_rate for t in store_targets])
            interpolations = set([t.interpolation for t in store_targets])

            base_seismograms = []
            store_targets_out = []

            # Batch targets by (sample rate, interpolation) so each batch
            # can be computed in a single engine call.
            for samp_rate in sample_rates:
                for interp in interpolations:
                    engine_targets = [
                        t for t in store_targets if t.sample_rate == samp_rate
                        and t.interpolation == interp]

                    if not engine_targets:
                        continue

                    store_targets_out += engine_targets

                    base_seismograms += engine.base_seismograms(
                        source,
                        engine_targets,
                        components,
                        dsource_cache,
                        nthreads)

            for iseis, seismogram in enumerate(base_seismograms):
                for tr in seismogram.values():
                    if tr.err != store.SeismosizerErrorEnum.SUCCESS:
                        # NOTE(review): ``store_targets[iseis]`` indexes the
                        # unbatched list while ``base_seismograms`` follows
                        # the batched ``store_targets_out`` order; when more
                        # than one batch exists the reported target may be
                        # the wrong one -- confirm and consider using
                        # ``store_targets_out[iseis]`` here.
                        e = SeismosizerError(
                            'Seismosizer failed with return code %i\n%s' % (
                                tr.err, str(
                                    OutOfBoundsContext(
                                        source=source,
                                        target=store_targets[iseis],
                                        distance=source.distance_to(
                                            store_targets[iseis]),
                                        components=components))))
                        raise e

            for seismogram, target in zip(base_seismograms, store_targets_out):

                try:
                    result = engine._post_process_dynamic(
                        seismogram, source, target)
                except SeismosizerError as e:
                    # Report the failure as the result for this combination.
                    result = e

                yield (isource, target._id, result), tcounters

4809 

4810 

def process_dynamic(work, psources, ptargets, engine, nthreads=0):
    '''
    Worker for dynamic (waveform) sub-requests, one source/target
    combination at a time.

    Yields ``(isource, itarget, result), tcounters`` for each combination
    in *work*. Discretized sources are cached in *dsource_cache* across
    combinations.
    '''
    dsource_cache = {}

    for w in work:
        _, _, isources, itargets = w

        sources = [psources[isource] for isource in isources]
        targets = [ptargets[itarget] for itarget in itargets]

        # Components are collected once per work item from the first
        # source -- presumably all sources of a work item share the same
        # base_key and hence the same rules; confirm against
        # Request.subrequest_map.
        components = set()
        for target in targets:
            rule = engine.get_rule(sources[0], target)
            components.update(rule.required_components(target))

        for isource, source in zip(isources, sources):
            for itarget, target in zip(itargets, targets):

                try:
                    base_seismogram, tcounters = engine.base_seismogram(
                        source, target, components, dsource_cache, nthreads)
                except meta.OutOfBounds as e:
                    # Attach context for error reporting before re-raising.
                    e.context = OutOfBoundsContext(
                        source=sources[0],
                        target=targets[0],
                        distance=sources[0].distance_to(targets[0]),
                        components=components)
                    raise

                n_records_stacked = 0
                t_optimize = 0.0
                t_stack = 0.0

                # Accumulate stacking statistics over all component traces.
                for _, tr in base_seismogram.items():
                    n_records_stacked += tr.n_records_stacked
                    t_optimize += tr.t_optimize
                    t_stack += tr.t_stack

                try:
                    result = engine._post_process_dynamic(
                        base_seismogram, source, target)
                    result.n_records_stacked = n_records_stacked
                    result.n_shared_stacking = len(sources) *\
                        len(targets)
                    result.t_optimize = t_optimize
                    result.t_stack = t_stack
                except SeismosizerError as e:
                    result = e

                tcounters.append(xtime())
                yield (isource, itarget, result), tcounters

4861 

4862 

def process_static(work, psources, ptargets, engine, nthreads=0):
    '''
    Worker for static (displacement) sub-requests.

    Yields ``(isource, itarget, result), tcounters`` for each source/target
    combination in *work*.
    '''
    for w in work:
        _, _, isources, itargets = w

        sources = [psources[isource] for isource in isources]
        targets = [ptargets[itarget] for itarget in itargets]

        for isource, source in zip(isources, sources):
            for itarget, target in zip(itargets, targets):
                components = engine.get_rule(source, target)\
                    .required_components(target)

                try:
                    base_statics, tcounters = engine.base_statics(
                        source, target, components, nthreads)
                except meta.OutOfBounds as e:
                    # Attach context for error reporting; distance is not
                    # meaningful here, hence NaN.
                    e.context = OutOfBoundsContext(
                        source=sources[0],
                        target=targets[0],
                        distance=float('nan'),
                        components=components)
                    raise
                result = engine._post_process_statics(
                    base_statics, source, target)
                tcounters.append(xtime())

                yield (isource, itarget, result), tcounters

4890 

4891 

4892class LocalEngine(Engine): 

4893 ''' 

4894 Offline synthetic seismogram calculator. 

4895 

4896 :param use_env: if ``True``, fill :py:attr:`store_superdirs` and 

4897 :py:attr:`store_dirs` with paths set in environment variables 

4898 GF_STORE_SUPERDIRS and GF_STORE_DIRS. 

4899 :param use_config: if ``True``, fill :py:attr:`store_superdirs` and 

4900 :py:attr:`store_dirs` with paths set in the user's config file. 

4901 

4902 The config file can be found at :file:`~/.pyrocko/config.pf` 

4903 

4904 .. code-block :: python 

4905 

4906 gf_store_dirs: ['/home/pyrocko/gf_stores/ak135/'] 

4907 gf_store_superdirs: ['/home/pyrocko/gf_stores/'] 

4908 ''' 

4909 

4910 store_superdirs = List.T( 

4911 String.T(), 

4912 help='directories which are searched for Green\'s function stores') 

4913 

4914 store_dirs = List.T( 

4915 String.T(), 

4916 help='additional individual Green\'s function store directories') 

4917 

4918 default_store_id = String.T( 

4919 optional=True, 

4920 help='default store ID to be used when a request does not provide ' 

4921 'one') 

4922 

    def __init__(self, **kwargs):
        '''
        See the class docstring for the ``use_env`` and ``use_config``
        keyword arguments.
        '''
        use_env = kwargs.pop('use_env', False)
        use_config = kwargs.pop('use_config', False)
        Engine.__init__(self, **kwargs)
        if use_env:
            # Colon-separated path lists from the environment.
            env_store_superdirs = os.environ.get('GF_STORE_SUPERDIRS', '')
            env_store_dirs = os.environ.get('GF_STORE_DIRS', '')
            if env_store_superdirs:
                self.store_superdirs.extend(env_store_superdirs.split(':'))

            if env_store_dirs:
                self.store_dirs.extend(env_store_dirs.split(':'))

        if use_config:
            c = config.config()
            self.store_superdirs.extend(c.gf_store_superdirs)
            self.store_dirs.extend(c.gf_store_dirs)

        self._check_store_dirs_type()
        # Lazily populated caches: store ID -> directory, and opened stores.
        self._id_to_store_dir = {}
        self._open_stores = {}
        self._effective_default_store_id = None

4945 

4946 def _check_store_dirs_type(self): 

4947 for sdir in ['store_dirs', 'store_superdirs']: 

4948 if not isinstance(self.__getattribute__(sdir), list): 

4949 raise TypeError("{} of {} is not of type list".format( 

4950 sdir, self.__class__.__name__)) 

4951 

4952 def _get_store_id(self, store_dir): 

4953 store_ = store.Store(store_dir) 

4954 store_id = store_.config.id 

4955 store_.close() 

4956 return store_id 

4957 

4958 def _looks_like_store_dir(self, store_dir): 

4959 return os.path.isdir(store_dir) and \ 

4960 all(os.path.isfile(pjoin(store_dir, x)) for x in 

4961 ('index', 'traces', 'config')) 

4962 

4963 def iter_store_dirs(self): 

4964 store_dirs = set() 

4965 for d in self.store_superdirs: 

4966 if not os.path.exists(d): 

4967 logger.warning('store_superdir not available: %s' % d) 

4968 continue 

4969 

4970 for entry in os.listdir(d): 

4971 store_dir = os.path.realpath(pjoin(d, entry)) 

4972 if self._looks_like_store_dir(store_dir): 

4973 store_dirs.add(store_dir) 

4974 

4975 for store_dir in self.store_dirs: 

4976 store_dirs.add(os.path.realpath(store_dir)) 

4977 

4978 return store_dirs 

4979 

    def _scan_stores(self):
        '''
        Populate the store-ID -> directory mapping from all known store
        directories.

        :raises DuplicateStoreId: if the same store ID is found in two
            different directories.
        '''
        for store_dir in self.iter_store_dirs():
            store_id = self._get_store_id(store_dir)
            if store_id not in self._id_to_store_dir:
                self._id_to_store_dir[store_id] = store_dir
            else:
                if store_dir != self._id_to_store_dir[store_id]:
                    raise DuplicateStoreId(
                        'GF store ID %s is used in (at least) two '
                        'different stores. Locations are: %s and %s' %
                        (store_id, self._id_to_store_dir[store_id], store_dir))

4991 

    def get_store_dir(self, store_id):
        '''
        Lookup directory given a GF store ID.

        :raises NoSuchStore: if no store with the given ID can be found.
        '''

        # Rescan lazily on a miss; the mapping is cached across calls.
        if store_id not in self._id_to_store_dir:
            self._scan_stores()

        if store_id not in self._id_to_store_dir:
            raise NoSuchStore(store_id, self.iter_store_dirs())

        return self._id_to_store_dir[store_id]

5004 

5005 def get_store_ids(self): 

5006 ''' 

5007 Get list of available store IDs. 

5008 ''' 

5009 

5010 self._scan_stores() 

5011 return sorted(self._id_to_store_dir.keys()) 

5012 

5013 def effective_default_store_id(self): 

5014 if self._effective_default_store_id is None: 

5015 if self.default_store_id is None: 

5016 store_ids = self.get_store_ids() 

5017 if len(store_ids) == 1: 

5018 self._effective_default_store_id = self.get_store_ids()[0] 

5019 else: 

5020 raise NoDefaultStoreSet() 

5021 else: 

5022 self._effective_default_store_id = self.default_store_id 

5023 

5024 return self._effective_default_store_id 

5025 

    def get_store(self, store_id=None):
        '''
        Get a store from the engine.

        :param store_id: identifier of the store (optional)
        :returns: :py:class:`~pyrocko.gf.store.Store` object

        If no ``store_id`` is provided the store
        associated with the :py:attr:`default_store_id` is returned.
        Raises :py:exc:`NoDefaultStoreSet` if :py:attr:`default_store_id` is
        undefined.
        '''

        if store_id is None:
            store_id = self.effective_default_store_id()

        # Opened stores are cached and reused across calls.
        if store_id not in self._open_stores:
            store_dir = self.get_store_dir(store_id)
            self._open_stores[store_id] = store.Store(store_dir)

        return self._open_stores[store_id]

5047 

5048 def get_store_config(self, store_id): 

5049 store = self.get_store(store_id) 

5050 return store.config 

5051 

5052 def get_store_extra(self, store_id, key): 

5053 store = self.get_store(store_id) 

5054 return store.get_extra(key) 

5055 

5056 def close_cashed_stores(self): 

5057 ''' 

5058 Close and remove ids from cashed stores. 

5059 ''' 

5060 store_ids = [] 

5061 for store_id, store_ in self._open_stores.items(): 

5062 store_.close() 

5063 store_ids.append(store_id) 

5064 

5065 for store_id in store_ids: 

5066 self._open_stores.pop(store_id) 

5067 

5068 def get_rule(self, source, target): 

5069 cprovided = self.get_store(target.store_id).get_provided_components() 

5070 

5071 if isinstance(target, StaticTarget): 

5072 quantity = target.quantity 

5073 available_rules = static_rules 

5074 elif isinstance(target, Target): 

5075 quantity = target.effective_quantity() 

5076 available_rules = channel_rules 

5077 

5078 try: 

5079 for rule in available_rules[quantity]: 

5080 cneeded = rule.required_components(target) 

5081 if all(c in cprovided for c in cneeded): 

5082 return rule 

5083 

5084 except KeyError: 

5085 pass 

5086 

5087 raise BadRequest( 

5088 'No rule to calculate "%s" with GFs from store "%s" ' 

5089 'for source model "%s".' % ( 

5090 target.effective_quantity(), 

5091 target.store_id, 

5092 source.__class__.__name__)) 

5093 

5094 def _cached_discretize_basesource(self, source, store, cache, target): 

5095 if (source, store) not in cache: 

5096 cache[source, store] = source.discretize_basesource(store, target) 

5097 

5098 return cache[source, store] 

5099 

    def base_seismograms(self, source, targets, components, dsource_cache,
                         nthreads=0):
        '''
        Compute base seismograms (raw GF component traces) for one
        source and a batch of targets sharing one GF store.

        All targets must agree in interpolation scheme and sample rate;
        their time windows may differ. The store of ``targets[0]`` is
        used for the whole batch — assumes all targets refer to the same
        store; TODO confirm against callers.

        :param source: source object
        :param targets: list of (dynamic) targets
        :param components: GF components to compute
        :param dsource_cache: cache dict for discretized base sources
        :param nthreads: number of threads passed to the store
        :raises BadRequest: if targets disagree in interpolation or
            sample rate
        '''

        target = targets[0]

        interp = set([t.interpolation for t in targets])
        if len(interp) > 1:
            raise BadRequest('Targets have different interpolation schemes.')

        rates = set([t.sample_rate for t in targets])
        if len(rates) > 1:
            raise BadRequest('Targets have different sample rates.')

        store_ = self.get_store(target.store_id)
        receivers = [t.receiver(store_) for t in targets]

        # targets may request resampling; otherwise fall back to the
        # store's native sample rate
        if target.sample_rate is not None:
            deltat = 1. / target.sample_rate
            rate = target.sample_rate
        else:
            deltat = None
            rate = store_.config.sample_rate

        # per-target sample index windows derived from the requested
        # time windows at the effective sample rate
        tmin = num.fromiter(
            (t.tmin for t in targets), dtype=float, count=len(targets))
        tmax = num.fromiter(
            (t.tmax for t in targets), dtype=float, count=len(targets))

        itmin = num.floor(tmin * rate).astype(num.int64)
        itmax = num.ceil(tmax * rate).astype(num.int64)
        nsamples = itmax - itmin + 1

        # targets without a time window (tmin is NaN) get itmin=0 and
        # nsamples=-1 (presumably interpreted downstream as "full
        # span" — confirm against store.calc_seismograms); assumes tmax
        # is NaN exactly when tmin is — TODO confirm
        mask = num.isnan(tmin)
        itmin[mask] = 0
        nsamples[mask] = -1

        base_source = self._cached_discretize_basesource(
            source, store_, dsource_cache, target)

        base_seismograms = store_.calc_seismograms(
            base_source, receivers, components,
            deltat=deltat,
            itmin=itmin, nsamples=nsamples,
            interpolation=target.interpolation,
            optimization=target.optimization,
            nthreads=nthreads)

        # align all traces to a common time span before returning
        for i, base_seismogram in enumerate(base_seismograms):
            base_seismograms[i] = store.make_same_span(base_seismogram)

        return base_seismograms

5151 

5152 def base_seismogram(self, source, target, components, dsource_cache, 

5153 nthreads): 

5154 

5155 tcounters = [xtime()] 

5156 

5157 store_ = self.get_store(target.store_id) 

5158 receiver = target.receiver(store_) 

5159 

5160 if target.tmin and target.tmax is not None: 

5161 rate = store_.config.sample_rate 

5162 itmin = int(num.floor(target.tmin * rate)) 

5163 itmax = int(num.ceil(target.tmax * rate)) 

5164 nsamples = itmax - itmin + 1 

5165 else: 

5166 itmin = None 

5167 nsamples = None 

5168 

5169 tcounters.append(xtime()) 

5170 base_source = self._cached_discretize_basesource( 

5171 source, store_, dsource_cache, target) 

5172 

5173 tcounters.append(xtime()) 

5174 

5175 if target.sample_rate is not None: 

5176 deltat = 1. / target.sample_rate 

5177 else: 

5178 deltat = None 

5179 

5180 base_seismogram = store_.seismogram( 

5181 base_source, receiver, components, 

5182 deltat=deltat, 

5183 itmin=itmin, nsamples=nsamples, 

5184 interpolation=target.interpolation, 

5185 optimization=target.optimization, 

5186 nthreads=nthreads) 

5187 

5188 tcounters.append(xtime()) 

5189 

5190 base_seismogram = store.make_same_span(base_seismogram) 

5191 

5192 tcounters.append(xtime()) 

5193 

5194 return base_seismogram, tcounters 

5195 

5196 def base_statics(self, source, target, components, nthreads): 

5197 tcounters = [xtime()] 

5198 store_ = self.get_store(target.store_id) 

5199 

5200 if target.tsnapshot is not None: 

5201 rate = store_.config.sample_rate 

5202 itsnapshot = int(num.floor(target.tsnapshot * rate)) 

5203 else: 

5204 itsnapshot = None 

5205 tcounters.append(xtime()) 

5206 

5207 base_source = source.discretize_basesource(store_, target=target) 

5208 

5209 tcounters.append(xtime()) 

5210 

5211 base_statics = store_.statics( 

5212 base_source, 

5213 target, 

5214 itsnapshot, 

5215 components, 

5216 target.interpolation, 

5217 nthreads) 

5218 

5219 tcounters.append(xtime()) 

5220 

5221 return base_statics, tcounters 

5222 

    def _post_process_dynamic(self, base_seismogram, source, target):
        '''
        Turn a base seismogram into the final trace for ``target``.

        Applies the component rule, source/target scale factors, the
        post-discretization source-time-function convolution, and
        finally the target's own post-processing.
        '''
        # deltat and itmin are shared by all component traces; pick any
        base_any = next(iter(base_seismogram.values()))
        deltat = base_any.deltat
        itmin = base_any.itmin

        rule = self.get_rule(source, target)
        data = rule.apply_(target, base_seismogram)

        factor = source.get_factor() * target.get_factor()
        if factor != 1.0:
            data = data * factor

        stf = source.effective_stf_post()

        times, amplitudes = stf.discretize_t(
            deltat, 0.0)

        # repeat end point to prevent boundary effects
        padded_data = num.empty(data.size + amplitudes.size, dtype=float)
        padded_data[:data.size] = data
        padded_data[data.size:] = data[-1]
        data = num.convolve(amplitudes, padded_data)

        # shift the trace start by the STF's leading time offset
        tmin = itmin * deltat + times[0]

        tr = meta.SeismosizerTrace(
            codes=target.codes,
            # drop the padding samples appended above
            data=data[:-amplitudes.size],
            deltat=deltat,
            tmin=tmin)

        return target.post_process(self, source, tr)

5255 

    def _post_process_statics(self, base_statics, source, starget):
        '''
        Apply the component rule and source scaling to raw statics and
        run the static target's post-processing.
        '''
        rule = self.get_rule(source, starget)
        data = rule.apply_(starget, base_statics)

        factor = source.get_factor()
        if factor != 1.0:
            # in-place scaling of the rule's result values
            for v in data.values():
                v *= factor

        # NOTE(review): ``base_statics`` (not ``data``) is passed on.
        # This is only correct if ``rule.apply_`` returns views into
        # ``base_statics`` so the in-place scaling above is reflected
        # there — confirm against the static rule implementations.
        return starget.post_process(self, source, base_statics)

5266 

5267 def process(self, *args, **kwargs): 

5268 ''' 

5269 Process a request. 

5270 

5271 :: 

5272 

5273 process(**kwargs) 

5274 process(request, **kwargs) 

5275 process(sources, targets, **kwargs) 

5276 

5277 The request can be given a a :py:class:`Request` object, or such an 

5278 object is created using ``Request(**kwargs)`` for convenience. 

5279 

5280 :returns: :py:class:`Response` object 

5281 ''' 

5282 

5283 if len(args) not in (0, 1, 2): 

5284 raise BadRequest('Invalid arguments.') 

5285 

5286 if len(args) == 1: 

5287 kwargs['request'] = args[0] 

5288 

5289 elif len(args) == 2: 

5290 kwargs.update(Request.args2kwargs(args)) 

5291 

5292 request = kwargs.pop('request', None) 

5293 status_callback = kwargs.pop('status_callback', None) 

5294 calc_timeseries = kwargs.pop('calc_timeseries', True) 

5295 

5296 nprocs = kwargs.pop('nprocs', None) 

5297 nthreads = kwargs.pop('nthreads', 1) 

5298 if nprocs is not None: 

5299 nthreads = nprocs 

5300 

5301 if request is None: 

5302 request = Request(**kwargs) 

5303 

5304 if resource: 

5305 rs0 = resource.getrusage(resource.RUSAGE_SELF) 

5306 rc0 = resource.getrusage(resource.RUSAGE_CHILDREN) 

5307 tt0 = xtime() 

5308 

5309 # make sure stores are open before fork() 

5310 store_ids = set(target.store_id for target in request.targets) 

5311 for store_id in store_ids: 

5312 self.get_store(store_id) 

5313 

5314 source_index = dict((x, i) for (i, x) in 

5315 enumerate(request.sources)) 

5316 target_index = dict((x, i) for (i, x) in 

5317 enumerate(request.targets)) 

5318 

5319 m = request.subrequest_map() 

5320 

5321 skeys = sorted(m.keys(), key=cmp_to_key(cmp_none_aware)) 

5322 results_list = [] 

5323 

5324 for i in range(len(request.sources)): 

5325 results_list.append([None] * len(request.targets)) 

5326 

5327 tcounters_dyn_list = [] 

5328 tcounters_static_list = [] 

5329 nsub = len(skeys) 

5330 isub = 0 

5331 

5332 # Processing dynamic targets through 

5333 # parimap(process_subrequest_dynamic) 

5334 

5335 if calc_timeseries: 

5336 _process_dynamic = process_dynamic_timeseries 

5337 else: 

5338 _process_dynamic = process_dynamic 

5339 

5340 if request.has_dynamic: 

5341 work_dynamic = [ 

5342 (i, nsub, 

5343 [source_index[source] for source in m[k][0]], 

5344 [target_index[target] for target in m[k][1] 

5345 if not isinstance(target, StaticTarget)]) 

5346 for (i, k) in enumerate(skeys)] 

5347 

5348 for ii_results, tcounters_dyn in _process_dynamic( 

5349 work_dynamic, request.sources, request.targets, self, 

5350 nthreads): 

5351 

5352 tcounters_dyn_list.append(num.diff(tcounters_dyn)) 

5353 isource, itarget, result = ii_results 

5354 results_list[isource][itarget] = result 

5355 

5356 if status_callback: 

5357 status_callback(isub, nsub) 

5358 

5359 isub += 1 

5360 

5361 # Processing static targets through process_static 

5362 if request.has_statics: 

5363 work_static = [ 

5364 (i, nsub, 

5365 [source_index[source] for source in m[k][0]], 

5366 [target_index[target] for target in m[k][1] 

5367 if isinstance(target, StaticTarget)]) 

5368 for (i, k) in enumerate(skeys)] 

5369 

5370 for ii_results, tcounters_static in process_static( 

5371 work_static, request.sources, request.targets, self, 

5372 nthreads=nthreads): 

5373 

5374 tcounters_static_list.append(num.diff(tcounters_static)) 

5375 isource, itarget, result = ii_results 

5376 results_list[isource][itarget] = result 

5377 

5378 if status_callback: 

5379 status_callback(isub, nsub) 

5380 

5381 isub += 1 

5382 

5383 if status_callback: 

5384 status_callback(nsub, nsub) 

5385 

5386 tt1 = time.time() 

5387 if resource: 

5388 rs1 = resource.getrusage(resource.RUSAGE_SELF) 

5389 rc1 = resource.getrusage(resource.RUSAGE_CHILDREN) 

5390 

5391 s = ProcessingStats() 

5392 

5393 if request.has_dynamic: 

5394 tcumu_dyn = num.sum(num.vstack(tcounters_dyn_list), axis=0) 

5395 t_dyn = float(num.sum(tcumu_dyn)) 

5396 perc_dyn = map(float, tcumu_dyn / t_dyn * 100.) 

5397 (s.t_perc_get_store_and_receiver, 

5398 s.t_perc_discretize_source, 

5399 s.t_perc_make_base_seismogram, 

5400 s.t_perc_make_same_span, 

5401 s.t_perc_post_process) = perc_dyn 

5402 else: 

5403 t_dyn = 0. 

5404 

5405 if request.has_statics: 

5406 tcumu_static = num.sum(num.vstack(tcounters_static_list), axis=0) 

5407 t_static = num.sum(tcumu_static) 

5408 perc_static = map(float, tcumu_static / t_static * 100.) 

5409 (s.t_perc_static_get_store, 

5410 s.t_perc_static_discretize_basesource, 

5411 s.t_perc_static_sum_statics, 

5412 s.t_perc_static_post_process) = perc_static 

5413 

5414 s.t_wallclock = tt1 - tt0 

5415 if resource: 

5416 s.t_cpu = ( 

5417 (rs1.ru_utime + rs1.ru_stime + rc1.ru_utime + rc1.ru_stime) - 

5418 (rs0.ru_utime + rs0.ru_stime + rc0.ru_utime + rc0.ru_stime)) 

5419 s.n_read_blocks = ( 

5420 (rs1.ru_inblock + rc1.ru_inblock) - 

5421 (rs0.ru_inblock + rc0.ru_inblock)) 

5422 

5423 n_records_stacked = 0. 

5424 for results in results_list: 

5425 for result in results: 

5426 if not isinstance(result, meta.Result): 

5427 continue 

5428 shr = float(result.n_shared_stacking) 

5429 n_records_stacked += result.n_records_stacked / shr 

5430 s.t_perc_optimize += result.t_optimize / shr 

5431 s.t_perc_stack += result.t_stack / shr 

5432 s.n_records_stacked = int(n_records_stacked) 

5433 if t_dyn != 0.: 

5434 s.t_perc_optimize /= t_dyn * 100 

5435 s.t_perc_stack /= t_dyn * 100 

5436 

5437 return Response( 

5438 request=request, 

5439 results_list=results_list, 

5440 stats=s) 

5441 

5442 

class RemoteEngine(Engine):
    '''
    Client for remote synthetic seismogram calculator.
    '''

    site = String.T(default=ws.g_default_site, optional=True)
    url = String.T(default=ws.g_url, optional=True)

    def process(self, request=None, status_callback=None, **kwargs):
        '''
        Send a request to the remote seismosizer service and return its
        response.

        If ``request`` is not given, one is built as
        ``Request(**kwargs)``. The ``status_callback`` parameter is
        accepted for interface compatibility but not used here.
        '''
        req = Request(**kwargs) if request is None else request
        return ws.seismosizer(url=self.url, site=self.site, request=req)

5457 

5458 

g_engine = None


def get_engine(store_superdirs=None):
    '''
    Get the shared global :py:class:`LocalEngine` instance.

    The engine is created on first use with ``use_env=True`` and
    ``use_config=True``.

    :param store_superdirs: optional list of additional GF store
        superdirectories to register with the engine
    '''
    global g_engine
    if g_engine is None:
        g_engine = LocalEngine(use_env=True, use_config=True)

    # Fix: the default was a mutable ``[]`` argument (shared across
    # calls); use ``None`` as sentinel instead.
    if store_superdirs is None:
        store_superdirs = []

    for d in store_superdirs:
        if d not in g_engine.store_superdirs:
            g_engine.store_superdirs.append(d)

    return g_engine

5472 

5473 

class SourceGroup(Object):
    '''
    Base class for groups of sources.

    Subclasses must implement ``__iter__`` and ``__len__``. Attribute
    access on the group gathers the corresponding attribute of each
    member source into a numpy float array.
    '''

    def __getattr__(self, k):
        values = (getattr(s, k) for s in self)
        return num.fromiter(values, dtype=float)

    def __iter__(self):
        raise NotImplementedError(
            'This method should be implemented in subclass.')

    def __len__(self):
        raise NotImplementedError(
            'This method should be implemented in subclass.')

5487 

5488 

class SourceList(SourceGroup):
    '''
    Flat list of sources.
    '''

    sources = List.T(Source.T())

    def append(self, s):
        '''
        Add a source to the end of the list.
        '''
        self.sources.append(s)

    def __iter__(self):
        return iter(self.sources)

    def __len__(self):
        return len(self.sources)

5500 

5501 

class SourceGrid(SourceGroup):
    '''
    Regular grid of sources, derived from a base source by varying a
    set of its parameters over given ranges.
    '''

    base = Source.T()
    variables = Dict.T(String.T(), Range.T())
    order = List.T(String.T())

    def __len__(self):
        '''
        Number of sources in the grid (product of all axis lengths).
        '''
        total = 1
        for _, coords in self.make_coords(self.base):
            total *= len(list(coords))

        return total

    def __iter__(self):
        '''
        Yield one regularized clone of the base source per grid node.
        '''
        for assignment in permudef(self.make_coords(self.base)):
            source = self.base.clone(**dict(assignment))
            source.regularize()
            yield source

    def ordered_params(self):
        '''
        Yield the varied parameter names, honoring ``order`` first and
        then the base source's own parameter order.

        :raises Exception: if ``variables`` contains a name unknown to
            the base source type.
        '''
        remaining = list(self.variables.keys())
        for name in self.order + list(self.base.keys()):
            if name in remaining:
                yield name
                remaining.remove(name)

        if remaining:
            raise Exception('Invalid parameter "%s" for source type "%s".' %
                            (remaining[0], self.base.__class__.__name__))

    def make_coords(self, base):
        '''
        Build the ``(param, coords)`` pairs defining the grid axes.
        '''
        return [(name, self.variables[name].make(base=base[name]))
                for name in self.ordered_params()]

5534 

5535 

# All source model classes exposed by this module; also used to extend
# ``__all__`` below.
source_classes = [
    Source,
    SourceWithMagnitude,
    SourceWithDerivedMagnitude,
    ExplosionSource,
    RectangularExplosionSource,
    DCSource,
    CLVDSource,
    VLVDSource,
    MTSource,
    RectangularSource,
    PseudoDynamicRupture,
    DoubleDCSource,
    RingfaultSource,
    CombiSource,
    SFSource,
    PorePressurePointSource,
    PorePressureLineSource,
]

5555 

# All source-time-function classes exposed by this module; also used to
# extend ``__all__`` below.
stf_classes = [
    STF,
    BoxcarSTF,
    TriangularSTF,
    HalfSinusoidSTF,
    ResonatorSTF,
]

5563 

# Public API of the module: explicitly listed names plus the names of
# all source and STF classes defined above.
__all__ = '''
SeismosizerError
BadRequest
NoSuchStore
DerivedMagnitudeError
STFMode
'''.split() + [S.__name__ for S in source_classes + stf_classes] + '''
Request
ProcessingStats
Response
Engine
LocalEngine
RemoteEngine
source_classes
get_engine
Range
SourceGroup
SourceList
SourceGrid
map_anchor
'''.split()