1# http://pyrocko.org - GPLv3 

2# 

3# The Pyrocko Developers, 21st Century 

4# ---|P------/S----------~Lg---------- 

5 

'''
.. _coordinate-system-names:

Coordinate systems
..................

Coordinate system names commonly used in source models.

================= ============================================
Name              Description
================= ============================================
``'xyz'``         northing, easting, depth in [m]
``'xy'``          northing, easting in [m]
``'latlon'``      latitude, longitude in [deg]
``'lonlat'``      longitude, latitude in [deg]
``'latlondepth'`` latitude, longitude in [deg], depth in [m]
================= ============================================
'''

27 

28 

29from __future__ import absolute_import, division, print_function 

30 

31from collections import defaultdict 

32from functools import cmp_to_key 

33import time 

34import math 

35import os 

36import re 

37import logging 

38try: 

39 import resource 

40except ImportError: 

41 resource = None 

42from hashlib import sha1 

43 

44import numpy as num 

45from scipy.interpolate import RegularGridInterpolator 

46 

47from pyrocko.guts import (Object, Float, String, StringChoice, List, 

48 Timestamp, Int, SObject, ArgumentError, Dict, 

49 ValidationError, Bool) 

50from pyrocko.guts_array import Array 

51 

52from pyrocko import moment_tensor as pmt 

53from pyrocko import trace, util, config, model, eikonal_ext 

54from pyrocko.orthodrome import ne_to_latlon 

55from pyrocko.model import Location 

56from pyrocko.modelling import OkadaSource, make_okada_coefficient_matrix, \ 

57 okada_ext, invert_fault_dislocations_bem 

58 

59from . import meta, store, ws 

60from .tractions import TractionField, DirectedTractions 

61from .targets import Target, StaticTarget, SatelliteTarget 

62 

pjoin = os.path.join  # shorthand used throughout this module

guts_prefix = 'pf'  # YAML tag prefix for guts-serialized objects here

d2r = math.pi / 180.  # degrees -> radians
r2d = 180. / math.pi  # radians -> degrees
km = 1e3  # [m]

logger = logging.getLogger('pyrocko.gf.seismosizer')

72 

73 

def cmp_none_aware(a, b):
    '''
    Three-way compare which tolerates ``None`` values.

    Tuples are compared element-wise (recursively); ``None`` sorts before
    any other value.  Returns -1, 0 or 1 like a classic ``cmp`` function,
    for use with :py:func:`functools.cmp_to_key`.
    '''
    if isinstance(a, tuple) and isinstance(b, tuple):
        for elem_a, elem_b in zip(a, b):
            result = cmp_none_aware(elem_a, elem_b)
            if result != 0:
                return result
        return 0

    if a is None:
        return 0 if b is None else -1

    if b is None:
        return 1

    return (a > b) - (a < b)

96 

97 

def xtime():
    '''
    Get the current system time in [s] (thin wrapper around
    :py:func:`time.time`, kept as an indirection hook).
    '''
    return time.time()

100 

101 

class SeismosizerError(Exception):
    '''
    Base class for errors raised by the seismosizer machinery.
    '''
    pass

104 

105 

class BadRequest(SeismosizerError):
    '''
    Raised when a request to the seismosizer is malformed or unsatisfiable.
    '''
    pass

108 

109 

class DuplicateStoreId(Exception):
    '''
    Raised when two GF stores with the same ID are encountered.
    '''
    pass

112 

113 

class NoDefaultStoreSet(Exception):
    '''
    Raised when a default GF store is needed but none has been configured.
    '''
    pass

116 

117 

class ConversionError(Exception):
    '''
    Raised when a source model cannot be converted to another representation.
    '''
    pass

120 

121 

class NoSuchStore(BadRequest):
    '''
    Raised when a GF store with the requested ID cannot be located.
    '''

    def __init__(self, store_id=None, dirs=None):
        BadRequest.__init__(self)
        self.store_id = store_id
        self.dirs = dirs

    def __str__(self):
        if self.store_id is None:
            message = 'GF store not found.'
        else:
            message = 'no GF store with id "%s" found.' % self.store_id

        if self.dirs is not None:
            message += ' Searched folders:\n %s' % '\n '.join(
                sorted(self.dirs))

        return message

138 

139 

def ufloat(s):
    '''
    Convert string to float, allowing metric unit suffixes ``k`` and ``M``.

    :param s: string like ``'5'``, ``'1.5k'`` (-> 1500.0) or ``'2M'``
        (-> 2e6)
    :returns: the parsed value as ``float``
    :raises ValueError: if ``s`` is not a valid number, or consists of a
        unit suffix only
    '''
    units = {
        'k': 1e3,
        'M': 1e6,
    }

    factor = 1.0
    if s and s[-1] in units:
        factor = units[s[-1]]
        if len(s) == 1:
            # Report the original input; previously the already-truncated
            # (empty) string was interpolated into the message.
            raise ValueError('unit without a number: \'%s\'' % s)

        s = s[:-1]

    return float(s) * factor

154 

155 

def ufloat_or_none(s):
    '''
    Like :py:func:`ufloat`, but map empty/falsy input to ``None``.
    '''
    return ufloat(s) if s else None

161 

162 

def int_or_none(s):
    '''
    Convert to ``int``, mapping empty/falsy input to ``None``.
    '''
    return int(s) if s else None

168 

169 

def nonzero(x, eps=1e-15):
    '''
    Check whether ``x`` differs from zero by more than ``eps``.
    '''
    return eps < abs(x)

172 

173 

def permudef(ln, j=0):
    '''
    Yield all combinations of the choice lists in ``ln``.

    ``ln`` is a list of ``(key, choices)`` pairs.  For every combination,
    the entries of ``ln`` are temporarily rebound in place to single
    ``(key, choice)`` pairs and the (mutated) list itself is yielded; on
    exhaustion the original ``(key, choices)`` entries are restored.
    '''
    if j >= len(ln):
        yield ln
        return

    key, choices = ln[j]
    for choice in choices:
        ln[j] = key, choice
        for combo in permudef(ln, j + 1):
            yield combo

    # Restore the original entry so the caller's list is unchanged.
    ln[j] = key, choices

186 

187 

def arr(x):
    '''
    Coerce ``x`` into an at-least-1-dimensional :py:class:`numpy.ndarray`.
    '''
    a = num.asarray(x)
    return num.atleast_1d(a)

190 

191 

def discretize_rect_source(deltas, deltat, time, north, east, depth,
                           strike, dip, length, width,
                           anchor, velocity=None, stf=None,
                           nucleation_x=None, nucleation_y=None,
                           decimation_factor=1, pointsonly=False,
                           plane_coords=False,
                           aggressive_oversampling=False):
    '''
    Discretize a rectangular fault plane into a grid of point sources.

    :param deltas: spatial sampling intervals of the GF store [m]
    :param deltat: temporal sampling interval of the GF store [s]
    :param time: reference (origin) time of the source
    :param north: northing of the anchor point [m]
    :param east: easting of the anchor point [m]
    :param depth: depth of the anchor point [m]
    :param strike: strike angle [deg]
    :param dip: dip angle [deg]
    :param length: along-strike extension [m]
    :param width: down-dip extension [m]
    :param anchor: anchor point name (key into ``map_anchor``)
    :param velocity: rupture velocity [m/s]; required unless only point
        positions are requested (``pointsonly`` or ``plane_coords``)
    :param stf: source time function; defaults to a unit pulse ``STF()``
    :param nucleation_x: rupture nucleation position along strike (plane
        coordinates); ``None`` for simultaneous rupture
    :param nucleation_y: rupture nucleation position down dip
    :param decimation_factor: spatial decimation of the sub-source grid
    :param pointsonly: if ``True``, return only point positions (rotated
        and shifted), no times/amplitudes
    :param plane_coords: if ``True``, return points in unrotated plane
        coordinates
    :param aggressive_oversampling: double the sub-source density

    :returns: ``(points, dl, dw, nl, nw)`` in the points-only modes, else
        ``(points2, times2, amplitudes2, dl, dw, nl, nw)``
    '''
    if stf is None:
        stf = STF()

    if not velocity and not pointsonly:
        raise AttributeError('velocity is required in time mode')

    # Grid spacing: finest GF sampling, further limited by the distance
    # the rupture front travels in one time sample.
    mindeltagf = float(num.min(deltas))
    if velocity:
        mindeltagf = min(mindeltagf, deltat * velocity)

    ln = length
    wd = width

    if aggressive_oversampling:
        nl = int((2. / decimation_factor) * num.ceil(ln / mindeltagf)) + 1
        nw = int((2. / decimation_factor) * num.ceil(wd / mindeltagf)) + 1
    else:
        nl = int((1. / decimation_factor) * num.ceil(ln / mindeltagf)) + 1
        nw = int((1. / decimation_factor) * num.ceil(wd / mindeltagf)) + 1

    n = int(nl * nw)

    dl = ln / nl
    dw = wd / nw

    # Sub-source centers in plane coordinates, centered on the plane.
    xl = num.linspace(-0.5 * (ln - dl), 0.5 * (ln - dl), nl)
    xw = num.linspace(-0.5 * (wd - dw), 0.5 * (wd - dw), nw)

    points = num.zeros((n, 3))
    points[:, 0] = num.tile(xl, nw)
    points[:, 1] = num.repeat(xw, nl)

    if nucleation_x is not None:
        dist_x = num.abs(nucleation_x - points[:, 0])
    else:
        dist_x = num.zeros(n)

    if nucleation_y is not None:
        dist_y = num.abs(nucleation_y - points[:, 1])
    else:
        dist_y = num.zeros(n)

    dist = num.sqrt(dist_x**2 + dist_y**2)

    # Rupture onset times are only meaningful (and only used) in time
    # mode.  Previously ``dist / velocity`` was evaluated unconditionally,
    # crashing with ``velocity=None`` even in the points-only modes.
    times = dist / velocity if velocity else None

    anch_x, anch_y = map_anchor[anchor]

    points[:, 0] -= anch_x * 0.5 * length
    points[:, 1] -= anch_y * 0.5 * width

    if plane_coords:
        return points, dl, dw, nl, nw

    # Rotate into north/east/down and shift to the absolute position.
    rotmat = pmt.euler_to_matrix(dip * d2r, strike * d2r, 0.0)
    points = num.dot(rotmat.T, points.T).T

    points[:, 0] += north
    points[:, 1] += east
    points[:, 2] += depth

    if pointsonly:
        return points, dl, dw, nl, nw

    # Convolve each sub-source's onset time with the discretized STF.
    xtau, amplitudes = stf.discretize_t(deltat, time)
    nt = xtau.size

    points2 = num.repeat(points, nt, axis=0)
    times2 = (times[:, num.newaxis] + xtau[num.newaxis, :]).ravel()
    amplitudes2 = num.tile(amplitudes, n)

    return points2, times2, amplitudes2, dl, dw, nl, nw

271 

272 

def check_rect_source_discretisation(points2, nl, nw, store):
    '''
    Check whether a RectangularSource discretisation is fine enough.

    Compares the spacing between neighbouring sub-sources with half the
    minimum S-wavelength resolvable by the store's sampling.

    :returns: ``True`` if the discretisation is acceptable (or trivially
        small), ``False`` otherwise
    '''
    # Assumes a non-rotated fault plane.
    N_CRITICAL = 8
    points = points2.T.reshape((3, nl, nw))
    if points.size <= N_CRITICAL:
        # NOTE(review): ``points.size`` counts coordinate components
        # (3 * nl * nw), not sub-sources -- the message may overstate the
        # count by a factor of three; confirm intent.
        logger.warning('RectangularSource is defined by only %d sub-sources!'
                       % points.size)
        return True

    # Euclidean distance between the first two rows of sub-sources.
    distances = num.sqrt(
        (points[0, 0, :] - points[0, 1, :])**2 +
        (points[1, 0, :] - points[1, 1, :])**2 +
        (points[2, 0, :] - points[2, 1, :])**2)

    depths = points[2, 0, :]
    vs_profile = store.config.get_vs(
        lat=0., lon=0.,
        points=num.repeat(depths[:, num.newaxis], 3, axis=1),
        interpolation='multilinear')

    # Nyquist wavelength of the store at the local shear velocity.
    min_wavelength = vs_profile * (store.config.deltat * 2)
    return bool(num.all(min_wavelength > distances / 2))

297 

298 

def outline_rect_source(strike, dip, length, width, anchor):
    '''
    Get the corner coordinates of a rectangular fault patch.

    :returns: ``(5, 3)`` array of north/east/down coordinates; the first
        corner is repeated to close the polygon.
    '''
    half_l = 0.5 * length
    half_w = 0.5 * width

    # Closed outline in plane coordinates, centered on the plane.
    points = num.array(
        [[-half_l, -half_w, 0.],
         [half_l, -half_w, 0.],
         [half_l, half_w, 0.],
         [-half_l, half_w, 0.],
         [-half_l, -half_w, 0.]])

    # Shift so the requested anchor point is at the origin.
    anch_x, anch_y = map_anchor[anchor]
    points[:, 0] -= anch_x * half_l
    points[:, 1] -= anch_y * half_w

    # Rotate into north/east/down.
    rotmat = pmt.euler_to_matrix(dip * d2r, strike * d2r, 0.0)

    return num.dot(rotmat.T, points.T).T

316 

317 

def from_plane_coords(
        strike, dip, length, width, depth, x_plane_coords, y_plane_coords,
        lat=0., lon=0.,
        north_shift=0, east_shift=0,
        anchor='top', cs='xy'):
    '''
    Convert normalized plane coordinates on a rectangular source to
    absolute coordinates.

    :param x_plane_coords: along-strike plane coordinate(s), scalar or
        list; presumably normalized to [-1., 1.] -- TODO confirm
    :param y_plane_coords: down-dip plane coordinate(s), same convention
    :param cs: output coordinate system: ``'xy'``, ``'latlon'`` or
        ``'lonlat'`` (see module docstring)
    :returns: pair of lists of coordinates; for ``'lonlat'`` the pair is
        swapped to (lon, lat)
    '''
    ln = length
    wd = width
    x_abs = []
    y_abs = []
    # Accept scalar inputs as well as lists of coordinates.
    if not isinstance(x_plane_coords, list):
        x_plane_coords = [x_plane_coords]
        y_plane_coords = [y_plane_coords]

    for x_plane, y_plane in zip(x_plane_coords, y_plane_coords):
        # Corner points of the rectangle, scaled by the plane coordinate.
        points = num.array(
            [[-0.5 * ln * x_plane, -0.5 * wd * y_plane, 0.],
             [0.5 * ln * x_plane, -0.5 * wd * y_plane, 0.],
             [0.5 * ln * x_plane, 0.5 * wd * y_plane, 0.],
             [-0.5 * ln * x_plane, 0.5 * wd * y_plane, 0.],
             [-0.5 * ln * x_plane, -0.5 * wd * y_plane, 0.]])

        # Shift so the requested anchor point is at the origin.
        anch_x, anch_y = map_anchor[anchor]
        points[:, 0] -= anch_x * 0.5 * length
        points[:, 1] -= anch_y * 0.5 * width

        # Rotate into north/east/down and shift to absolute position.
        rotmat = pmt.euler_to_matrix(dip * d2r, strike * d2r, 0.0)

        points = num.dot(rotmat.T, points.T).T
        points[:, 0] += north_shift
        points[:, 1] += east_shift
        points[:, 2] += depth
        if cs in ('latlon', 'lonlat'):
            latlon = ne_to_latlon(lat, lon,
                                  points[:, 0], points[:, 1])
            latlon = num.array(latlon).T
            # NOTE(review): rows 1 and 2 of the corner array are sliced
            # here to extract the coordinate of the requested point --
            # looks fragile; confirm against callers.
            x_abs.append(latlon[1:2, 1])
            y_abs.append(latlon[2:3, 0])
        if cs == 'xy':
            x_abs.append(points[1:2, 1])
            y_abs.append(points[2:3, 0])

    if cs == 'lonlat':
        return y_abs, x_abs
    else:
        return x_abs, y_abs

364 

365 

def points_on_rect_source(
        strike, dip, length, width, anchor,
        discretized_basesource=None, points_x=None, points_y=None):
    '''
    Get absolute (north/east/down) positions of points on a rectangular
    source plane.

    The points are either the patch corner grid of an already discretized
    base source, or explicit normalized plane coordinates given via
    ``points_x`` and ``points_y``.

    NOTE(review): if neither ``discretized_basesource`` nor non-empty
    ``points_x``/``points_y`` are given, ``points`` is never assigned and
    a ``NameError`` results; also ``points_x=None`` with no base source
    fails on ``.any()`` -- presumably callers always supply one of the
    two; confirm.
    '''
    ln = length
    wd = width

    # Normalize scalar/list input to numpy arrays.
    if isinstance(points_x, list) or isinstance(points_x, float):
        points_x = num.array([points_x])
    if isinstance(points_y, list) or isinstance(points_y, float):
        points_y = num.array([points_y])

    if discretized_basesource:
        ds = discretized_basesource

        # One more grid line than patches in each direction (corners).
        nl_patches = ds.nl + 1
        nw_patches = ds.nw + 1

        npoints = nl_patches * nw_patches
        points = num.zeros((npoints, 3))
        ln_patches = num.array([il for il in range(nl_patches)])
        wd_patches = num.array([iw for iw in range(nw_patches)])

        # Rescale grid indices to the normalized range [-1., 1.].
        points_ln =\
            2 * ((ln_patches - num.min(ln_patches)) / num.ptp(ln_patches)) - 1
        points_wd =\
            2 * ((wd_patches - num.min(wd_patches)) / num.ptp(wd_patches)) - 1

        for il in range(nl_patches):
            for iw in range(nw_patches):
                points[il * nw_patches + iw, :] = num.array([
                    points_ln[il] * ln * 0.5,
                    points_wd[iw] * wd * 0.5, 0.0])

    elif points_x.any() and points_y.any():
        points = num.zeros(shape=((len(points_x), 3)))
        for i, (x, y) in enumerate(zip(points_x, points_y)):
            points[i, :] = num.array(
                [x * 0.5 * ln, y * 0.5 * wd, 0.0])

    # Shift so the anchor point is at the origin, then rotate into
    # north/east/down.
    anch_x, anch_y = map_anchor[anchor]

    points[:, 0] -= anch_x * 0.5 * ln
    points[:, 1] -= anch_y * 0.5 * wd

    rotmat = pmt.euler_to_matrix(dip * d2r, strike * d2r, 0.0)

    return num.dot(rotmat.T, points.T).T

414 

415 

class InvalidGridDef(Exception):
    '''
    Raised when a grid or range specification string cannot be parsed.
    '''
    pass

418 

419 

class Range(SObject):
    '''
    Convenient range specification.

    Equivalent ways to specify the range [ 0., 1000., ... 10000. ]::

      Range('0 .. 10k : 1k')
      Range(start=0., stop=10e3, step=1e3)
      Range(0, 10e3, 1e3)
      Range('0 .. 10k @ 11')
      Range(start=0., stop=10*km, n=11)

      Range(0, 10e3, n=11)
      Range(values=[x*1e3 for x in range(11)])

    Depending on the use context, it can be possible to omit any part of the
    specification. E.g. in the context of extracting a subset of an already
    existing range, the existing range's specification values would be filled
    in where missing.

    The values are distributed with equal spacing, unless the ``spacing``
    argument is modified.  The values can be created offset or relative to an
    external base value with the ``relative`` argument if the use context
    supports this.

    The range specification can be expressed with a short string
    representation::

        'start .. stop @ num | spacing, relative'
        'start .. stop : step | spacing, relative'

    most parts of the expression can be omitted if not needed. Whitespace is
    allowed for readability but can also be omitted.
    '''

    # Explicit bounds/step/count; any subset may be given -- missing parts
    # can be filled in by the use context (see make()).
    start = Float.T(optional=True)
    stop = Float.T(optional=True)
    step = Float.T(optional=True)
    n = Int.T(optional=True)
    # Alternatively, an explicit list of values.
    values = Array.T(optional=True, dtype=float, shape=(None,))

    spacing = StringChoice.T(
        choices=['lin', 'log', 'symlog'],
        default='lin',
        optional=True)

    # '' = absolute, 'add' = offsets to a base value, 'mult' = factors.
    relative = StringChoice.T(
        choices=['', 'add', 'mult'],
        default='',
        optional=True)

    # Matches 'start .. stop @ n | stuff' or 'start .. stop : step | stuff'
    # with every part optional (whitespace removed before matching).
    pattern = re.compile(r'^((?P<start>.*)\.\.(?P<stop>[^@|:]*))?'
                         r'(@(?P<n>[^|]+)|:(?P<step>[^|]+))?'
                         r'(\|(?P<stuff>.+))?$')

    def __init__(self, *args, **kwargs):
        # Accept a single shorthand string, (start, stop[, step])
        # positionals, or pure keyword arguments.
        d = {}
        if len(args) == 1:
            d = self.parse(args[0])
        elif len(args) in (2, 3):
            d['start'], d['stop'] = [float(x) for x in args[:2]]
            if len(args) == 3:
                d['step'] = float(args[2])

        for k, v in kwargs.items():
            if k in d:
                raise ArgumentError('%s specified more than once' % k)

            d[k] = v

        SObject.__init__(self, **d)

    def __str__(self):
        def sfloat(x):
            # Compact float formatting; empty string for None.
            if x is not None:
                return '%g' % x
            else:
                return ''

        # NOTE(review): truthiness of a multi-element numpy array raises
        # ValueError; presumably ``values`` is either None or handled
        # before it becomes a multi-element array here -- confirm.
        if self.values:
            return ','.join('%g' % x for x in self.values)

        if self.start is None and self.stop is None:
            s0 = ''
        else:
            s0 = '%s .. %s' % (sfloat(self.start), sfloat(self.stop))

        s1 = ''
        if self.step is not None:
            # Omit surrounding spaces when there is no 'start .. stop' part.
            s1 = [' : %g', ':%g'][s0 == ''] % self.step
        elif self.n is not None:
            s1 = [' @ %i', '@%i'][s0 == ''] % self.n

        if self.spacing == 'lin' and self.relative == '':
            s2 = ''
        else:
            x = []
            if self.spacing != 'lin':
                x.append(self.spacing)

            if self.relative != '':
                x.append(self.relative)

            s2 = ' | %s' % ','.join(x)

        return s0 + s1 + s2

    @classmethod
    def parse(cls, s):
        '''
        Parse a shorthand string into a dict of constructor arguments.

        :raises: :py:exc:`InvalidGridDef` if ``s`` is not a valid range
            specification
        '''
        s = re.sub(r'\s+', '', s)
        m = cls.pattern.match(s)
        if not m:
            # Fall back to a plain comma-separated list of values.
            try:
                vals = [ufloat(x) for x in s.split(',')]
            except Exception:
                raise InvalidGridDef(
                    '"%s" is not a valid range specification' % s)

            return dict(values=num.array(vals, dtype=float))

        d = m.groupdict()
        try:
            start = ufloat_or_none(d['start'])
            stop = ufloat_or_none(d['stop'])
            step = ufloat_or_none(d['step'])
            n = int_or_none(d['n'])
        except Exception:
            raise InvalidGridDef(
                '"%s" is not a valid range specification' % s)

        spacing = 'lin'
        relative = ''

        # Optional trailing '| spacing,relative' modifiers.
        if d['stuff'] is not None:
            t = d['stuff'].split(',')
            for x in t:
                if x in cls.spacing.choices:
                    spacing = x
                elif x and x in cls.relative.choices:
                    relative = x
                else:
                    raise InvalidGridDef(
                        '"%s" is not a valid range specification' % s)

        return dict(start=start, stop=stop, step=step, n=n, spacing=spacing,
                    relative=relative)

    def make(self, mi=None, ma=None, inc=None, base=None, eps=1e-5):
        '''
        Create the list of values described by this range.

        :param mi: fallback lower bound used when ``start`` is not set
        :param ma: fallback upper bound used when ``stop`` is not set
        :param inc: fallback increment used when neither ``step`` nor
            ``n`` is set
        :param base: base value for relative ranges (``'add'``/``'mult'``)
        :param eps: tolerance for checking whether ``step`` evenly divides
            the interval
        :returns: list of floats
        '''
        if self.values:
            return self.values

        start = self.start
        stop = self.stop
        step = self.step
        n = self.n

        # A negative step means a descending range; swap which fallback
        # bound substitutes for start and stop accordingly.
        swap = step is not None and step < 0.
        if start is None:
            start = [mi, ma][swap]
        if stop is None:
            stop = [ma, mi][swap]
        if step is None and inc is not None:
            step = [inc, -inc][ma < mi]

        if start is None or stop is None:
            raise InvalidGridDef(
                'Cannot use range specification "%s" without start '
                'and stop in this context' % self)

        if step is None and n is None:
            # Default to exactly two values: start and stop.
            step = stop - start

        if n is None:
            if (step < 0) != (stop - start < 0):
                raise InvalidGridDef(
                    'Range specification "%s" has inconsistent ordering '
                    '(step < 0 => stop > start)' % self)

            n = int(round((stop - start) / step)) + 1
            stop2 = start + (n - 1) * step
            if abs(stop - stop2) > eps:
                # Step does not evenly divide the interval: truncate to
                # the last value not exceeding stop.
                n = int(math.floor((stop - start) / step)) + 1
                stop = start + (n - 1) * step
            else:
                stop = stop2

        if start == stop:
            n = 1

        if self.spacing == 'lin':
            vals = num.linspace(start, stop, n)

        elif self.spacing in ('log', 'symlog'):
            if start > 0. and stop > 0.:
                vals = num.exp(num.linspace(num.log(start),
                                            num.log(stop), n))
            elif start < 0. and stop < 0.:
                vals = -num.exp(num.linspace(num.log(-start),
                                             num.log(-stop), n))
            else:
                raise InvalidGridDef(
                    'Log ranges should not include or cross zero '
                    '(in range specification "%s").' % self)

            if self.spacing == 'symlog':
                # Mirror the log-spaced values around zero.
                nvals = - vals
                vals = num.concatenate((nvals[::-1], vals))

        if self.relative in ('add', 'mult') and base is None:
            raise InvalidGridDef(
                'Cannot use relative range specification in this context.')

        vals = self.make_relative(base, vals)

        return list(map(float, vals))

    def make_relative(self, base, vals):
        '''
        Apply the ``relative`` mode to ``vals`` (modified in place for
        numpy arrays) and return them.
        '''
        if self.relative == 'add':
            vals += base

        if self.relative == 'mult':
            vals *= base

        return vals

644 

645 

class GridDefElement(Object):
    '''
    Single entry of a grid definition: a parameter name bound to a
    :py:class:`Range` (shorthand ``'param = range'``).
    '''

    param = meta.StringID.T()
    rs = Range.T()

    def __init__(self, shorthand=None, **kwargs):
        if shorthand is not None:
            parts = shorthand.split('=')
            if len(parts) != 2:
                raise InvalidGridDef(
                    'Invalid grid specification element: %s' % shorthand)

            kwargs['param'] = parts[0].strip()
            kwargs['rs'] = Range(parts[1].strip())

        Object.__init__(self, **kwargs)

    def shorthand(self):
        '''
        Get the ``'param = range'`` string form of this element.
        '''
        return self.param + ' = ' + str(self.rs)

667 

668 

class GridDef(Object):
    '''
    Grid definition: a collection of parameter ranges
    (:py:class:`GridDefElement`), given as a shorthand string with
    elements separated by ``';'`` or newlines.
    '''

    elements = List.T(GridDefElement.T())

    def __init__(self, shorthand=None, **kwargs):
        if shorthand is not None:
            t = shorthand.splitlines()
            tt = []
            for x in t:
                x = x.strip()
                if x:
                    tt.extend(x.split(';'))

            elements = []
            for se in tt:
                # Bug fix: each 'param = range' chunk must become a
                # GridDefElement.  Previously ``GridDef(se)`` was
                # constructed here, which both violated the declared
                # element type and recursed infinitely on any non-empty
                # shorthand.
                elements.append(GridDefElement(se))

            kwargs['elements'] = elements

        Object.__init__(self, **kwargs)

    def shorthand(self):
        return '; '.join(str(x) for x in self.elements)

692 

693 

class Cloneable(object):
    '''
    Mix-in providing dict-like item access and cloning for objects whose
    parameter names are listed in ``cls.T.propnames`` (guts convention).
    '''

    def __iter__(self):
        return iter(self.T.propnames)

    def __getitem__(self, name):
        if name not in self.keys():
            raise KeyError(name)

        return getattr(self, name)

    def __setitem__(self, name, value):
        if name not in self.keys():
            raise KeyError(name)

        return setattr(self, name, value)

    def clone(self, **kwargs):
        '''
        Make a copy of the object.

        A new object of the same class is created and initialized with the
        parameters of the object on which this method is called on.  If
        ``kwargs`` are given, these are used to override any of the
        initialization parameters.  Nested ``Cloneable`` values are cloned
        recursively.
        '''
        params = dict(self)
        for name, value in params.items():
            if isinstance(value, Cloneable):
                params[name] = value.clone()

        params.update(kwargs)
        return self.__class__(**params)

    @classmethod
    def keys(cls):
        '''
        Get list of the source model's parameter names.
        '''
        return cls.T.propnames

737 

738 

class STF(Object, Cloneable):

    '''
    Base class for source time functions.

    The base class itself represents a unit impulse at the reference time.
    '''

    def __init__(self, effective_duration=None, **kwargs):
        # ``effective_duration`` is an alternative way of setting
        # ``duration``; subclasses define the conversion factor.
        if effective_duration is not None:
            kwargs['duration'] = \
                effective_duration / self.factor_duration_to_effective()

        Object.__init__(self, **kwargs)

    @classmethod
    def factor_duration_to_effective(cls):
        # Identity for the base class; overridden by subclasses.
        return 1.0

    def centroid_time(self, tref):
        # For a symmetric STF the reference time is the centroid time.
        return tref

    @property
    def effective_duration(self):
        return self.duration * self.factor_duration_to_effective()

    def discretize_t(self, deltat, tref):
        '''
        Discretize the STF on a sampling grid with spacing ``deltat``.

        A unit impulse at ``tref`` is split between the two neighbouring
        samples by linear interpolation.

        :returns: tuple ``(times, amplitudes)``
        '''
        tl = math.floor(tref / deltat) * deltat
        th = math.ceil(tref / deltat) * deltat
        if tl == th:
            return num.array([tl], dtype=float), num.ones(1)

        return (
            num.array([tl, th], dtype=float),
            num.array([th - tref, tref - tl], dtype=float) / deltat)

    def base_key(self):
        return (type(self).__name__,)

775 

776 

# Module-level default source time function: a unit impulse (base STF).
g_unit_pulse = STF()

778 

779 

def sshift(times, amplitudes, tshift, deltat):
    '''
    Shift a sampled signal by ``tshift`` using sub-sample interpolation.

    The shift is decomposed into the two neighbouring whole-sample
    offsets and each amplitude is distributed linearly between them,
    extending the output by one sample.

    :returns: tuple ``(times, amplitudes)`` of the shifted signal
    '''
    t_lo = math.floor(tshift / deltat) * deltat
    t_hi = math.ceil(tshift / deltat) * deltat
    if t_lo == t_hi:
        # Whole-sample shift: nothing to interpolate.
        # NOTE(review): the time axis is returned unshifted in this case;
        # callers apparently only pass sub-sample residuals -- confirm.
        return times, amplitudes

    w_lo = (t_hi - tshift) / deltat
    w_hi = (tshift - t_lo) / deltat

    shifted = num.zeros(amplitudes.size + 1, dtype=float)
    shifted[:-1] += w_lo * amplitudes
    shifted[1:] += w_hi * amplitudes

    new_times = num.arange(times.size + 1, dtype=float) * deltat \
        + times[0] + t_lo

    return new_times, shifted

796 

797 

class BoxcarSTF(STF):

    '''
    Boxcar type source time function.

    .. figure :: /static/stf-BoxcarSTF.svg
        :width: 40%
        :align: center
        :alt: boxcar source time function
    '''

    duration = Float.T(
        default=0.0,
        help='duration of the boxcar')

    anchor = Float.T(
        default=0.0,
        help='anchor point with respect to source.time: ('
             '-1.0: left -> source duration [0, T] ~ hypocenter time, '
             ' 0.0: center -> source duration [-T/2, T/2] ~ centroid time, '
             '+1.0: right -> source duration [-T, 0] ~ rupture end time)')

    @classmethod
    def factor_duration_to_effective(cls):
        # For a boxcar the effective duration equals the duration.
        return 1.0

    def centroid_time(self, tref):
        return tref - 0.5 * self.duration * self.anchor

    def discretize_t(self, deltat, tref):
        '''
        Sample the boxcar on the GF sampling grid.

        Amplitudes are obtained by integrating the boxcar over each
        sample bin, normalized to unit sum, then sub-sample shifted so
        the sequence's centroid matches ``centroid_time(tref)``.
        '''
        tmin_stf = tref - self.duration * (self.anchor + 1.) * 0.5
        tmax_stf = tref + self.duration * (1. - self.anchor) * 0.5
        tmin = round(tmin_stf / deltat) * deltat
        tmax = round(tmax_stf / deltat) * deltat
        nt = int(round((tmax - tmin) / deltat)) + 1
        times = num.linspace(tmin, tmax, nt)

        if times.size > 1:
            t_edges = num.linspace(
                tmin - 0.5 * deltat, tmax + 0.5 * deltat, nt + 1)
            # Boxcar as a piecewise-linear function with vertical edges.
            t = tmin_stf + self.duration * num.array(
                [0.0, 0.0, 1.0, 1.0], dtype=float)
            f = num.array([0., 1., 1., 0.], dtype=float)
            amplitudes = util.plf_integrate_piecewise(t_edges, t, f)
            amplitudes /= num.sum(amplitudes)
        else:
            amplitudes = num.ones_like(times)

        # Residual between discrete centroid and target centroid time.
        tshift = num.sum(amplitudes * times) - self.centroid_time(tref)

        return sshift(times, amplitudes, -tshift, deltat)

    def base_key(self):
        return (type(self).__name__, self.duration, self.anchor)

850 

851 

class TriangularSTF(STF):

    '''
    Triangular type source time function.

    .. figure :: /static/stf-TriangularSTF.svg
        :width: 40%
        :align: center
        :alt: triangular source time function
    '''

    duration = Float.T(
        default=0.0,
        help='baseline of the triangle')

    peak_ratio = Float.T(
        default=0.5,
        help='fraction of time compared to duration, '
             'when the maximum amplitude is reached')

    anchor = Float.T(
        default=0.0,
        help='anchor point with respect to source.time: ('
             '-1.0: left -> source duration [0, T] ~ hypocenter time, '
             ' 0.0: center -> source duration [-T/2, T/2] ~ centroid time, '
             '+1.0: right -> source duration [-T, 0] ~ rupture end time)')

    @classmethod
    def factor_duration_to_effective(cls, peak_ratio=None):
        # Ratio of effective duration to baseline duration as a function
        # of the peak position.
        if peak_ratio is None:
            peak_ratio = cls.peak_ratio.default()

        return math.sqrt((peak_ratio**2 - peak_ratio + 1.0) * 2.0 / 3.0)

    def __init__(self, effective_duration=None, **kwargs):
        # Allow initialization via effective_duration, converted with the
        # peak_ratio-dependent factor.
        if effective_duration is not None:
            kwargs['duration'] = effective_duration / \
                self.factor_duration_to_effective(
                    kwargs.get('peak_ratio', None))

        STF.__init__(self, **kwargs)

    @property
    def centroid_ratio(self):
        # Position of the triangle's centroid as a fraction of duration.
        ra = self.peak_ratio
        rb = 1.0 - ra
        return self.peak_ratio + (rb**2 / 3. - ra**2 / 3.) / (ra + rb)

    def centroid_time(self, tref):
        # Centroid time, honouring the anchor convention.
        ca = self.centroid_ratio
        cb = 1.0 - ca
        if self.anchor <= 0.:
            return tref - ca * self.duration * self.anchor
        else:
            return tref - cb * self.duration * self.anchor

    @property
    def effective_duration(self):
        return self.duration * self.factor_duration_to_effective(
            self.peak_ratio)

    def tminmax_stf(self, tref):
        # Absolute start/end time of the triangle for reference time tref.
        ca = self.centroid_ratio
        cb = 1.0 - ca
        if self.anchor <= 0.:
            tmin_stf = tref - ca * self.duration * (self.anchor + 1.)
            tmax_stf = tmin_stf + self.duration
        else:
            tmax_stf = tref + cb * self.duration * (1. - self.anchor)
            tmin_stf = tmax_stf - self.duration

        return tmin_stf, tmax_stf

    def discretize_t(self, deltat, tref):
        # Integrate the triangle over each sample bin of the sampling
        # grid and normalize to unit sum.
        tmin_stf, tmax_stf = self.tminmax_stf(tref)

        tmin = round(tmin_stf / deltat) * deltat
        tmax = round(tmax_stf / deltat) * deltat
        nt = int(round((tmax - tmin) / deltat)) + 1
        if nt > 1:
            t_edges = num.linspace(
                tmin - 0.5 * deltat, tmax + 0.5 * deltat, nt + 1)
            # Triangle as a piecewise-linear function: rise to the peak,
            # then fall back to zero.
            t = tmin_stf + self.duration * num.array(
                [0.0, self.peak_ratio, 1.0], dtype=float)
            f = num.array([0., 1., 0.], dtype=float)
            amplitudes = util.plf_integrate_piecewise(t_edges, t, f)
            amplitudes /= num.sum(amplitudes)
        else:
            amplitudes = num.ones(1)

        times = num.linspace(tmin, tmax, nt)
        return times, amplitudes

    def base_key(self):
        return (
            type(self).__name__, self.duration, self.peak_ratio, self.anchor)

948 

949 

class HalfSinusoidSTF(STF):

    '''
    Half sinusoid type source time function.

    .. figure :: /static/stf-HalfSinusoidSTF.svg
        :width: 40%
        :align: center
        :alt: half-sinusouid source time function
    '''

    duration = Float.T(
        default=0.0,
        help='duration of the half-sinusoid (baseline)')

    anchor = Float.T(
        default=0.0,
        help='anchor point with respect to source.time: ('
             '-1.0: left -> source duration [0, T] ~ hypocenter time, '
             ' 0.0: center -> source duration [-T/2, T/2] ~ centroid time, '
             '+1.0: right -> source duration [-T, 0] ~ rupture end time)')

    exponent = Int.T(
        default=1,
        help='set to 2 to use square of the half-period sinusoidal function.')

    def __init__(self, effective_duration=None, **kwargs):
        # Allow initialization via effective_duration; conversion depends
        # on the chosen exponent.
        if effective_duration is not None:
            kwargs['duration'] = effective_duration / \
                self.factor_duration_to_effective(
                    kwargs.get('exponent', 1))

        STF.__init__(self, **kwargs)

    @classmethod
    def factor_duration_to_effective(cls, exponent):
        # Analytic ratio of effective to baseline duration for sin
        # (exponent 1) and sin**2 (exponent 2) pulses.
        if exponent == 1:
            return math.sqrt(3.0 * math.pi**2 - 24.0) / math.pi
        elif exponent == 2:
            return math.sqrt(math.pi**2 - 6) / math.pi
        else:
            raise ValueError('Exponent for HalfSinusoidSTF must be 1 or 2.')

    @property
    def effective_duration(self):
        return self.duration * self.factor_duration_to_effective(self.exponent)

    def centroid_time(self, tref):
        return tref - 0.5 * self.duration * self.anchor

    def discretize_t(self, deltat, tref):
        # Integrate the pulse analytically over each sample bin
        # (difference of the antiderivative at the bin edges), then
        # normalize to unit sum.
        tmin_stf = tref - self.duration * (self.anchor + 1.) * 0.5
        tmax_stf = tref + self.duration * (1. - self.anchor) * 0.5
        tmin = round(tmin_stf / deltat) * deltat
        tmax = round(tmax_stf / deltat) * deltat
        nt = int(round((tmax - tmin) / deltat)) + 1
        if nt > 1:
            # Clip the bin edges to the support of the STF.
            t_edges = num.maximum(tmin_stf, num.minimum(tmax_stf, num.linspace(
                tmin - 0.5 * deltat, tmax + 0.5 * deltat, nt + 1)))

            if self.exponent == 1:
                # Antiderivative of sin(pi*t/T).
                fint = -num.cos(
                    (t_edges - tmin_stf) * (math.pi / self.duration))

            elif self.exponent == 2:
                # Antiderivative of sin**2(pi*t/T).
                fint = (t_edges - tmin_stf) / self.duration \
                    - 1.0 / (2.0 * math.pi) * num.sin(
                        (t_edges - tmin_stf) * (2.0 * math.pi / self.duration))
            else:
                raise ValueError(
                    'Exponent for HalfSinusoidSTF must be 1 or 2.')

            amplitudes = fint[1:] - fint[:-1]
            amplitudes /= num.sum(amplitudes)
        else:
            amplitudes = num.ones(1)

        times = num.linspace(tmin, tmax, nt)
        return times, amplitudes

    def base_key(self):
        # NOTE(review): ``exponent`` is not part of the base key although
        # it affects the waveform -- confirm whether intentional.
        return (type(self).__name__, self.duration, self.anchor)

1032 

1033 

class SmoothRampSTF(STF):
    '''
    Smooth-ramp type source time function for near-field displacement.
    Based on moment function of double-couple point source proposed by Bruestle
    and Mueller (PEPI, 1983).

    .. [1] W. Bruestle, G. Mueller (1983), Moment and duration of shallow
        earthquakes from Love-wave modelling for regional distances, PEPI 32,
        312-324.

    .. figure :: /static/stf-SmoothRampSTF.svg
        :width: 40%
        :alt: smooth ramp source time function
    '''
    duration = Float.T(
        default=0.0,
        help='duration of the ramp (baseline)')

    rise_ratio = Float.T(
        default=0.5,
        help='fraction of time compared to duration, '
             'when the maximum amplitude is reached')

    anchor = Float.T(
        default=0.0,
        help='anchor point with respect to source.time: ('
             '-1.0: left -> source duration ``[0, T]`` ~ hypocenter time, '
             '0.0: center -> source duration ``[-T/2, T/2]`` ~ centroid time, '
             '+1.0: right -> source duration ``[-T, 0]`` ~ rupture end time)')

    def discretize_t(self, deltat, tref):
        # Sample the ramp on the sampling grid and normalize to unit sum.
        tmin_stf = tref - self.duration * (self.anchor + 1.) * 0.5
        tmax_stf = tref + self.duration * (1. - self.anchor) * 0.5
        tmin = round(tmin_stf / deltat) * deltat
        tmax = round(tmax_stf / deltat) * deltat
        D = round((tmax - tmin) / deltat) * deltat
        nt = int(round(D / deltat)) + 1
        times = num.linspace(tmin, tmax, nt)
        if nt > 1:
            rise_time = self.rise_ratio * self.duration
            amplitudes = num.ones_like(times)
            tp = tmin + rise_time
            # Smooth cosine-combination onset on the rising part (after
            # Bruestle & Mueller, 1983); constant from the peak onward.
            ii = num.where(times <= tp)
            t_inc = times[ii]
            a = num.cos(num.pi * (t_inc - tmin_stf) / rise_time)
            b = num.cos(3 * num.pi * (t_inc - tmin_stf) / rise_time) - 1.0
            amplitudes[ii] = (9. / 16.) * (1 - a + (1. / 9.) * b)

            amplitudes /= num.sum(amplitudes)
        else:
            amplitudes = num.ones(1)

        return times, amplitudes

    def base_key(self):
        return (type(self).__name__,
                self.duration, self.rise_ratio, self.anchor)

1091 

1092 

class ResonatorSTF(STF):
    '''
    Simple resonator like source time function.

    .. math ::

        f(t) = 0 for t < 0
        f(t) = e^{-t/tau} * sin(2 * pi * f * t)


    .. figure :: /static/stf-SmoothRampSTF.svg
        :width: 40%
        :alt: smooth ramp source time function

    '''

    duration = Float.T(
        default=0.0,
        help='decay time')

    frequency = Float.T(
        default=1.0,
        help='resonance frequency')

    def discretize_t(self, deltat, tref):
        '''
        Sample the damped sinusoid from ``tref`` out to three decay times.
        '''
        tmin_stf = tref
        tmax_stf = tref + 3 * self.duration
        tmin = math.floor(tmin_stf / deltat) * deltat
        tmax = math.ceil(tmax_stf / deltat) * deltat
        times = util.arange2(tmin, tmax, deltat)

        t_rel = times - tref
        amplitudes = num.exp(-t_rel / self.duration) \
            * num.sin(2.0 * num.pi * self.frequency * t_rel)

        return times, amplitudes

    def base_key(self):
        return (type(self).__name__, self.duration, self.frequency)

1131 

1132 

class STFMode(StringChoice):
    '''
    When the source time function is applied.

    ``'pre'``: during discretization of the source (before stacking of
    Green's functions), ``'post'``: on the stacked synthetics (see
    :py:meth:`Source.effective_stf_pre` and
    :py:meth:`Source.effective_stf_post`).
    '''
    choices = ['pre', 'post']

1135 

1136 

class Source(Location, Cloneable):
    '''
    Base class for all source models.
    '''

    name = String.T(optional=True, default='')

    time = Timestamp.T(
        default=Timestamp.D('1970-01-01 00:00:00'),
        help='source origin time.')

    stf = STF.T(
        optional=True,
        help='source time function.')

    stf_mode = STFMode.T(
        default='post',
        help='whether to apply source time function in pre or '
             'post-processing.')

    def __init__(self, **kwargs):
        # All attribute handling is delegated to the Guts machinery via
        # Location.
        Location.__init__(self, **kwargs)

    def update(self, **kwargs):
        '''
        Change some of the source models parameters.

        Example::

            >>> from pyrocko import gf
            >>> s = gf.DCSource()
            >>> s.update(strike=66., dip=33.)
            >>> print(s)
            --- !pf.DCSource
            depth: 0.0
            time: 1970-01-01 00:00:00
            magnitude: 6.0
            strike: 66.0
            dip: 33.0
            rake: 0.0

        '''

        # Item assignment is presumably provided by Cloneable -- it maps
        # attribute names to values (TODO confirm).
        for (k, v) in kwargs.items():
            self[k] = v

    def grid(self, **variables):
        '''
        Create grid of source model variations.

        :returns: :py:class:`SourceGrid` instance.

        Example::

            >>> from pyrocko import gf
            >>> base = gf.DCSource()
            >>> R = gf.Range
            >>> for s in base.grid(depth=R('0 .. 10k : 1k')):
            ...     print(s.depth)

        .. note ::

            The example in the original docstring was truncated; verify
            the ``gf.Range`` spec-string syntax against its
            documentation.
        '''
        return SourceGrid(base=self, variables=variables)

    def base_key(self):
        '''
        Get key to decide about source discretization / GF stack sharing.

        When two source models differ only in amplitude and origin time, the
        discretization and the GF stacking can be done only once for a unit
        amplitude and a zero origin time and the amplitude and origin times of
        the seismograms can be applied during post-processing of the synthetic
        seismogram.

        For any derived parameterized source model, this method is called to
        decide if discretization and stacking of the source should be shared.
        When two source models return an equal vector of values discretization
        is shared.
        '''
        return (self.depth, self.lat, self.north_shift,
                self.lon, self.east_shift, self.time, type(self).__name__) + \
            self.effective_stf_pre().base_key()

    def get_factor(self):
        '''
        Get the scaling factor to be applied during post-processing.

        Discretization of the base seismogram is usually done for a unit
        amplitude, because a common factor can be efficiently multiplied to
        final seismograms. This eliminates to do repeat the stacking when
        creating seismograms for a series of source models only differing in
        amplitude.

        This method should return the scaling factor to apply in the
        post-processing (often this is simply the scalar moment of the source).
        '''

        return 1.0

    def effective_stf_pre(self):
        '''
        Return the STF applied before stacking of the Green's functions.

        This STF is used during discretization of the parameterized source
        models, i.e. to produce a temporal distribution of point sources.

        Handling of the STF before stacking of the GFs is less efficient but
        allows to use different source time functions for different parts of
        the source.
        '''

        if self.stf is not None and self.stf_mode == 'pre':
            return self.stf
        else:
            # Fall back to a unit pulse (module-level default STF).
            return g_unit_pulse

    def effective_stf_post(self):
        '''
        Return the STF applied after stacking of the Green's functions.

        This STF is used in the post-processing of the synthetic seismograms.

        Handling of the STF after stacking of the GFs is usually more efficient
        but is only possible when a common STF is used for all subsources.
        '''

        if self.stf is not None and self.stf_mode == 'post':
            return self.stf
        else:
            return g_unit_pulse

    def _dparams_base(self):
        # Keyword arguments common to all Discretized*Source
        # constructors, for the single-point (no STF samples) case.
        return dict(times=arr(self.time),
                    lat=self.lat, lon=self.lon,
                    north_shifts=arr(self.north_shift),
                    east_shifts=arr(self.east_shift),
                    depths=arr(self.depth))

    def _hash(self):
        # Hex digest over base_key(); stable identifier for caching.
        sha = sha1()
        for k in self.base_key():
            sha.update(str(k).encode())
        return sha.hexdigest()

    def _dparams_base_repeated(self, times):
        # Like _dparams_base, but with the position repeated once per
        # STF time sample so arrays stay aligned with ``times``.
        if times is None:
            return self._dparams_base()

        nt = times.size
        north_shifts = num.repeat(self.north_shift, nt)
        east_shifts = num.repeat(self.east_shift, nt)
        depths = num.repeat(self.depth, nt)
        return dict(times=times,
                    lat=self.lat, lon=self.lon,
                    north_shifts=north_shifts,
                    east_shifts=east_shifts,
                    depths=depths)

    def pyrocko_event(self, store=None, target=None, **kwargs):
        '''
        Convert to a :py:class:`pyrocko.model.Event` object.

        Additional keyword arguments are passed through to the event
        constructor (subclasses use this to attach magnitude and moment
        tensor information).
        '''
        duration = None
        if self.stf:
            duration = self.stf.effective_duration

        return model.Event(
            lat=self.lat,
            lon=self.lon,
            north_shift=self.north_shift,
            east_shift=self.east_shift,
            time=self.time,
            name=self.name,
            depth=self.depth,
            duration=duration,
            **kwargs)

    def outline(self, cs='xyz'):
        '''
        Get the point source position in the requested coordinate system.

        :param cs: coordinate system name, see
            :ref:`coordinate-system-names`
        :returns: array of shape (1, 3) for ``'xyz'``, (1, 2) otherwise
        '''
        points = num.atleast_2d(num.zeros([1, 3]))

        points[:, 0] += self.north_shift
        points[:, 1] += self.east_shift
        points[:, 2] += self.depth
        if cs == 'xyz':
            return points
        elif cs == 'xy':
            return points[:, :2]
        elif cs in ('latlon', 'lonlat'):
            latlon = ne_to_latlon(
                self.lat, self.lon, points[:, 0], points[:, 1])

            latlon = num.array(latlon).T
            if cs == 'latlon':
                return latlon
            else:
                # 'lonlat': swap columns.
                return latlon[:, ::-1]

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        '''
        Create a source from a :py:class:`pyrocko.model.Event` object.

        Explicit keyword arguments override values taken from the event.

        :raises ConversionError: if the event has no depth
        '''
        if ev.depth is None:
            raise ConversionError(
                'Cannot convert event object to source object: '
                'no depth information available')

        stf = None
        if ev.duration is not None:
            stf = HalfSinusoidSTF(effective_duration=ev.duration)

        d = dict(
            name=ev.name,
            time=ev.time,
            lat=ev.lat,
            lon=ev.lon,
            north_shift=ev.north_shift,
            east_shift=ev.east_shift,
            depth=ev.depth,
            stf=stf)
        d.update(kwargs)
        return cls(**d)

    def get_magnitude(self):
        '''
        Get the moment magnitude; must be provided by subclasses.
        '''
        raise NotImplementedError(
            '%s does not implement get_magnitude()'
            % self.__class__.__name__)

1356 

1357 

class SourceWithMagnitude(Source):
    '''
    Base class for sources containing a moment magnitude.
    '''

    magnitude = Float.T(
        default=6.0,
        help='Moment magnitude Mw as in [Hanks and Kanamori, 1979]')

    def __init__(self, **kwargs):
        # Accept a scalar moment [Nm] as an alternative to a magnitude;
        # an explicitly given magnitude keyword takes precedence.
        if 'moment' in kwargs:
            mom = kwargs.pop('moment')
            if 'magnitude' not in kwargs:
                kwargs['magnitude'] = float(pmt.moment_to_magnitude(mom))

        Source.__init__(self, **kwargs)

    @property
    def moment(self):
        # Scalar moment [Nm], derived from the magnitude on the fly.
        return float(pmt.magnitude_to_moment(self.magnitude))

    @moment.setter
    def moment(self, value):
        self.magnitude = float(pmt.moment_to_magnitude(value))

    def pyrocko_event(self, store=None, target=None, **kwargs):
        '''
        Convert to a :py:class:`pyrocko.model.Event`, carrying over the
        magnitude.
        '''
        return Source.pyrocko_event(
            self, store, target,
            magnitude=self.magnitude,
            **kwargs)

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        '''
        Create a source from a :py:class:`pyrocko.model.Event`.

        Explicit keyword arguments override values taken from the event.
        '''
        d = {}
        # Fixed: compare against None so that a valid magnitude of 0.0
        # is not silently dropped (the old truthiness test discarded it).
        if ev.magnitude is not None:
            d.update(magnitude=ev.magnitude)

        d.update(kwargs)
        return super(SourceWithMagnitude, cls).from_pyrocko_event(ev, **d)

    def get_magnitude(self):
        '''
        Return the moment magnitude Mw of this source.
        '''
        return self.magnitude

1400 

1401 

class DerivedMagnitudeError(ValidationError):
    '''
    Raised when the magnitude of a source cannot be derived, e.g. because
    of conflicting parameters or a missing earth model.
    '''
    pass

1404 

1405 

class SourceWithDerivedMagnitude(Source):
    '''
    Base class for sources whose magnitude is derived from other
    parameters instead of being set directly.
    '''

    class __T(Source.T):
        # Guts type specialization: additionally run the source's
        # conflict check during validation.

        def validate_extra(self, val):
            Source.T.validate_extra(self, val)
            val.check_conflicts()

    def check_conflicts(self):
        '''
        Check for parameter conflicts.

        To be overloaded in subclasses. Raises :py:exc:`DerivedMagnitudeError`
        on conflicts.
        '''
        pass

    def get_magnitude(self, store=None, target=None):
        # Subclasses derive Mw from their parameters; the base class has
        # nothing to derive it from.
        raise DerivedMagnitudeError('No magnitude set.')

    def get_moment(self, store=None, target=None):
        '''
        Get the scalar moment [Nm] corresponding to the derived magnitude.
        '''
        return float(pmt.magnitude_to_moment(
            self.get_magnitude(store, target)))

    def pyrocko_moment_tensor(self, store=None, target=None):
        # To be provided by subclasses which can express themselves as a
        # moment tensor.
        raise NotImplementedError(
            '%s does not implement pyrocko_moment_tensor()'
            % self.__class__.__name__)

    def pyrocko_event(self, store=None, target=None, **kwargs):
        '''
        Convert to a :py:class:`pyrocko.model.Event`.

        Moment tensor and magnitude are attached when they can be
        derived; otherwise they are left unset.
        '''
        try:
            mt = self.pyrocko_moment_tensor(store, target)
            magnitude = self.get_magnitude()
        except (DerivedMagnitudeError, NotImplementedError):
            mt = None
            magnitude = None

        return Source.pyrocko_event(
            self, store, target,
            moment_tensor=mt,
            magnitude=magnitude,
            **kwargs)

1448 

1449 

class ExplosionSource(SourceWithDerivedMagnitude):
    '''
    An isotropic explosion point source.
    '''

    magnitude = Float.T(
        optional=True,
        help='moment magnitude Mw as in [Hanks and Kanamori, 1979]')

    volume_change = Float.T(
        optional=True,
        help='volume change of the explosion/implosion or '
             'the contracting/extending magmatic source. [m^3]')

    discretized_source_class = meta.DiscretizedExplosionSource

    def __init__(self, **kwargs):
        # Accept a scalar moment [Nm] as an alternative to a magnitude;
        # an explicitly given magnitude keyword takes precedence.
        if 'moment' in kwargs:
            mom = kwargs.pop('moment')
            if 'magnitude' not in kwargs:
                kwargs['magnitude'] = float(pmt.moment_to_magnitude(mom))

        SourceWithDerivedMagnitude.__init__(self, **kwargs)

    def base_key(self):
        return SourceWithDerivedMagnitude.base_key(self) + \
            (self.volume_change,)

    def check_conflicts(self):
        # magnitude and volume_change are alternative parameterizations;
        # at most one may be set.
        if self.magnitude is not None and self.volume_change is not None:
            raise DerivedMagnitudeError(
                'Magnitude and volume_change are both defined.')

    def get_magnitude(self, store=None, target=None):
        '''
        Get Mw: directly from ``magnitude``, derived from
        ``volume_change`` via the earth model, or for unit moment if
        neither is set.
        '''
        self.check_conflicts()

        if self.magnitude is not None:
            return self.magnitude

        elif self.volume_change is not None:
            moment = self.volume_change * \
                self.get_moment_to_volume_change_ratio(store, target)

            # abs(): a contraction (negative volume change) still has a
            # positive scalar moment; the sign is applied elsewhere.
            return float(pmt.moment_to_magnitude(abs(moment)))
        else:
            return float(pmt.moment_to_magnitude(1.0))

    def get_volume_change(self, store=None, target=None):
        '''
        Get the volume change [m^3], derived from the magnitude if not
        set directly.
        '''
        self.check_conflicts()

        if self.volume_change is not None:
            return self.volume_change

        elif self.magnitude is not None:
            moment = float(pmt.magnitude_to_moment(self.magnitude))
            return moment / self.get_moment_to_volume_change_ratio(
                store, target)

        else:
            # NOTE(review): target is not forwarded here, so the ratio
            # falls back to 'multilinear' interpolation -- confirm
            # intended.
            return 1.0 / self.get_moment_to_volume_change_ratio(store)

    def get_moment_to_volume_change_ratio(self, store, target=None):
        '''
        Ratio between scalar moment and volume change, i.e. three times
        the shear modulus at the source position (from the store's earth
        model).

        :raises DerivedMagnitudeError: if no store is given or the
            source position is outside the model bounds
        '''
        if store is None:
            raise DerivedMagnitudeError(
                'Need earth model to convert between volume change and '
                'magnitude.')

        points = num.array(
            [[self.north_shift, self.east_shift, self.depth]], dtype=float)

        interpolation = target.interpolation if target else 'multilinear'
        try:
            shear_moduli = store.config.get_shear_moduli(
                self.lat, self.lon,
                points=points,
                interpolation=interpolation)[0]
        except meta.OutOfBounds:
            raise DerivedMagnitudeError(
                'Could not get shear modulus at source position.')

        return float(3. * shear_moduli)

    def get_factor(self):
        # Moment is baked into the discretized amplitudes, no
        # post-processing scaling needed.
        return 1.0

    def discretize_basesource(self, store, target=None):
        '''
        Discretize to a temporal distribution of explosion point sources.
        '''
        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)

        # sqrt(2/3) relates the scalar moment to the per-component
        # amplitude of the isotropic tensor (same factor as in
        # pyrocko_moment_tensor below) -- confirm convention in pmt docs.
        amplitudes *= self.get_moment(store, target) * math.sqrt(2. / 3.)

        if self.volume_change is not None:
            # Restore the sign for implosions / contractions.
            if self.volume_change < 0.:
                amplitudes *= -1

        return meta.DiscretizedExplosionSource(
            m0s=amplitudes,
            **self._dparams_base_repeated(times))

    def pyrocko_moment_tensor(self, store=None, target=None):
        # Purely isotropic tensor with equal diagonal entries.
        a = self.get_moment(store, target) * math.sqrt(2. / 3.)
        return pmt.MomentTensor(m=pmt.symmat6(a, a, a, 0., 0., 0.))

1552 

1553 

class RectangularExplosionSource(ExplosionSource):
    '''
    Rectangular or line explosion source.
    '''

    discretized_source_class = meta.DiscretizedExplosionSource

    strike = Float.T(
        default=0.0,
        help='strike direction in [deg], measured clockwise from north')

    dip = Float.T(
        default=90.0,
        help='dip angle in [deg], measured downward from horizontal')

    length = Float.T(
        default=0.,
        help='length of rectangular source area [m]')

    width = Float.T(
        default=0.,
        help='width of rectangular source area [m]')

    # NOTE(review): the help text mentions center_left/center_right but
    # those values are absent from ``choices`` -- confirm which is
    # intended.
    anchor = StringChoice.T(
        choices=['top', 'top_left', 'top_right', 'center', 'bottom',
                 'bottom_left', 'bottom_right'],
        default='center',
        optional=True,
        help='Anchor point for positioning the plane, can be: top, center or'
             'bottom and also top_left, top_right,bottom_left,'
             'bottom_right, center_left and center right')

    nucleation_x = Float.T(
        optional=True,
        help='horizontal position of rupture nucleation in normalized fault '
             'plane coordinates (-1 = left edge, +1 = right edge)')

    nucleation_y = Float.T(
        optional=True,
        help='down-dip position of rupture nucleation in normalized fault '
             'plane coordinates (-1 = upper edge, +1 = lower edge)')

    velocity = Float.T(
        default=3500.,
        help='speed of explosion front [m/s]')

    aggressive_oversampling = Bool.T(
        default=False,
        help='Aggressive oversampling for basesource discretization. '
             'When using \'multilinear\' interpolation oversampling has'
             ' practically no effect.')

    def base_key(self):
        # Geometry and kinematics enter the key so differing rectangles
        # are discretized separately (see Source.base_key).
        return Source.base_key(self) + (self.strike, self.dip, self.length,
                                        self.width, self.nucleation_x,
                                        self.nucleation_y, self.velocity,
                                        self.anchor)

    def discretize_basesource(self, store, target=None):
        '''
        Discretize the rectangle into a spatio-temporal distribution of
        explosion point sources with the total moment distributed over
        the sub-sources.
        '''

        # Convert normalized nucleation coordinates (-1..+1) to offsets
        # in meters on the plane.
        if self.nucleation_x is not None:
            nucx = self.nucleation_x * 0.5 * self.length
        else:
            nucx = None

        if self.nucleation_y is not None:
            nucy = self.nucleation_y * 0.5 * self.width
        else:
            nucy = None

        stf = self.effective_stf_pre()

        points, times, amplitudes, dl, dw, nl, nw = discretize_rect_source(
            store.config.deltas, store.config.deltat,
            self.time, self.north_shift, self.east_shift, self.depth,
            self.strike, self.dip, self.length, self.width, self.anchor,
            self.velocity, stf=stf, nucleation_x=nucx, nucleation_y=nucy)

        # Normalize, then scale so the sub-source moments sum to the
        # total moment.
        amplitudes /= num.sum(amplitudes)
        amplitudes *= self.get_moment(store, target)

        return meta.DiscretizedExplosionSource(
            lat=self.lat,
            lon=self.lon,
            times=times,
            north_shifts=points[:, 0],
            east_shifts=points[:, 1],
            depths=points[:, 2],
            m0s=amplitudes)

    def outline(self, cs='xyz'):
        '''
        Get the corner points of the rectangle in the requested
        coordinate system (see :ref:`coordinate-system-names`).
        '''
        points = outline_rect_source(self.strike, self.dip, self.length,
                                     self.width, self.anchor)

        points[:, 0] += self.north_shift
        points[:, 1] += self.east_shift
        points[:, 2] += self.depth
        if cs == 'xyz':
            return points
        elif cs == 'xy':
            return points[:, :2]
        elif cs in ('latlon', 'lonlat'):
            latlon = ne_to_latlon(
                self.lat, self.lon, points[:, 0], points[:, 1])

            latlon = num.array(latlon).T
            if cs == 'latlon':
                return latlon
            else:
                # 'lonlat': swap columns.
                return latlon[:, ::-1]

    def get_nucleation_abs_coord(self, cs='xy'):
        '''
        Get the absolute coordinates of the nucleation point, or
        ``(None, None)`` if no nucleation point is set.
        '''

        if self.nucleation_x is None:
            return None, None

        coords = from_plane_coords(self.strike, self.dip, self.length,
                                   self.width, self.depth, self.nucleation_x,
                                   self.nucleation_y, lat=self.lat,
                                   lon=self.lon, north_shift=self.north_shift,
                                   east_shift=self.east_shift, cs=cs)
        return coords

1676 

1677 

class DCSource(SourceWithMagnitude):
    '''
    A double-couple point source.
    '''

    strike = Float.T(
        default=0.0,
        help='strike direction in [deg], measured clockwise from north')

    dip = Float.T(
        default=90.0,
        help='dip angle in [deg], measured downward from horizontal')

    rake = Float.T(
        default=0.0,
        help='rake angle in [deg], '
             'measured counter-clockwise from right-horizontal '
             'in on-plane view')

    discretized_source_class = meta.DiscretizedMTSource

    def base_key(self):
        return Source.base_key(self) + (self.strike, self.dip, self.rake)

    def get_factor(self):
        # Scalar moment [Nm]; the discretization below is for unit
        # moment, scaling happens in post-processing.
        return float(pmt.magnitude_to_moment(self.magnitude))

    def discretize_basesource(self, store, target=None):
        '''
        Discretize to a temporal distribution of unit-moment MT point
        sources (the scalar moment is applied via get_factor).
        '''
        mot = pmt.MomentTensor(
            strike=self.strike, dip=self.dip, rake=self.rake)

        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)
        # Outer product: one 6-component tensor per STF sample.
        return meta.DiscretizedMTSource(
            m6s=mot.m6()[num.newaxis, :] * amplitudes[:, num.newaxis],
            **self._dparams_base_repeated(times))

    def pyrocko_moment_tensor(self, store=None, target=None):
        return pmt.MomentTensor(
            strike=self.strike,
            dip=self.dip,
            rake=self.rake,
            scalar_moment=self.moment)

    def pyrocko_event(self, store=None, target=None, **kwargs):
        '''
        Convert to a :py:class:`pyrocko.model.Event` with attached
        moment tensor.
        '''
        return SourceWithMagnitude.pyrocko_event(
            self, store, target,
            moment_tensor=self.pyrocko_moment_tensor(store, target),
            **kwargs)

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        '''
        Create a DCSource from a :py:class:`pyrocko.model.Event`, taking
        strike/dip/rake and magnitude from its moment tensor if present.
        '''
        d = {}
        mt = ev.moment_tensor
        if mt:
            # Use the first of the two possible fault plane solutions.
            (strike, dip, rake), _ = mt.both_strike_dip_rake()
            d.update(
                strike=float(strike),
                dip=float(dip),
                rake=float(rake),
                magnitude=float(mt.moment_magnitude()))

        d.update(kwargs)
        return super(DCSource, cls).from_pyrocko_event(ev, **d)

1742 

1743 

class CLVDSource(SourceWithMagnitude):
    '''
    A pure CLVD point source.
    '''

    discretized_source_class = meta.DiscretizedMTSource

    azimuth = Float.T(
        default=0.0,
        help='azimuth direction of largest dipole, clockwise from north [deg]')

    dip = Float.T(
        default=90.,
        help='dip direction of largest dipole, downward from horizontal [deg]')

    def base_key(self):
        return Source.base_key(self) + (self.azimuth, self.dip)

    def get_factor(self):
        # Scalar moment [Nm]; applied in post-processing.
        return float(pmt.magnitude_to_moment(self.magnitude))

    @property
    def m6(self):
        '''
        6-component moment tensor of the oriented CLVD.
        '''
        # CLVD eigenvalues (-a/2, -a/2, a), scaled to match the scalar
        # moment from get_factor().
        a = math.sqrt(4. / 3.) * self.get_factor()
        m = pmt.symmat6(-0.5 * a, -0.5 * a, a, 0., 0., 0.)
        # Rotate the symmetry axis to the configured azimuth and dip.
        rotmat1 = pmt.euler_to_matrix(
            d2r * (self.dip - 90.),
            d2r * (self.azimuth - 90.),
            0.)
        m = num.dot(rotmat1.T, num.dot(m, rotmat1))
        return pmt.to6(m)

    @property
    def m6_astuple(self):
        return tuple(self.m6.tolist())

    def discretize_basesource(self, store, target=None):
        '''
        Discretize to a temporal distribution of MT point sources; the
        amplitudes are normalized by the factor applied during
        post-processing.
        '''
        factor = self.get_factor()
        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)
        return meta.DiscretizedMTSource(
            m6s=self.m6[num.newaxis, :] * amplitudes[:, num.newaxis] / factor,
            **self._dparams_base_repeated(times))

    def pyrocko_moment_tensor(self, store=None, target=None):
        return pmt.MomentTensor(m=pmt.symmat6(*self.m6_astuple))

    def pyrocko_event(self, store=None, target=None, **kwargs):
        '''
        Convert to a :py:class:`pyrocko.model.Event` with attached
        moment tensor and magnitude.
        '''
        # Build the moment tensor once and reuse it (the original code
        # constructed it twice).
        mt = self.pyrocko_moment_tensor(store, target)
        return Source.pyrocko_event(
            self, store, target,
            moment_tensor=mt,
            magnitude=float(mt.moment_magnitude()),
            **kwargs)

1798 

1799 

class VLVDSource(SourceWithDerivedMagnitude):
    '''
    Volumetric linear vector dipole source.

    This source is a parameterization for a restricted moment tensor point
    source, useful to represent dyke or sill like inflation or deflation
    sources. The restriction is such that the moment tensor is rotational
    symmetric. It can be represented by a superposition of a linear vector
    dipole (here we use a CLVD for convenience) and an isotropic component. The
    restricted moment tensor has 4 degrees of freedom: 2 independent
    eigenvalues and 2 rotation angles orienting the symmetry axis.

    In this parameterization, the isotropic component is controlled by
    ``volume_change``. To define the moment tensor, it must be converted to the
    scalar moment of the MT's isotropic component. For the conversion, the
    shear modulus at the source's position must be known. This value is
    extracted from the earth model defined in the GF store in use.

    The CLVD part is controlled by its scalar moment :math:`M_0`:
    ``clvd_moment``. The sign of ``clvd_moment`` is used to switch between a
    positive or negative CLVD (the sign of the largest eigenvalue).
    '''

    discretized_source_class = meta.DiscretizedMTSource

    azimuth = Float.T(
        default=0.0,
        help='azimuth direction of symmetry axis, clockwise from north [deg].')

    dip = Float.T(
        default=90.,
        help='dip direction of symmetry axis, downward from horizontal [deg].')

    volume_change = Float.T(
        default=0.,
        help='volume change of the inflation/deflation [m^3].')

    clvd_moment = Float.T(
        default=0.,
        help='scalar moment :math:`M_0` of the CLVD component [Nm]. The sign '
             'controls the sign of the CLVD (the sign of its largest '
             'eigenvalue).')

    def get_moment_to_volume_change_ratio(self, store, target):
        '''
        Ratio between scalar moment and volume change [Nm/m^3]: three
        times the shear modulus at the source position, taken from the
        store's earth model.

        :raises DerivedMagnitudeError: if store or target are missing,
            or the source position is outside the model bounds
        '''
        if store is None or target is None:
            raise DerivedMagnitudeError(
                'Need earth model to convert between volume change and '
                'magnitude.')

        points = num.array(
            [[self.north_shift, self.east_shift, self.depth]], dtype=float)

        try:
            shear_moduli = store.config.get_shear_moduli(
                self.lat, self.lon,
                points=points,
                interpolation=target.interpolation)[0]
        except meta.OutOfBounds:
            raise DerivedMagnitudeError(
                'Could not get shear modulus at source position.')

        return float(3. * shear_moduli)

    def base_key(self):
        return Source.base_key(self) + \
            (self.azimuth, self.dip, self.volume_change, self.clvd_moment)

    def get_magnitude(self, store=None, target=None):
        # Derived from the composed moment tensor's scalar moment.
        mt = self.pyrocko_moment_tensor(store, target)
        return float(pmt.moment_to_magnitude(mt.moment))

    def get_m6(self, store, target):
        '''
        Compose the 6-component moment tensor: oriented CLVD part plus
        isotropic part derived from the volume change.
        '''
        # CLVD eigenvalues (-a/2, -a/2, a) for scalar moment clvd_moment.
        a = math.sqrt(4. / 3.) * self.clvd_moment
        m_clvd = pmt.symmat6(-0.5 * a, -0.5 * a, a, 0., 0., 0.)

        # Rotate the symmetry axis to the configured azimuth and dip.
        rotmat1 = pmt.euler_to_matrix(
            d2r * (self.dip - 90.),
            d2r * (self.azimuth - 90.),
            0.)
        m_clvd = num.dot(rotmat1.T, num.dot(m_clvd, rotmat1))

        m_iso = self.volume_change * \
            self.get_moment_to_volume_change_ratio(store, target)

        m_iso = pmt.symmat6(m_iso, m_iso, m_iso, 0.,
                            0., 0.,) * math.sqrt(2. / 3)

        m = pmt.to6(m_clvd) + pmt.to6(m_iso)
        return m

    def get_moment(self, store=None, target=None):
        return float(pmt.magnitude_to_moment(
            self.get_magnitude(store, target)))

    def get_m6_astuple(self, store, target):
        m6 = self.get_m6(store, target)
        return tuple(m6.tolist())

    def discretize_basesource(self, store, target=None):
        '''
        Discretize to a temporal distribution of MT point sources.
        '''
        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)

        m6 = self.get_m6(store, target)

        # Fixed: scale the (6,)-vector per time sample via an explicit
        # outer product (as DCSource/MTSource do). The previous in-place
        # ``m6 *= amplitudes / factor`` only broadcast correctly when the
        # STF produced exactly one sample and broke for multi-sample STFs
        # (stf_mode='pre'), leaving m6s with a single row while the
        # repeated position arrays had one entry per sample.
        m6s = m6[num.newaxis, :] * (
            amplitudes / self.get_factor())[:, num.newaxis]

        return meta.DiscretizedMTSource(
            m6s=m6s,
            **self._dparams_base_repeated(times))

    def pyrocko_moment_tensor(self, store=None, target=None):
        m6_astuple = self.get_m6_astuple(store, target)
        return pmt.MomentTensor(m=pmt.symmat6(*m6_astuple))

1912 

1913 

class MTSource(Source):
    '''
    A moment tensor point source.
    '''

    discretized_source_class = meta.DiscretizedMTSource

    mnn = Float.T(
        default=1.,
        help='north-north component of moment tensor in [Nm]')

    mee = Float.T(
        default=1.,
        help='east-east component of moment tensor in [Nm]')

    mdd = Float.T(
        default=1.,
        help='down-down component of moment tensor in [Nm]')

    mne = Float.T(
        default=0.,
        help='north-east component of moment tensor in [Nm]')

    mnd = Float.T(
        default=0.,
        help='north-down component of moment tensor in [Nm]')

    med = Float.T(
        default=0.,
        help='east-down component of moment tensor in [Nm]')

    def __init__(self, **kwargs):
        # Allow construction from a 6-component vector via the ``m6``
        # keyword (ordering: mnn, mee, mdd, mne, mnd, med).
        if 'm6' in kwargs:
            for (k, v) in zip('mnn mee mdd mne mnd med'.split(),
                              kwargs.pop('m6')):
                kwargs[k] = float(v)

        Source.__init__(self, **kwargs)

    @property
    def m6(self):
        # Components as a NumPy array.
        return num.array(self.m6_astuple)

    @property
    def m6_astuple(self):
        return (self.mnn, self.mee, self.mdd, self.mne, self.mnd, self.med)

    @m6.setter
    def m6(self, value):
        self.mnn, self.mee, self.mdd, self.mne, self.mnd, self.med = value

    def base_key(self):
        return Source.base_key(self) + self.m6_astuple

    def discretize_basesource(self, store, target=None):
        '''
        Discretize to a temporal distribution of MT point sources, one
        scaled tensor per STF sample.
        '''
        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)
        return meta.DiscretizedMTSource(
            m6s=self.m6[num.newaxis, :] * amplitudes[:, num.newaxis],
            **self._dparams_base_repeated(times))

    def get_magnitude(self, store=None, target=None):
        '''
        Moment magnitude derived from the tensor norm.
        '''
        m6 = self.m6
        # float() wrap added for consistency with the other
        # get_magnitude implementations in this module.
        return float(pmt.moment_to_magnitude(
            math.sqrt(num.sum(m6[0:3]**2) + 2.0 * num.sum(m6[3:6]**2)) /
            math.sqrt(2.)))

    def pyrocko_moment_tensor(self, store=None, target=None):
        return pmt.MomentTensor(m=pmt.symmat6(*self.m6_astuple))

    def pyrocko_event(self, store=None, target=None, **kwargs):
        '''
        Convert to a :py:class:`pyrocko.model.Event` with attached
        moment tensor and magnitude.
        '''
        # Build the moment tensor once and reuse it (the original code
        # constructed it twice).
        mt = self.pyrocko_moment_tensor(store, target)
        return Source.pyrocko_event(
            self, store, target,
            moment_tensor=mt,
            magnitude=float(mt.moment_magnitude()),
            **kwargs)

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        '''
        Create an MTSource from a :py:class:`pyrocko.model.Event`.

        Uses the event's moment tensor if present; otherwise falls back
        to an isotropic tensor matching the event's magnitude.
        '''
        d = {}
        mt = ev.moment_tensor
        if mt:
            d.update(m6=tuple(map(float, mt.m6())))
        else:
            if ev.magnitude is not None:
                mom = pmt.magnitude_to_moment(ev.magnitude)
                v = math.sqrt(2. / 3.) * mom
                d.update(m6=(v, v, v, 0., 0., 0.))

        d.update(kwargs)
        return super(MTSource, cls).from_pyrocko_event(ev, **d)

2006 

2007 

# Symbolic anchor names mapped to normalized in-plane coordinates
# (x, y): x runs from -1 (left edge) to +1 (right edge), y from -1
# (top/upper edge) to +1 (bottom/lower edge), matching the
# nucleation_x/nucleation_y conventions used by the rectangular sources.
map_anchor = {
    'center': (0.0, 0.0),
    'center_left': (-1.0, 0.0),
    'center_right': (1.0, 0.0),
    'top': (0.0, -1.0),
    'top_left': (-1.0, -1.0),
    'top_right': (1.0, -1.0),
    'bottom': (0.0, 1.0),
    'bottom_left': (-1.0, 1.0),
    'bottom_right': (1.0, 1.0)}

2018 

2019 

class RectangularSource(SourceWithDerivedMagnitude):
    '''
    Classical Haskell source model modified for bilateral rupture.
    '''

    discretized_source_class = meta.DiscretizedMTSource

    # magnitude and slip are alternative parameterizations; setting both
    # raises DerivedMagnitudeError (see check_conflicts).
    magnitude = Float.T(
        optional=True,
        help='moment magnitude Mw as in [Hanks and Kanamori, 1979]')

    strike = Float.T(
        default=0.0,
        help='strike direction in [deg], measured clockwise from north')

    dip = Float.T(
        default=90.0,
        help='dip angle in [deg], measured downward from horizontal')

    rake = Float.T(
        default=0.0,
        help='rake angle in [deg], '
             'measured counter-clockwise from right-horizontal '
             'in on-plane view')

    length = Float.T(
        default=0.,
        help='length of rectangular source area [m]')

    width = Float.T(
        default=0.,
        help='width of rectangular source area [m]')

    # NOTE(review): the help text mentions center_left/center_right but
    # those values are absent from ``choices`` -- confirm which is
    # intended.
    anchor = StringChoice.T(
        choices=['top', 'top_left', 'top_right', 'center', 'bottom',
                 'bottom_left', 'bottom_right'],
        default='center',
        optional=True,
        help='Anchor point for positioning the plane, can be: ``top, center '
             'bottom, top_left, top_right,bottom_left,'
             'bottom_right, center_left, center right``.')

    nucleation_x = Float.T(
        optional=True,
        help='horizontal position of rupture nucleation in normalized fault '
             'plane coordinates (``-1.`` = left edge, ``+1.`` = right edge)')

    nucleation_y = Float.T(
        optional=True,
        help='down-dip position of rupture nucleation in normalized fault '
             'plane coordinates (``-1.`` = upper edge, ``+1.`` = lower edge)')

    velocity = Float.T(
        default=3500.,
        help='speed of rupture front [m/s]')

    slip = Float.T(
        optional=True,
        help='Slip on the rectangular source area [m]')

    # Splits the slip vector into shear and tensile parts; see
    # get_slip_tensile / get_slip_shear.
    opening_fraction = Float.T(
        default=0.,
        help='Determines fraction of slip related to opening. '
             '(``-1``: pure tensile closing, '
             '``0``: pure shear, '
             '``1``: pure tensile opening)')

    decimation_factor = Int.T(
        optional=True,
        default=1,
        help='Sub-source decimation factor, a larger decimation will'
             ' make the result inaccurate but shorten the necessary'
             ' computation time (use for testing puposes only).')

    aggressive_oversampling = Bool.T(
        default=False,
        help='Aggressive oversampling for basesource discretization. '
             'When using \'multilinear\' interpolation oversampling has'
             ' practically no effect.')

2099 

2100 def __init__(self, **kwargs): 

2101 if 'moment' in kwargs: 

2102 mom = kwargs.pop('moment') 

2103 if 'magnitude' not in kwargs: 

2104 kwargs['magnitude'] = float(pmt.moment_to_magnitude(mom)) 

2105 

2106 SourceWithDerivedMagnitude.__init__(self, **kwargs) 

2107 

    def base_key(self):
        '''
        Get key to query the Green's function store with.

        Extends the parent key with all parameters influencing the
        discretization, so cached results are not wrongly shared between
        differing sources.
        '''
        return SourceWithDerivedMagnitude.base_key(self) + (
            self.magnitude,
            self.slip,
            self.strike,
            self.dip,
            self.rake,
            self.length,
            self.width,
            self.nucleation_x,
            self.nucleation_y,
            self.velocity,
            self.decimation_factor,
            self.anchor)

2122 

2123 def check_conflicts(self): 

2124 if self.magnitude is not None and self.slip is not None: 

2125 raise DerivedMagnitudeError( 

2126 'Magnitude and slip are both defined.') 

2127 

    def get_magnitude(self, store=None, target=None):
        '''
        Get the moment magnitude of the source.

        If ``magnitude`` is set, it is returned directly. If ``slip`` is
        set, the magnitude is derived from the discretized amplitudes, which
        requires ``store`` and ``target``. Otherwise a reference magnitude
        for a unit moment is returned.

        :raises DerivedMagnitudeError:
            If ``slip`` is given but ``store`` or ``target`` is missing.
        '''
        self.check_conflicts()
        if self.magnitude is not None:
            return self.magnitude

        elif self.slip is not None:
            if None in (store, target):
                raise DerivedMagnitudeError(
                    'Magnitude for a rectangular source with slip defined '
                    'can only be derived when earth model and target '
                    'interpolation method are available.')

            amplitudes = self._discretize(store, target)[2]
            if amplitudes.ndim == 2:
                # CLVD component has no net moment, leave out
                return float(pmt.moment_to_magnitude(
                    num.sum(num.abs(amplitudes[0:2, :]).sum())))
            else:
                return float(pmt.moment_to_magnitude(num.sum(amplitudes)))

        else:
            return float(pmt.moment_to_magnitude(1.0))

2150 

2151 def get_factor(self): 

2152 return 1.0 

2153 

2154 def get_slip_tensile(self): 

2155 return self.slip * self.opening_fraction 

2156 

2157 def get_slip_shear(self): 

2158 return self.slip - abs(self.get_slip_tensile) 

2159 

    def _discretize(self, store, target):
        '''
        Discretize the rectangular source into point sub-sources.

        :returns:
            Sub-source center points, onset times, amplitudes (moment
            contributions, possibly stacked shear/iso/CLVD rows), patch
            length, patch width and patch counts along strike and down dip:
            ``(points, times, amplitudes, dl, dw, nl, nw)``.
        '''
        # Map normalized nucleation coordinates (-1...1) to offsets in [m]
        # relative to the plane center.
        if self.nucleation_x is not None:
            nucx = self.nucleation_x * 0.5 * self.length
        else:
            nucx = None

        if self.nucleation_y is not None:
            nucy = self.nucleation_y * 0.5 * self.width
        else:
            nucy = None

        stf = self.effective_stf_pre()

        points, times, amplitudes, dl, dw, nl, nw = discretize_rect_source(
            store.config.deltas, store.config.deltat,
            self.time, self.north_shift, self.east_shift, self.depth,
            self.strike, self.dip, self.length, self.width, self.anchor,
            self.velocity, stf=stf, nucleation_x=nucx, nucleation_y=nucy,
            decimation_factor=self.decimation_factor,
            aggressive_oversampling=self.aggressive_oversampling)

        if self.slip is not None:
            if target is not None:
                interpolation = target.interpolation
            else:
                interpolation = 'nearest_neighbor'
                logger.warning(
                    'no target information available, will use '
                    '"nearest_neighbor" interpolation when extracting shear '
                    'modulus from earth model')

            shear_moduli = store.config.get_shear_moduli(
                self.lat, self.lon,
                points=points,
                interpolation=interpolation)

            tensile_slip = self.get_slip_tensile()
            shear_slip = self.slip - abs(tensile_slip)

            # Moment amplitude per patch: modulus * slip, split into shear
            # and (optionally) isotropic + CLVD tensile contributions.
            amplitudes_total = [shear_moduli * shear_slip]
            if tensile_slip != 0:
                bulk_moduli = store.config.get_bulk_moduli(
                    self.lat, self.lon,
                    points=points,
                    interpolation=interpolation)

                tensile_iso = bulk_moduli * tensile_slip
                tensile_clvd = (2. / 3.) * shear_moduli * tensile_slip

                amplitudes_total.extend([tensile_iso, tensile_clvd])

            amplitudes_total = num.vstack(amplitudes_total).squeeze() * \
                amplitudes * dl * dw

        else:
            # normalization to retain total moment
            amplitudes_norm = amplitudes / num.sum(amplitudes)
            moment = self.get_moment(store, target)

            amplitudes_total = [
                amplitudes_norm * moment * (1 - abs(self.opening_fraction))]
            if self.opening_fraction != 0.:
                amplitudes_total.append(
                    amplitudes_norm * self.opening_fraction * moment)

            amplitudes_total = num.vstack(amplitudes_total).squeeze()

        return points, times, num.atleast_1d(amplitudes_total), dl, dw, nl, nw

2228 

2229 def discretize_basesource(self, store, target=None): 

2230 

2231 points, times, amplitudes, dl, dw, nl, nw = self._discretize( 

2232 store, target) 

2233 

2234 mot = pmt.MomentTensor( 

2235 strike=self.strike, dip=self.dip, rake=self.rake) 

2236 m6s = num.repeat(mot.m6()[num.newaxis, :], times.size, axis=0) 

2237 

2238 if amplitudes.ndim == 1: 

2239 m6s[:, :] *= amplitudes[:, num.newaxis] 

2240 elif amplitudes.ndim == 2: 

2241 # shear MT components 

2242 rotmat1 = pmt.euler_to_matrix( 

2243 d2r * self.dip, d2r * self.strike, d2r * -self.rake) 

2244 m6s[:, :] *= amplitudes[0, :][:, num.newaxis] 

2245 

2246 if amplitudes.shape[0] == 2: 

2247 # tensile MT components - moment/magnitude input 

2248 tensile = pmt.symmat6(1., 1., 3., 0., 0., 0.) 

2249 rot_tensile = pmt.to6( 

2250 num.dot(rotmat1.T, num.dot(tensile, rotmat1))) 

2251 

2252 m6s_tensile = rot_tensile[ 

2253 num.newaxis, :] * amplitudes[1, :][:, num.newaxis] 

2254 m6s += m6s_tensile 

2255 

2256 elif amplitudes.shape[0] == 3: 

2257 # tensile MT components - slip input 

2258 iso = pmt.symmat6(1., 1., 1., 0., 0., 0.) 

2259 clvd = pmt.symmat6(-1., -1., 2., 0., 0., 0.) 

2260 

2261 rot_iso = pmt.to6( 

2262 num.dot(rotmat1.T, num.dot(iso, rotmat1))) 

2263 rot_clvd = pmt.to6( 

2264 num.dot(rotmat1.T, num.dot(clvd, rotmat1))) 

2265 

2266 m6s_iso = rot_iso[ 

2267 num.newaxis, :] * amplitudes[1, :][:, num.newaxis] 

2268 m6s_clvd = rot_clvd[ 

2269 num.newaxis, :] * amplitudes[2, :][:, num.newaxis] 

2270 m6s += m6s_iso + m6s_clvd 

2271 else: 

2272 raise ValueError('Unknwown amplitudes shape!') 

2273 else: 

2274 raise ValueError( 

2275 'Unexpected dimension of {}'.format(amplitudes.ndim)) 

2276 

2277 ds = meta.DiscretizedMTSource( 

2278 lat=self.lat, 

2279 lon=self.lon, 

2280 times=times, 

2281 north_shifts=points[:, 0], 

2282 east_shifts=points[:, 1], 

2283 depths=points[:, 2], 

2284 m6s=m6s, 

2285 dl=dl, 

2286 dw=dw, 

2287 nl=nl, 

2288 nw=nw) 

2289 

2290 return ds 

2291 

    def outline(self, cs='xyz'):
        '''
        Get source outline corner coordinates.

        :param cs:
            :ref:`Output coordinate system <coordinate-system-names>`.
        :type cs:
            optional, str

        :returns:
            Corner points in the desired coordinate system.
        :rtype:
            :py:class:`~numpy.ndarray`
        '''
        points = outline_rect_source(self.strike, self.dip, self.length,
                                     self.width, self.anchor)

        # Shift plane-local coordinates to the absolute position.
        points[:, 0] += self.north_shift
        points[:, 1] += self.east_shift
        points[:, 2] += self.depth
        if cs == 'xyz':
            return points
        elif cs == 'xy':
            return points[:, :2]
        elif cs in ('latlon', 'lonlat', 'latlondepth'):
            latlon = ne_to_latlon(
                self.lat, self.lon, points[:, 0], points[:, 1])

            latlon = num.array(latlon).T
            if cs == 'latlon':
                return latlon
            elif cs == 'lonlat':
                # Same values, swapped column order.
                return latlon[:, ::-1]
            else:
                return num.concatenate(
                    (latlon, points[:, 2].reshape((len(points), 1))),
                    axis=1)

2316 

    def points_on_source(self, cs='xyz', **kwargs):
        '''
        Convert relative plane coordinates to geographical coordinates.

        Given x and y coordinates (relative source coordinates between -1.
        and 1.) are converted to the desired coordinate system. Coordinates
        are passed through ``**kwargs`` as ``points_x`` and ``points_y``.

        :param cs:
            :ref:`Output coordinate system <coordinate-system-names>`.
        :type cs:
            optional, str

        :returns:
            Point coordinates in the desired coordinate system.
        :rtype:
            :py:class:`~numpy.ndarray`
        '''

        points = points_on_rect_source(
            self.strike, self.dip, self.length, self.width,
            self.anchor, **kwargs)

        # Shift plane-local coordinates to the absolute position.
        points[:, 0] += self.north_shift
        points[:, 1] += self.east_shift
        points[:, 2] += self.depth
        if cs == 'xyz':
            return points
        elif cs == 'xy':
            return points[:, :2]
        elif cs in ('latlon', 'lonlat', 'latlondepth'):
            latlon = ne_to_latlon(
                self.lat, self.lon, points[:, 0], points[:, 1])

            latlon = num.array(latlon).T
            if cs == 'latlon':
                return latlon
            elif cs == 'lonlat':
                # Same values, swapped column order.
                return latlon[:, ::-1]
            else:
                return num.concatenate(
                    (latlon, points[:, 2].reshape((len(points), 1))),
                    axis=1)

2343 

    def get_nucleation_abs_coord(self, cs='xy'):
        '''
        Get the absolute coordinates of the rupture nucleation point.

        :param cs:
            :ref:`Output coordinate system <coordinate-system-names>`.
        :type cs:
            optional, str

        :returns:
            Nucleation point coordinates, or ``(None, None)`` if no
            nucleation point is set.
        '''

        if self.nucleation_x is None:
            return None, None

        coords = from_plane_coords(self.strike, self.dip, self.length,
                                   self.width, self.depth, self.nucleation_x,
                                   self.nucleation_y, lat=self.lat,
                                   lon=self.lon, north_shift=self.north_shift,
                                   east_shift=self.east_shift, cs=cs)
        return coords

2355 

2356 def pyrocko_moment_tensor(self, store=None, target=None): 

2357 return pmt.MomentTensor( 

2358 strike=self.strike, 

2359 dip=self.dip, 

2360 rake=self.rake, 

2361 scalar_moment=self.get_moment(store, target)) 

2362 

2363 def pyrocko_event(self, store=None, target=None, **kwargs): 

2364 return SourceWithDerivedMagnitude.pyrocko_event( 

2365 self, store, target, 

2366 **kwargs) 

2367 

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        '''
        Create a source from a Pyrocko event object.

        Strike, dip, rake and magnitude are derived from the event's moment
        tensor, if available. Explicit ``**kwargs`` override the derived
        values.
        '''
        d = {}
        mt = ev.moment_tensor
        if mt:
            (strike, dip, rake), _ = mt.both_strike_dip_rake()
            d.update(
                strike=float(strike),
                dip=float(dip),
                rake=float(rake),
                magnitude=float(mt.moment_magnitude()))

        d.update(kwargs)
        return super(RectangularSource, cls).from_pyrocko_event(ev, **d)

2382 

2383 

class PseudoDynamicRupture(SourceWithDerivedMagnitude):
    '''
    Combined Eikonal and Okada quasi-dynamic rupture model.

    Details are described in :doc:`/topics/pseudo-dynamic-rupture`.
    Note: attribute `stf` is not used so far, but kept for future
    applications.
    '''

    discretized_source_class = meta.DiscretizedMTSource

    strike = Float.T(
        default=0.0,
        help='Strike direction in [deg], measured clockwise from north.')

    dip = Float.T(
        default=0.0,
        help='Dip angle in [deg], measured downward from horizontal.')

    length = Float.T(
        default=10. * km,
        help='Length of rectangular source area in [m].')

    width = Float.T(
        default=5. * km,
        help='Width of rectangular source area in [m].')

    anchor = StringChoice.T(
        choices=['top', 'top_left', 'top_right', 'center', 'bottom',
                 'bottom_left', 'bottom_right'],
        default='center',
        optional=True,
        # Help text fixed: it previously listed ``center_left`` and
        # ``center_right``, which are not valid choices.
        help='Anchor point for positioning the plane, can be: ``top, '
             'top_left, top_right, center, bottom, bottom_left, '
             'bottom_right``.')

    nucleation_x__ = Array.T(
        default=num.array([0.]),
        dtype=num.float64,
        serialize_as='list',
        help='Horizontal position of rupture nucleation in normalized fault '
             'plane coordinates (``-1.`` = left edge, ``+1.`` = right edge).')

    nucleation_y__ = Array.T(
        default=num.array([0.]),
        dtype=num.float64,
        serialize_as='list',
        help='Down-dip position of rupture nucleation in normalized fault '
             'plane coordinates (``-1.`` = upper edge, ``+1.`` = lower '
             'edge).')

    nucleation_time__ = Array.T(
        optional=True,
        help='Time in [s] after origin, when nucleation points defined by '
             '``nucleation_x`` and ``nucleation_y`` rupture.',
        dtype=num.float64,
        serialize_as='list')

    gamma = Float.T(
        default=0.8,
        help='Scaling factor between rupture velocity and S-wave velocity: '
             r':math:`v_r = \gamma * v_s`.')

    nx = Int.T(
        default=2,
        help='Number of discrete source patches in x direction (along '
             'strike).')

    ny = Int.T(
        default=2,
        help='Number of discrete source patches in y direction (down dip).')

    slip = Float.T(
        optional=True,
        help='Maximum slip of the rectangular source [m]. '
             'Setting the slip, the tractions/stress field '
             'will be normalized to accommodate the desired maximum slip.')

    rake = Float.T(
        optional=True,
        help='Rake angle in [deg], '
             'measured counter-clockwise from right-horizontal '
             'in on-plane view. Rake is translated into homogeneous '
             'tractions in strike and up-dip direction. ``rake`` is '
             'mutually exclusive with tractions parameter.')

    patches = List.T(
        OkadaSource.T(),
        optional=True,
        help='List of all boundary elements/sub faults/fault patches.')

    patch_mask__ = Array.T(
        dtype=bool,
        serialize_as='list',
        shape=(None,),
        optional=True,
        help='Mask for all boundary elements/sub faults/fault patches. True '
             'leaves the patch in the calculation, False excludes the '
             'patch.')

    tractions = TractionField.T(
        optional=True,
        help='Traction field the rupture plane is exposed to. See the '
             ':py:mod:`pyrocko.gf.tractions` module for more details. '
             'If ``tractions=None`` and ``rake`` is given '
             ':py:class:`~pyrocko.gf.tractions.DirectedTractions` will '
             'be used.')

    coef_mat = Array.T(
        optional=True,
        help='Coefficient matrix linking traction and dislocation field.',
        dtype=num.float64,
        shape=(None, None))

    eikonal_decimation = Int.T(
        optional=True,
        default=1,
        help='Sub-source eikonal factor, a smaller eikonal factor will'
             ' increase the accuracy of rupture front calculation but'
             ' increases also the computation time.')

    decimation_factor = Int.T(
        optional=True,
        default=1,
        help='Sub-source decimation factor, a larger decimation will'
             ' make the result inaccurate but shorten the necessary'
             ' computation time (use for testing purposes only).')

    nthreads = Int.T(
        optional=True,
        default=1,
        help='Number of threads for Okada forward modelling, '
             'matrix inversion and calculation of point subsources. '
             'Note: for small/medium matrices 1 thread is most efficient.')

    pure_shear = Bool.T(
        optional=True,
        default=False,
        help='Calculate only shear tractions and omit tensile tractions.')

    smooth_rupture = Bool.T(
        default=True,
        help='Smooth the tractions by weighting partially ruptured'
             ' fault patches.')

    aggressive_oversampling = Bool.T(
        default=False,
        help='Aggressive oversampling for basesource discretization. '
             'When using \'multilinear\' interpolation oversampling has'
             ' practically no effect.')

2531 

2532 def __init__(self, **kwargs): 

2533 SourceWithDerivedMagnitude.__init__(self, **kwargs) 

2534 self._interpolators = {} 

2535 self.check_conflicts() 

2536 

2537 @property 

2538 def nucleation_x(self): 

2539 return self.nucleation_x__ 

2540 

2541 @nucleation_x.setter 

2542 def nucleation_x(self, nucleation_x): 

2543 if isinstance(nucleation_x, list): 

2544 nucleation_x = num.array(nucleation_x) 

2545 

2546 elif not isinstance( 

2547 nucleation_x, num.ndarray) and nucleation_x is not None: 

2548 

2549 nucleation_x = num.array([nucleation_x]) 

2550 self.nucleation_x__ = nucleation_x 

2551 

2552 @property 

2553 def nucleation_y(self): 

2554 return self.nucleation_y__ 

2555 

2556 @nucleation_y.setter 

2557 def nucleation_y(self, nucleation_y): 

2558 if isinstance(nucleation_y, list): 

2559 nucleation_y = num.array(nucleation_y) 

2560 

2561 elif not isinstance(nucleation_y, num.ndarray) \ 

2562 and nucleation_y is not None: 

2563 nucleation_y = num.array([nucleation_y]) 

2564 

2565 self.nucleation_y__ = nucleation_y 

2566 

2567 @property 

2568 def nucleation(self): 

2569 nucl_x, nucl_y = self.nucleation_x, self.nucleation_y 

2570 

2571 if (nucl_x is None) or (nucl_y is None): 

2572 return None 

2573 

2574 assert nucl_x.shape[0] == nucl_y.shape[0] 

2575 

2576 return num.concatenate( 

2577 (nucl_x[:, num.newaxis], nucl_y[:, num.newaxis]), axis=1) 

2578 

2579 @nucleation.setter 

2580 def nucleation(self, nucleation): 

2581 if isinstance(nucleation, list): 

2582 nucleation = num.array(nucleation) 

2583 

2584 assert nucleation.shape[1] == 2 

2585 

2586 self.nucleation_x = nucleation[:, 0] 

2587 self.nucleation_y = nucleation[:, 1] 

2588 

2589 @property 

2590 def nucleation_time(self): 

2591 return self.nucleation_time__ 

2592 

2593 @nucleation_time.setter 

2594 def nucleation_time(self, nucleation_time): 

2595 if not isinstance(nucleation_time, num.ndarray) \ 

2596 and nucleation_time is not None: 

2597 nucleation_time = num.array([nucleation_time]) 

2598 

2599 self.nucleation_time__ = nucleation_time 

2600 

2601 @property 

2602 def patch_mask(self): 

2603 if (self.patch_mask__ is not None and 

2604 self.patch_mask__.shape == (self.nx * self.ny,)): 

2605 

2606 return self.patch_mask__ 

2607 else: 

2608 return num.ones(self.nx * self.ny, dtype=bool) 

2609 

2610 @patch_mask.setter 

2611 def patch_mask(self, patch_mask): 

2612 if isinstance(patch_mask, list): 

2613 patch_mask = num.array(patch_mask) 

2614 

2615 self.patch_mask__ = patch_mask 

2616 

    def get_tractions(self):
        '''
        Get source traction vectors.

        If :py:attr:`rake` is given, unit length directed traction vectors
        (:py:class:`~pyrocko.gf.tractions.DirectedTractions`) are returned,
        else the given :py:attr:`tractions` are used.

        :returns:
            Traction vectors per patch.
        :rtype:
            :py:class:`~numpy.ndarray`: ``(n_patches, 3)``.
        '''

        if self.rake is not None:
            if num.isnan(self.rake):
                raise ValueError('Rake must be a real number, not NaN.')

            logger.warning(
                'Tractions are derived based on the given source rake.')
            tractions = DirectedTractions(rake=self.rake)
        else:
            tractions = self.tractions
        return tractions.get_tractions(self.nx, self.ny, self.patches)

2641 

    def base_key(self):
        '''
        Get key to query the Green's function store with.

        Extends the parent key with all parameters influencing the
        discretization, so cached results are not wrongly shared between
        differing sources.
        '''
        return SourceWithDerivedMagnitude.base_key(self) + (
            self.slip,
            self.strike,
            self.dip,
            self.rake,
            self.length,
            self.width,
            float(self.nucleation_x.mean()),
            float(self.nucleation_y.mean()),
            self.decimation_factor,
            self.anchor,
            self.pure_shear,
            self.gamma,
            tuple(self.patch_mask))

2657 

2658 def check_conflicts(self): 

2659 if self.tractions and self.rake: 

2660 raise AttributeError( 

2661 'Tractions and rake are mutually exclusive.') 

2662 if self.tractions is None and self.rake is None: 

2663 self.rake = 0. 

2664 

    def get_magnitude(self, store=None, target=None):
        '''
        Get the moment magnitude of the rupture.

        With ``slip`` or ``tractions`` set, the magnitude is derived by
        integrating the moment rate of the discretized source over time;
        otherwise a reference magnitude for a unit moment is returned.

        :raises DerivedMagnitudeError:
            If ``slip``/``tractions`` are given but no ``store`` is
            available to derive the moment from.
        '''
        self.check_conflicts()
        if self.slip is not None or self.tractions is not None:
            if store is None:
                raise DerivedMagnitudeError(
                    'Magnitude for a rectangular source with slip or '
                    'tractions defined can only be derived when earth model '
                    'is set.')

            moment_rate, calc_times = self.discretize_basesource(
                store, target=target).get_moment_rate(store.config.deltat)

            # Sample spacing for integrating the moment rate; the first
            # interval is duplicated to match the number of samples.
            deltat = num.concatenate((
                (num.diff(calc_times)[0],),
                num.diff(calc_times)))

            return float(pmt.moment_to_magnitude(
                num.sum(moment_rate * deltat)))

        else:
            return float(pmt.moment_to_magnitude(1.0))

2686 

2687 def get_factor(self): 

2688 return 1.0 

2689 

    def outline(self, cs='xyz'):
        '''
        Get source outline corner coordinates.

        :param cs:
            :ref:`Output coordinate system <coordinate-system-names>`.
        :type cs:
            optional, str

        :returns:
            Corner points in desired coordinate system.
        :rtype:
            :py:class:`~numpy.ndarray`: ``(5, [2, 3])``.
        '''
        points = outline_rect_source(self.strike, self.dip, self.length,
                                     self.width, self.anchor)

        # Shift plane-local coordinates to the absolute position.
        points[:, 0] += self.north_shift
        points[:, 1] += self.east_shift
        points[:, 2] += self.depth
        if cs == 'xyz':
            return points
        elif cs == 'xy':
            return points[:, :2]
        elif cs in ('latlon', 'lonlat', 'latlondepth'):
            latlon = ne_to_latlon(
                self.lat, self.lon, points[:, 0], points[:, 1])

            latlon = num.array(latlon).T
            if cs == 'latlon':
                return latlon
            elif cs == 'lonlat':
                # Same values, swapped column order.
                return latlon[:, ::-1]
            else:
                return num.concatenate(
                    (latlon, points[:, 2].reshape((len(points), 1))),
                    axis=1)

2727 

    def points_on_source(self, cs='xyz', **kwargs):
        '''
        Convert relative plane coordinates to geographical coordinates.

        Given x and y coordinates (relative source coordinates between -1.
        and 1.) are converted to desired geographical coordinates. Coordinates
        need to be given as :py:class:`~numpy.ndarray` arguments ``points_x``
        and ``points_y``.

        :param cs:
            :ref:`Output coordinate system <coordinate-system-names>`.
        :type cs:
            optional, str

        :returns:
            Point coordinates in desired coordinate system.
        :rtype:
            :py:class:`~numpy.ndarray`: ``(n_points, [2, 3])``.
        '''
        points = points_on_rect_source(
            self.strike, self.dip, self.length, self.width,
            self.anchor, **kwargs)

        # Shift plane-local coordinates to the absolute position.
        points[:, 0] += self.north_shift
        points[:, 1] += self.east_shift
        points[:, 2] += self.depth
        if cs == 'xyz':
            return points
        elif cs == 'xy':
            return points[:, :2]
        elif cs in ('latlon', 'lonlat', 'latlondepth'):
            latlon = ne_to_latlon(
                self.lat, self.lon, points[:, 0], points[:, 1])

            latlon = num.array(latlon).T
            if cs == 'latlon':
                return latlon
            elif cs == 'lonlat':
                # Same values, swapped column order.
                return latlon[:, ::-1]
            else:
                return num.concatenate(
                    (latlon, points[:, 2].reshape((len(points), 1))),
                    axis=1)

2771 

2772 def pyrocko_moment_tensor(self, store=None, target=None): 

2773 # TODO: Now this should be slip, then it depends on the store. 

2774 # TODO: default to tractions is store is not given? 

2775 tractions = self.get_tractions() 

2776 tractions = tractions.mean(axis=0) 

2777 rake = num.arctan2(tractions[1], tractions[0]) # arctan2(dip, slip) 

2778 

2779 return pmt.MomentTensor( 

2780 strike=self.strike, 

2781 dip=self.dip, 

2782 rake=rake, 

2783 scalar_moment=self.get_moment(store, target)) 

2784 

2785 def pyrocko_event(self, store=None, target=None, **kwargs): 

2786 return SourceWithDerivedMagnitude.pyrocko_event( 

2787 self, store, target, 

2788 **kwargs) 

2789 

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        '''
        Create a source from a Pyrocko event object.

        Strike, dip and rake are derived from the event's moment tensor, if
        available. Explicit ``**kwargs`` override the derived values.
        '''
        d = {}
        mt = ev.moment_tensor
        if mt:
            (strike, dip, rake), _ = mt.both_strike_dip_rake()
            d.update(
                strike=float(strike),
                dip=float(dip),
                rake=float(rake))

        d.update(kwargs)
        return super(PseudoDynamicRupture, cls).from_pyrocko_event(ev, **d)

2803 

    def _discretize_points(self, store, *args, **kwargs):
        '''
        Discretize source plane with equal vertical and horizontal spacing.

        Additional ``*args`` and ``**kwargs`` are passed to
        :py:meth:`points_on_source`.

        :param store:
            Green's function database (needs to cover whole region of the
            source).
        :type store:
            :py:class:`~pyrocko.gf.store.Store`

        :returns:
            Number of points in strike and dip direction, distance
            between adjacent points, coordinates (latlondepth) and coordinates
            (xy on fault) for discrete points.
        :rtype:
            (int, int, float, :py:class:`~numpy.ndarray`,
            :py:class:`~numpy.ndarray`).
        '''
        anch_x, anch_y = map_anchor[self.anchor]

        # Sample a column of points down dip to probe the minimum shear
        # wave velocity along the fault.
        npoints = int(self.width // km) + 1
        points = num.zeros((npoints, 3))
        points[:, 1] = num.linspace(-1., 1., npoints)
        points[:, 1] = (points[:, 1] - anch_y) * self.width/2

        # Rotate plane-local coordinates into the geographic frame.
        rotmat = pmt.euler_to_matrix(self.dip*d2r, self.strike*d2r, 0.0)
        points = num.dot(rotmat.T, points.T).T
        points[:, 2] += self.depth

        vs_min = store.config.get_vs(
            self.lat, self.lon, points,
            interpolation='nearest_neighbor')
        # Lower bound on vs guards against excessively dense sampling in
        # very slow layers.
        vr_min = max(vs_min.min(), .5*km) * self.gamma

        oversampling = 10.
        delta_l = self.length / (self.nx * oversampling)
        delta_w = self.width / (self.ny * oversampling)

        # Point spacing limited by GF sampling, patch size and rupture
        # front travel per GF time step.
        delta = self.eikonal_decimation * num.min([
            store.config.deltat * vr_min / oversampling,
            delta_l, delta_w] + [
            deltas for deltas in store.config.deltas])

        # Snap spacing so a whole number of intervals fits the width.
        delta = delta_w / num.ceil(delta_w / delta)

        nx = int(num.ceil(self.length / delta)) + 1
        ny = int(num.ceil(self.width / delta)) + 1

        # Spread the length remainder symmetrically beyond the fault edges.
        rem_l = (nx-1)*delta - self.length
        lim_x = rem_l / self.length

        points_xy = num.zeros((nx * ny, 2))
        points_xy[:, 0] = num.repeat(
            num.linspace(-1.-lim_x, 1.+lim_x, nx), ny)
        points_xy[:, 1] = num.tile(
            num.linspace(-1., 1., ny), nx)

        points = self.points_on_source(
            points_x=points_xy[:, 0],
            points_y=points_xy[:, 1],
            **kwargs)

        return nx, ny, delta, points, points_xy

2870 

    def _discretize_rupture_v(self, store, interpolation='nearest_neighbor',
                              points=None):
        '''
        Get rupture velocity for discrete points on source plane.

        :param store:
            Green's function database (needs to cover the whole region of the
            source)
        :type store:
            optional, :py:class:`~pyrocko.gf.store.Store`

        :param interpolation:
            Interpolation method to use (choose between ``'nearest_neighbor'``
            and ``'multilinear'``).
        :type interpolation:
            optional, str

        :param points:
            Coordinates on fault (-1.:1.) of discrete points.
        :type points:
            optional, :py:class:`~numpy.ndarray`: ``(n_points, 2)``

        :returns:
            Rupture velocity assumed as :math:`v_s * \\gamma` for discrete
            points.
        :rtype:
            :py:class:`~numpy.ndarray`: ``(n_points, )``.
        '''

        # Fall back to the plane's own discretization if no points given.
        if points is None:
            _, _, _, points, _ = self._discretize_points(store, cs='xyz')

        return store.config.get_vs(
            self.lat, self.lon,
            points=points,
            interpolation=interpolation) * self.gamma

2907 

2908 def discretize_time( 

2909 self, store, interpolation='nearest_neighbor', 

2910 vr=None, times=None, *args, **kwargs): 

2911 ''' 

2912 Get rupture start time for discrete points on source plane. 

2913 

2914 :param store: 

2915 Green's function database (needs to cover whole region of the 

2916 source) 

2917 :type store: 

2918 :py:class:`~pyrocko.gf.store.Store` 

2919 

2920 :param interpolation: 

2921 Interpolation method to use (choose between ``'nearest_neighbor'`` 

2922 and ``'multilinear'``). 

2923 :type interpolation: 

2924 optional, str 

2925 

2926 :param vr: 

2927 Array, containing rupture user defined rupture velocity values. 

2928 :type vr: 

2929 optional, :py:class:`~numpy.ndarray` 

2930 

2931 :param times: 

2932 Array, containing zeros, where rupture is starting, real positive 

2933 numbers at later secondary nucleation points and -1, where time 

2934 will be calculated. If not given, rupture starts at nucleation_x, 

2935 nucleation_y. Times are given for discrete points with equal 

2936 horizontal and vertical spacing. 

2937 :type times: 

2938 optional, :py:class:`~numpy.ndarray` 

2939 

2940 :returns: 

2941 Coordinates (latlondepth), coordinates (xy), rupture velocity, 

2942 rupture propagation time of discrete points. 

2943 :rtype: 

2944 :py:class:`~numpy.ndarray`: ``(n_points, 3)``, 

2945 :py:class:`~numpy.ndarray`: ``(n_points, 2)``, 

2946 :py:class:`~numpy.ndarray`: ``(n_points_dip, n_points_strike)``, 

2947 :py:class:`~numpy.ndarray`: ``(n_points_dip, n_points_strike)``. 

2948 ''' 

2949 nx, ny, delta, points, points_xy = self._discretize_points( 

2950 store, cs='xyz') 

2951 

2952 if vr is None or vr.shape != tuple((nx, ny)): 

2953 if vr: 

2954 logger.warning( 

2955 'Given rupture velocities are not in right shape: ' 

2956 '(%i, %i), but needed is (%i, %i).', *vr.shape + (nx, ny)) 

2957 vr = self._discretize_rupture_v(store, interpolation, points)\ 

2958 .reshape(nx, ny) 

2959 

2960 if vr.shape != tuple((nx, ny)): 

2961 logger.warning( 

2962 'Given rupture velocities are not in right shape. Therefore' 

2963 ' standard rupture velocity array is used.') 

2964 

2965 def initialize_times(): 

2966 nucl_x, nucl_y = self.nucleation_x, self.nucleation_y 

2967 

2968 if nucl_x.shape != nucl_y.shape: 

2969 raise ValueError( 

2970 'Nucleation coordinates have different shape.') 

2971 

2972 dist_points = num.array([ 

2973 num.linalg.norm(points_xy - num.array([x, y]).ravel(), axis=1) 

2974 for x, y in zip(nucl_x, nucl_y)]) 

2975 nucl_indices = num.argmin(dist_points, axis=1) 

2976 

2977 if self.nucleation_time is None: 

2978 nucl_times = num.zeros_like(nucl_indices) 

2979 else: 

2980 if self.nucleation_time.shape == nucl_x.shape: 

2981 nucl_times = self.nucleation_time 

2982 else: 

2983 raise ValueError( 

2984 'Nucleation coordinates and times have different ' 

2985 'shapes') 

2986 

2987 t = num.full(nx * ny, -1.) 

2988 t[nucl_indices] = nucl_times 

2989 return t.reshape(nx, ny) 

2990 

2991 if times is None: 

2992 times = initialize_times() 

2993 elif times.shape != tuple((nx, ny)): 

2994 times = initialize_times() 

2995 logger.warning( 

2996 'Given times are not in right shape. Therefore standard time' 

2997 ' array is used.') 

2998 

2999 eikonal_ext.eikonal_solver_fmm_cartesian( 

3000 speeds=vr, times=times, delta=delta) 

3001 

3002 return points, points_xy, vr, times 

3003 

    def get_vr_time_interpolators(
            self, store, interpolation='nearest_neighbor', force=False,
            **kwargs):
        '''
        Get interpolators for rupture velocity and rupture time.

        Additional ``**kwargs`` are passed to :py:meth:`discretize_time`.

        :param store:
            Green's function database (needs to cover whole region of the
            source).
        :type store:
            :py:class:`~pyrocko.gf.store.Store`

        :param interpolation:
            Interpolation method to use (choose between ``'nearest_neighbor'``
            and ``'multilinear'``).
        :type interpolation:
            optional, str

        :param force:
            Force recalculation of the interpolators (e.g. after change of
            nucleation point locations/times). Default is ``False``.
        :type force:
            optional, bool

        :returns:
            Number of points in strike and dip direction, rupture times,
            rupture velocities and grid interpolators for both.
        :rtype:
            (int, int, :py:class:`~numpy.ndarray`,
            :py:class:`~numpy.ndarray`,
            :py:class:`~scipy.interpolate.RegularGridInterpolator`,
            :py:class:`~scipy.interpolate.RegularGridInterpolator`)
        '''
        interp_map = {'multilinear': 'linear', 'nearest_neighbor': 'nearest'}
        if interpolation not in interp_map:
            raise TypeError(
                'Interpolation method %s not available' % interpolation)

        # Results are cached per interpolation method.
        if not self._interpolators.get(interpolation, False) or force:
            _, points_xy, vr, times = self.discretize_time(
                store, **kwargs)

            if self.length <= 0.:
                raise ValueError(
                    'length must be larger then 0. not %g' % self.length)

            if self.width <= 0.:
                raise ValueError(
                    'width must be larger then 0. not %g' % self.width)

            nx, ny = times.shape
            anch_x, anch_y = map_anchor[self.anchor]

            # Scale normalized fault coordinates to [m] relative to anchor.
            points_xy[:, 0] = (points_xy[:, 0] - anch_x) * self.length / 2.
            points_xy[:, 1] = (points_xy[:, 1] - anch_y) * self.width / 2.

            self._interpolators[interpolation] = (
                nx, ny, times, vr,
                RegularGridInterpolator(
                    (points_xy[::ny, 0], points_xy[:ny, 1]), times,
                    method=interp_map[interpolation]),
                RegularGridInterpolator(
                    (points_xy[::ny, 0], points_xy[:ny, 1]), vr,
                    method=interp_map[interpolation]))
        return self._interpolators[interpolation]

3062 

    def discretize_patches(
            self, store, interpolation='nearest_neighbor', force=False,
            grid_shape=(),
            **kwargs):
        '''
        Get rupture start time and OkadaSource elements for points on rupture.

        All source elements and their corresponding center points are
        calculated and stored in the :py:attr:`patches` attribute.

        Additional ``**kwargs`` are passed to :py:meth:`discretize_time`.

        :param store:
            Green's function database (needs to cover whole region of the
            source).
        :type store:
            :py:class:`~pyrocko.gf.store.Store`

        :param interpolation:
            Interpolation method to use (choose between ``'nearest_neighbor'``
            and ``'multilinear'``).
        :type interpolation:
            optional, str

        :param force:
            Force recalculation of the vr and time interpolators ( e.g. after
            change of nucleation point locations/times). Default is ``False``.
        :type force:
            optional, bool

        :param grid_shape:
            Desired sub fault patch grid size (nlength, nwidth). Either factor
            or grid_shape should be set.
        :type grid_shape:
            optional, tuple of int
        '''
        # Rupture onset times and velocities on the eikonal grid; nx, ny is
        # the native grid shape used as fallback patch grid size below.
        nx, ny, times, vr, time_interpolator, vr_interpolator = \
            self.get_vr_time_interpolators(
                store,
                interpolation=interpolation, force=force, **kwargs)
        anch_x, anch_y = map_anchor[self.anchor]

        # Half extensions al*, aw* of the fault relative to the anchor point
        # (Okada convention: al1/al2 along strike, aw1/aw2 along dip).
        al = self.length / 2.
        aw = self.width / 2.
        al1 = -(al + anch_x * al)
        al2 = al - anch_x * al
        aw1 = -aw + anch_y * aw
        aw2 = aw + anch_y * aw
        assert num.abs([al1, al2]).sum() == self.length
        assert num.abs([aw1, aw2]).sum() == self.width

        def get_lame(*a, **kw):
            # Elastic parameters from the store's earth model: returns
            # (shear modulus, Poisson ratio) with
            # lambda = vp**2 * rho - 2 * mu and nu = lambda / (2 (lambda+mu)).
            shear_mod = store.config.get_shear_moduli(*a, **kw)
            lamb = store.config.get_vp(*a, **kw)**2 \
                * store.config.get_rho(*a, **kw) - 2. * shear_mod
            return shear_mod, lamb / (2. * (lamb + shear_mod))

        # Elastic parameters at the source reference point, used for the
        # single coarse OkadaSource spanning the whole fault.
        shear_mod, poisson = get_lame(
            self.lat, self.lon,
            num.array([[self.north_shift, self.east_shift, self.depth]]),
            interpolation=interpolation)

        okada_src = OkadaSource(
            lat=self.lat, lon=self.lon,
            strike=self.strike, dip=self.dip,
            north_shift=self.north_shift, east_shift=self.east_shift,
            depth=self.depth,
            al1=al1, al2=al2, aw1=aw1, aw2=aw2,
            poisson=poisson.mean(),
            shearmod=shear_mod.mean(),
            opening=kwargs.get('opening', 0.))

        # Choose the patch grid size: explicit grid_shape wins, otherwise
        # fall back to the native eikonal grid shape.
        if not (self.nx and self.ny):
            if grid_shape:
                self.nx, self.ny = grid_shape
            else:
                self.nx = nx
                self.ny = ny

        source_disc, source_points = okada_src.discretize(self.nx, self.ny)

        # Re-evaluate elastic parameters at each patch center.
        shear_mod, poisson = get_lame(
            self.lat, self.lon,
            num.array([src.source_patch()[:3] for src in source_disc]),
            interpolation=interpolation)

        # Rupture times/velocities per patch: interpolate only if the patch
        # grid differs from the native eikonal grid.
        if (self.nx, self.ny) != (nx, ny):
            times_interp = time_interpolator(source_points[:, :2])
            vr_interp = vr_interpolator(source_points[:, :2])
        else:
            times_interp = times.T.ravel()
            vr_interp = vr.T.ravel()

        for isrc, src in enumerate(source_disc):
            src.vr = vr_interp[isrc]
            # Patch rupture time is absolute (origin time added).
            src.time = times_interp[isrc] + self.time

        self.patches = source_disc

3162 def discretize_basesource(self, store, target=None): 

3163 ''' 

3164 Prepare source for synthetic waveform calculation. 

3165 

3166 :param store: 

3167 Green's function database (needs to cover whole region of the 

3168 source). 

3169 :type store: 

3170 :py:class:`~pyrocko.gf.store.Store` 

3171 

3172 :param target: 

3173 Target information. 

3174 :type target: 

3175 optional, :py:class:`~pyrocko.gf.targets.Target` 

3176 

3177 :returns: 

3178 Source discretized by a set of moment tensors and times. 

3179 :rtype: 

3180 :py:class:`~pyrocko.gf.meta.DiscretizedMTSource` 

3181 ''' 

3182 if not target: 

3183 interpolation = 'nearest_neighbor' 

3184 else: 

3185 interpolation = target.interpolation 

3186 

3187 if not self.patches: 

3188 self.discretize_patches(store, interpolation) 

3189 

3190 if self.coef_mat is None: 

3191 self.calc_coef_mat() 

3192 

3193 delta_slip, slip_times = self.get_delta_slip(store) 

3194 npatches = self.nx * self.ny 

3195 ntimes = slip_times.size 

3196 

3197 anch_x, anch_y = map_anchor[self.anchor] 

3198 

3199 pln = self.length / self.nx 

3200 pwd = self.width / self.ny 

3201 

3202 patch_coords = num.array([ 

3203 (p.ix, p.iy) 

3204 for p in self.patches]).reshape(self.nx, self.ny, 2) 

3205 

3206 # boundary condition is zero-slip 

3207 # is not valid to avoid unwished interpolation effects 

3208 slip_grid = num.zeros((self.nx + 2, self.ny + 2, ntimes, 3)) 

3209 slip_grid[1:-1, 1:-1, :, :] = \ 

3210 delta_slip.reshape(self.nx, self.ny, ntimes, 3) 

3211 

3212 slip_grid[0, 0, :, :] = slip_grid[1, 1, :, :] 

3213 slip_grid[0, -1, :, :] = slip_grid[1, -2, :, :] 

3214 slip_grid[-1, 0, :, :] = slip_grid[-2, 1, :, :] 

3215 slip_grid[-1, -1, :, :] = slip_grid[-2, -2, :, :] 

3216 

3217 slip_grid[1:-1, 0, :, :] = slip_grid[1:-1, 1, :, :] 

3218 slip_grid[1:-1, -1, :, :] = slip_grid[1:-1, -2, :, :] 

3219 slip_grid[0, 1:-1, :, :] = slip_grid[1, 1:-1, :, :] 

3220 slip_grid[-1, 1:-1, :, :] = slip_grid[-2, 1:-1, :, :] 

3221 

3222 def make_grid(patch_parameter): 

3223 grid = num.zeros((self.nx + 2, self.ny + 2)) 

3224 grid[1:-1, 1:-1] = patch_parameter.reshape(self.nx, self.ny) 

3225 

3226 grid[0, 0] = grid[1, 1] 

3227 grid[0, -1] = grid[1, -2] 

3228 grid[-1, 0] = grid[-2, 1] 

3229 grid[-1, -1] = grid[-2, -2] 

3230 

3231 grid[1:-1, 0] = grid[1:-1, 1] 

3232 grid[1:-1, -1] = grid[1:-1, -2] 

3233 grid[0, 1:-1] = grid[1, 1:-1] 

3234 grid[-1, 1:-1] = grid[-2, 1:-1] 

3235 

3236 return grid 

3237 

3238 lamb = self.get_patch_attribute('lamb') 

3239 mu = self.get_patch_attribute('shearmod') 

3240 

3241 lamb_grid = make_grid(lamb) 

3242 mu_grid = make_grid(mu) 

3243 

3244 coords_x = num.zeros(self.nx + 2) 

3245 coords_x[1:-1] = patch_coords[:, 0, 0] 

3246 coords_x[0] = coords_x[1] - pln / 2 

3247 coords_x[-1] = coords_x[-2] + pln / 2 

3248 

3249 coords_y = num.zeros(self.ny + 2) 

3250 coords_y[1:-1] = patch_coords[0, :, 1] 

3251 coords_y[0] = coords_y[1] - pwd / 2 

3252 coords_y[-1] = coords_y[-2] + pwd / 2 

3253 

3254 slip_interp = RegularGridInterpolator( 

3255 (coords_x, coords_y, slip_times), 

3256 slip_grid, method='nearest') 

3257 

3258 lamb_interp = RegularGridInterpolator( 

3259 (coords_x, coords_y), 

3260 lamb_grid, method='nearest') 

3261 

3262 mu_interp = RegularGridInterpolator( 

3263 (coords_x, coords_y), 

3264 mu_grid, method='nearest') 

3265 

3266 # discretize basesources 

3267 mindeltagf = min(tuple( 

3268 (self.length / self.nx, self.width / self.ny) + 

3269 tuple(store.config.deltas))) 

3270 

3271 nl = int((1. / self.decimation_factor) * 

3272 num.ceil(pln / mindeltagf)) + 1 

3273 nw = int((1. / self.decimation_factor) * 

3274 num.ceil(pwd / mindeltagf)) + 1 

3275 nsrc_patch = int(nl * nw) 

3276 dl = pln / nl 

3277 dw = pwd / nw 

3278 

3279 patch_area = dl * dw 

3280 

3281 xl = num.linspace(-0.5 * (pln - dl), 0.5 * (pln - dl), nl) 

3282 xw = num.linspace(-0.5 * (pwd - dw), 0.5 * (pwd - dw), nw) 

3283 

3284 base_coords = num.zeros((nsrc_patch, 3)) 

3285 base_coords[:, 0] = num.tile(xl, nw) 

3286 base_coords[:, 1] = num.repeat(xw, nl) 

3287 base_coords = num.tile(base_coords, (npatches, 1)) 

3288 

3289 center_coords = num.zeros((npatches, 3)) 

3290 center_coords[:, 0] = num.repeat( 

3291 num.arange(self.nx) * pln + pln / 2, self.ny) - self.length / 2 

3292 center_coords[:, 1] = num.tile( 

3293 num.arange(self.ny) * pwd + pwd / 2, self.nx) - self.width / 2 

3294 

3295 base_coords -= center_coords.repeat(nsrc_patch, axis=0) 

3296 nbaselocs = base_coords.shape[0] 

3297 

3298 base_interp = base_coords.repeat(ntimes, axis=0) 

3299 

3300 base_times = num.tile(slip_times, nbaselocs) 

3301 base_interp[:, 0] -= anch_x * self.length / 2 

3302 base_interp[:, 1] -= anch_y * self.width / 2 

3303 base_interp[:, 2] = base_times 

3304 

3305 _, _, _, _, time_interpolator, _ = self.get_vr_time_interpolators( 

3306 store, interpolation=interpolation) 

3307 

3308 time_eikonal_max = time_interpolator.values.max() 

3309 

3310 nbasesrcs = base_interp.shape[0] 

3311 delta_slip = slip_interp(base_interp).reshape(nbaselocs, ntimes, 3) 

3312 lamb = lamb_interp(base_interp[:, :2]).ravel() 

3313 mu = mu_interp(base_interp[:, :2]).ravel() 

3314 

3315 if False: 

3316 try: 

3317 import matplotlib.pyplot as plt 

3318 coords = base_coords.copy() 

3319 norm = num.sum(num.linalg.norm(delta_slip, axis=2), axis=1) 

3320 plt.scatter(coords[:, 0], coords[:, 1], c=norm) 

3321 plt.show() 

3322 except AttributeError: 

3323 pass 

3324 

3325 base_interp[:, 2] = 0. 

3326 rotmat = pmt.euler_to_matrix(self.dip * d2r, self.strike * d2r, 0.0) 

3327 base_interp = num.dot(rotmat.T, base_interp.T).T 

3328 base_interp[:, 0] += self.north_shift 

3329 base_interp[:, 1] += self.east_shift 

3330 base_interp[:, 2] += self.depth 

3331 

3332 slip_strike = delta_slip[:, :, 0].ravel() 

3333 slip_dip = delta_slip[:, :, 1].ravel() 

3334 slip_norm = delta_slip[:, :, 2].ravel() 

3335 

3336 slip_shear = num.linalg.norm([slip_strike, slip_dip], axis=0) 

3337 slip_rake = r2d * num.arctan2(slip_dip, slip_strike) 

3338 

3339 m6s = okada_ext.patch2m6( 

3340 strikes=num.full(nbasesrcs, self.strike, dtype=float), 

3341 dips=num.full(nbasesrcs, self.dip, dtype=float), 

3342 rakes=slip_rake, 

3343 disl_shear=slip_shear, 

3344 disl_norm=slip_norm, 

3345 lamb=lamb, 

3346 mu=mu, 

3347 nthreads=self.nthreads) 

3348 

3349 m6s *= patch_area 

3350 

3351 dl = -self.patches[0].al1 + self.patches[0].al2 

3352 dw = -self.patches[0].aw1 + self.patches[0].aw2 

3353 

3354 base_times[base_times > time_eikonal_max] = time_eikonal_max 

3355 

3356 ds = meta.DiscretizedMTSource( 

3357 lat=self.lat, 

3358 lon=self.lon, 

3359 times=base_times + self.time, 

3360 north_shifts=base_interp[:, 0], 

3361 east_shifts=base_interp[:, 1], 

3362 depths=base_interp[:, 2], 

3363 m6s=m6s, 

3364 dl=dl, 

3365 dw=dw, 

3366 nl=self.nx, 

3367 nw=self.ny) 

3368 

3369 return ds 

3370 

3371 def calc_coef_mat(self): 

3372 ''' 

3373 Calculate coefficients connecting tractions and dislocations. 

3374 ''' 

3375 if not self.patches: 

3376 raise ValueError( 

3377 'Patches are needed. Please calculate them first.') 

3378 

3379 self.coef_mat = make_okada_coefficient_matrix( 

3380 self.patches, nthreads=self.nthreads, pure_shear=self.pure_shear) 

3381 

3382 def get_patch_attribute(self, attr): 

3383 ''' 

3384 Get patch attributes. 

3385 

3386 :param attr: 

3387 Name of selected attribute (see 

3388 :py:class`pyrocko.modelling.okada.OkadaSource`). 

3389 :type attr: 

3390 str 

3391 

3392 :returns: 

3393 Array with attribute value for each fault patch. 

3394 :rtype: 

3395 :py:class:`~numpy.ndarray` 

3396 

3397 ''' 

3398 if not self.patches: 

3399 raise ValueError( 

3400 'Patches are needed. Please calculate them first.') 

3401 return num.array([getattr(p, attr) for p in self.patches]) 

3402 

3403 def get_slip( 

3404 self, 

3405 time=None, 

3406 scale_slip=True, 

3407 interpolation='nearest_neighbor', 

3408 **kwargs): 

3409 ''' 

3410 Get slip per subfault patch for given time after rupture start. 

3411 

3412 :param time: 

3413 Time after origin [s], for which slip is computed. If not 

3414 given, final static slip is returned. 

3415 :type time: 

3416 optional, float > 0. 

3417 

3418 :param scale_slip: 

3419 If ``True`` and :py:attr:`slip` given, all slip values are scaled 

3420 to fit the given maximum slip. 

3421 :type scale_slip: 

3422 optional, bool 

3423 

3424 :param interpolation: 

3425 Interpolation method to use (choose between ``'nearest_neighbor'`` 

3426 and ``'multilinear'``). 

3427 :type interpolation: 

3428 optional, str 

3429 

3430 :returns: 

3431 Inverted dislocations (:math:`u_{strike}, u_{dip}, u_{tensile}`) 

3432 for each source patch. 

3433 :rtype: 

3434 :py:class:`~numpy.ndarray`: ``(n_sources, 3)`` 

3435 ''' 

3436 

3437 if self.patches is None: 

3438 raise ValueError( 

3439 'Please discretize the source first (discretize_patches())') 

3440 npatches = len(self.patches) 

3441 tractions = self.get_tractions() 

3442 time_patch_max = self.get_patch_attribute('time').max() - self.time 

3443 

3444 time_patch = time 

3445 if time is None: 

3446 time_patch = time_patch_max 

3447 

3448 if self.coef_mat is None: 

3449 self.calc_coef_mat() 

3450 

3451 if tractions.shape != (npatches, 3): 

3452 raise AttributeError( 

3453 'The traction vector is of invalid shape.' 

3454 ' Required shape is (npatches, 3)') 

3455 

3456 patch_mask = num.ones(npatches, dtype=bool) 

3457 if self.patch_mask is not None: 

3458 patch_mask = self.patch_mask 

3459 

3460 times = self.get_patch_attribute('time') - self.time 

3461 times[~patch_mask] = time_patch + 1. # exlcude unmasked patches 

3462 relevant_sources = num.nonzero(times <= time_patch)[0] 

3463 disloc_est = num.zeros_like(tractions) 

3464 

3465 if self.smooth_rupture: 

3466 patch_activation = num.zeros(npatches) 

3467 

3468 nx, ny, times, vr, time_interpolator, vr_interpolator = \ 

3469 self.get_vr_time_interpolators( 

3470 store, interpolation=interpolation) 

3471 

3472 # Getting the native Eikonal grid, bit hackish 

3473 points_x = num.round(time_interpolator.grid[0], decimals=2) 

3474 points_y = num.round(time_interpolator.grid[1], decimals=2) 

3475 times_eikonal = time_interpolator.values 

3476 

3477 time_max = time 

3478 if time is None: 

3479 time_max = times_eikonal.max() 

3480 

3481 for ip, p in enumerate(self.patches): 

3482 ul = num.round((p.ix + p.al1, p.iy + p.aw1), decimals=2) 

3483 lr = num.round((p.ix + p.al2, p.iy + p.aw2), decimals=2) 

3484 

3485 idx_length = num.logical_and( 

3486 points_x >= ul[0], points_x <= lr[0]) 

3487 idx_width = num.logical_and( 

3488 points_y >= ul[1], points_y <= lr[1]) 

3489 

3490 times_patch = times_eikonal[num.ix_(idx_length, idx_width)] 

3491 if times_patch.size == 0: 

3492 raise AttributeError('could not use smooth_rupture') 

3493 

3494 patch_activation[ip] = \ 

3495 (times_patch <= time_max).sum() / times_patch.size 

3496 

3497 if time_patch == 0 and time_patch != time_patch_max: 

3498 patch_activation[ip] = 0. 

3499 

3500 patch_activation[~patch_mask] = 0. # exlcude unmasked patches 

3501 

3502 relevant_sources = num.nonzero(patch_activation > 0.)[0] 

3503 

3504 if relevant_sources.size == 0: 

3505 return disloc_est 

3506 

3507 indices_disl = num.repeat(relevant_sources * 3, 3) 

3508 indices_disl[1::3] += 1 

3509 indices_disl[2::3] += 2 

3510 

3511 disloc_est[relevant_sources] = invert_fault_dislocations_bem( 

3512 stress_field=tractions[relevant_sources, :].ravel(), 

3513 coef_mat=self.coef_mat[indices_disl, :][:, indices_disl], 

3514 pure_shear=self.pure_shear, nthreads=self.nthreads, 

3515 epsilon=None, 

3516 **kwargs) 

3517 

3518 if self.smooth_rupture: 

3519 disloc_est *= patch_activation[:, num.newaxis] 

3520 

3521 if scale_slip and self.slip is not None: 

3522 disloc_tmax = num.zeros(npatches) 

3523 

3524 indices_disl = num.repeat(num.nonzero(patch_mask)[0] * 3, 3) 

3525 indices_disl[1::3] += 1 

3526 indices_disl[2::3] += 2 

3527 

3528 disloc_tmax[patch_mask] = num.linalg.norm( 

3529 invert_fault_dislocations_bem( 

3530 stress_field=tractions[patch_mask, :].ravel(), 

3531 coef_mat=self.coef_mat[indices_disl, :][:, indices_disl], 

3532 pure_shear=self.pure_shear, nthreads=self.nthreads, 

3533 epsilon=None, 

3534 **kwargs), axis=1) 

3535 

3536 disloc_tmax_max = disloc_tmax.max() 

3537 if disloc_tmax_max == 0.: 

3538 logger.warning( 

3539 'slip scaling not performed. Maximum slip is 0.') 

3540 

3541 disloc_est *= self.slip / disloc_tmax_max 

3542 

3543 return disloc_est 

3544 

3545 def get_delta_slip( 

3546 self, 

3547 store=None, 

3548 deltat=None, 

3549 delta=True, 

3550 interpolation='nearest_neighbor', 

3551 **kwargs): 

3552 ''' 

3553 Get slip change snapshots. 

3554 

3555 The time interval, within which the slip changes are computed is 

3556 determined by the sampling rate of the Green's function database or 

3557 ``deltat``. Additional ``**kwargs`` are passed to :py:meth:`get_slip`. 

3558 

3559 :param store: 

3560 Green's function database (needs to cover whole region of of the 

3561 source). Its sampling interval is used as time increment for slip 

3562 difference calculation. Either ``deltat`` or ``store`` should be 

3563 given. 

3564 :type store: 

3565 optional, :py:class:`~pyrocko.gf.store.Store` 

3566 

3567 :param deltat: 

3568 Time interval for slip difference calculation [s]. Either 

3569 ``deltat`` or ``store`` should be given. 

3570 :type deltat: 

3571 optional, float 

3572 

3573 :param delta: 

3574 If ``True``, slip differences between two time steps are given. If 

3575 ``False``, cumulative slip for all time steps. 

3576 :type delta: 

3577 optional, bool 

3578 

3579 :param interpolation: 

3580 Interpolation method to use (choose between ``'nearest_neighbor'`` 

3581 and ``'multilinear'``). 

3582 :type interpolation: 

3583 optional, str 

3584 

3585 :returns: 

3586 Displacement changes(:math:`\\Delta u_{strike}, 

3587 \\Delta u_{dip} , \\Delta u_{tensile}`) for each source patch and 

3588 time; corner times, for which delta slip is computed. The order of 

3589 displacement changes array is: 

3590 

3591 .. math:: 

3592 

3593 &[[\\\\ 

3594 &[\\Delta u_{strike, patch1, t1}, 

3595 \\Delta u_{dip, patch1, t1}, 

3596 \\Delta u_{tensile, patch1, t1}],\\\\ 

3597 &[\\Delta u_{strike, patch1, t2}, 

3598 \\Delta u_{dip, patch1, t2}, 

3599 \\Delta u_{tensile, patch1, t2}]\\\\ 

3600 &], [\\\\ 

3601 &[\\Delta u_{strike, patch2, t1}, ...],\\\\ 

3602 &[\\Delta u_{strike, patch2, t2}, ...]]]\\\\ 

3603 

3604 :rtype: :py:class:`~numpy.ndarray`: ``(n_sources, n_times, 3)``, 

3605 :py:class:`~numpy.ndarray`: ``(n_times, )`` 

3606 ''' 

3607 if store and deltat: 

3608 raise AttributeError( 

3609 'Argument collision. ' 

3610 'Please define only the store or the deltat argument.') 

3611 

3612 if store: 

3613 deltat = store.config.deltat 

3614 

3615 if not deltat: 

3616 raise AttributeError('Please give a GF store or set deltat.') 

3617 

3618 npatches = len(self.patches) 

3619 

3620 _, _, _, _, time_interpolator, _ = self.get_vr_time_interpolators( 

3621 store, interpolation=interpolation) 

3622 tmax = time_interpolator.values.max() 

3623 

3624 calc_times = num.arange(0., tmax + deltat, deltat) 

3625 calc_times[calc_times > tmax] = tmax 

3626 

3627 disloc_est = num.zeros((npatches, calc_times.size, 3)) 

3628 

3629 for itime, t in enumerate(calc_times): 

3630 disloc_est[:, itime, :] = self.get_slip( 

3631 time=t, scale_slip=False, **kwargs) 

3632 

3633 if self.slip: 

3634 disloc_tmax = num.linalg.norm( 

3635 self.get_slip(scale_slip=False, time=tmax), 

3636 axis=1) 

3637 

3638 disloc_tmax_max = disloc_tmax.max() 

3639 if disloc_tmax_max == 0.: 

3640 logger.warning( 

3641 'Slip scaling not performed. Maximum slip is 0.') 

3642 else: 

3643 disloc_est *= self.slip / disloc_tmax_max 

3644 

3645 if not delta: 

3646 return disloc_est, calc_times 

3647 

3648 # if we have only one timestep there is no gradient 

3649 if calc_times.size > 1: 

3650 disloc_init = disloc_est[:, 0, :] 

3651 disloc_est = num.diff(disloc_est, axis=1) 

3652 disloc_est = num.concatenate(( 

3653 disloc_init[:, num.newaxis, :], disloc_est), axis=1) 

3654 

3655 calc_times = calc_times 

3656 

3657 return disloc_est, calc_times 

3658 

3659 def get_slip_rate(self, *args, **kwargs): 

3660 ''' 

3661 Get slip rate inverted from patches. 

3662 

3663 The time interval, within which the slip rates are computed is 

3664 determined by the sampling rate of the Green's function database or 

3665 ``deltat``. Additional ``*args`` and ``**kwargs`` are passed to 

3666 :py:meth:`get_delta_slip`. 

3667 

3668 :returns: 

3669 Slip rates (:math:`\\Delta u_{strike}/\\Delta t`, 

3670 :math:`\\Delta u_{dip}/\\Delta t, \\Delta u_{tensile}/\\Delta t`) 

3671 for each source patch and time; corner times, for which slip rate 

3672 is computed. The order of sliprate array is: 

3673 

3674 .. math:: 

3675 

3676 &[[\\\\ 

3677 &[\\Delta u_{strike, patch1, t1}/\\Delta t, 

3678 \\Delta u_{dip, patch1, t1}/\\Delta t, 

3679 \\Delta u_{tensile, patch1, t1}/\\Delta t],\\\\ 

3680 &[\\Delta u_{strike, patch1, t2}/\\Delta t, 

3681 \\Delta u_{dip, patch1, t2}/\\Delta t, 

3682 \\Delta u_{tensile, patch1, t2}/\\Delta t]], [\\\\ 

3683 &[\\Delta u_{strike, patch2, t1}/\\Delta t, ...],\\\\ 

3684 &[\\Delta u_{strike, patch2, t2}/\\Delta t, ...]]]\\\\ 

3685 

3686 :rtype: :py:class:`~numpy.ndarray`: ``(n_sources, n_times, 3)``, 

3687 :py:class:`~numpy.ndarray`: ``(n_times, )`` 

3688 ''' 

3689 ddisloc_est, calc_times = self.get_delta_slip( 

3690 *args, delta=True, **kwargs) 

3691 

3692 dt = num.concatenate( 

3693 [(num.diff(calc_times)[0], ), num.diff(calc_times)]) 

3694 slip_rate = num.linalg.norm(ddisloc_est, axis=2) / dt 

3695 

3696 return slip_rate, calc_times 

3697 

3698 def get_moment_rate_patches(self, *args, **kwargs): 

3699 ''' 

3700 Get scalar seismic moment rate for each patch individually. 

3701 

3702 Additional ``*args`` and ``**kwargs`` are passed to 

3703 :py:meth:`get_slip_rate`. 

3704 

3705 :returns: 

3706 Seismic moment rate for each source patch and time; corner times, 

3707 for which patch moment rate is computed based on slip rate. The 

3708 order of the moment rate array is: 

3709 

3710 .. math:: 

3711 

3712 &[\\\\ 

3713 &[(\\Delta M / \\Delta t)_{patch1, t1}, 

3714 (\\Delta M / \\Delta t)_{patch1, t2}, ...],\\\\ 

3715 &[(\\Delta M / \\Delta t)_{patch2, t1}, 

3716 (\\Delta M / \\Delta t)_{patch, t2}, ...],\\\\ 

3717 &[...]]\\\\ 

3718 

3719 :rtype: :py:class:`~numpy.ndarray`: ``(n_sources, n_times)``, 

3720 :py:class:`~numpy.ndarray`: ``(n_times, )`` 

3721 ''' 

3722 slip_rate, calc_times = self.get_slip_rate(*args, **kwargs) 

3723 

3724 shear_mod = self.get_patch_attribute('shearmod') 

3725 p_length = self.get_patch_attribute('length') 

3726 p_width = self.get_patch_attribute('width') 

3727 

3728 dA = p_length * p_width 

3729 

3730 mom_rate = shear_mod[:, num.newaxis] * slip_rate * dA[:, num.newaxis] 

3731 

3732 return mom_rate, calc_times 

3733 

3734 def get_moment_rate(self, store, target=None, deltat=None): 

3735 ''' 

3736 Get seismic source moment rate for the total source (STF). 

3737 

3738 :param store: 

3739 Green's function database (needs to cover whole region of of the 

3740 source). Its ``deltat`` [s] is used as time increment for slip 

3741 difference calculation. Either ``deltat`` or ``store`` should be 

3742 given. 

3743 :type store: 

3744 :py:class:`~pyrocko.gf.store.Store` 

3745 

3746 :param target: 

3747 Target information, needed for interpolation method. 

3748 :type target: 

3749 optional, :py:class:`~pyrocko.gf.targets.Target` 

3750 

3751 :param deltat: 

3752 Time increment for slip difference calculation [s]. If not given 

3753 ``store.deltat`` is used. 

3754 :type deltat: 

3755 optional, float 

3756 

3757 :return: 

3758 Seismic moment rate [Nm/s] for each time; corner times, for which 

3759 moment rate is computed. The order of the moment rate array is: 

3760 

3761 .. math:: 

3762 

3763 &[\\\\ 

3764 &(\\Delta M / \\Delta t)_{t1},\\\\ 

3765 &(\\Delta M / \\Delta t)_{t2},\\\\ 

3766 &...]\\\\ 

3767 

3768 :rtype: 

3769 :py:class:`~numpy.ndarray`: ``(n_times, )``, 

3770 :py:class:`~numpy.ndarray`: ``(n_times, )`` 

3771 ''' 

3772 if not deltat: 

3773 deltat = store.config.deltat 

3774 return self.discretize_basesource( 

3775 store, target=target).get_moment_rate(deltat) 

3776 

3777 def get_moment(self, *args, **kwargs): 

3778 ''' 

3779 Get seismic cumulative moment. 

3780 

3781 Additional ``*args`` and ``**kwargs`` are passed to 

3782 :py:meth:`get_magnitude`. 

3783 

3784 :returns: 

3785 Cumulative seismic moment in [Nm]. 

3786 :rtype: 

3787 float 

3788 ''' 

3789 return float(pmt.magnitude_to_moment(self.get_magnitude( 

3790 *args, **kwargs))) 

3791 

3792 def rescale_slip(self, magnitude=None, moment=None, **kwargs): 

3793 ''' 

3794 Rescale source slip based on given target magnitude or seismic moment. 

3795 

3796 Rescale the maximum source slip to fit the source moment magnitude or 

3797 seismic moment to the given target values. Either ``magnitude`` or 

3798 ``moment`` need to be given. Additional ``**kwargs`` are passed to 

3799 :py:meth:`get_moment`. 

3800 

3801 :param magnitude: 

3802 Target moment magnitude :math:`M_\\mathrm{w}` as in 

3803 [Hanks and Kanamori, 1979] 

3804 :type magnitude: 

3805 optional, float 

3806 

3807 :param moment: 

3808 Target seismic moment :math:`M_0` [Nm]. 

3809 :type moment: 

3810 optional, float 

3811 ''' 

3812 if self.slip is None: 

3813 self.slip = 1. 

3814 logger.warning('No slip found for rescaling. ' 

3815 'An initial slip of 1 m is assumed.') 

3816 

3817 if magnitude is None and moment is None: 

3818 raise ValueError( 

3819 'Either target magnitude or moment need to be given.') 

3820 

3821 moment_init = self.get_moment(**kwargs) 

3822 

3823 if magnitude is not None: 

3824 moment = pmt.magnitude_to_moment(magnitude) 

3825 

3826 self.slip *= moment / moment_init 

3827 

3828 

class DoubleDCSource(SourceWithMagnitude):
    '''
    Two double-couple point sources separated in space and time.
    Moment share between the sub-sources is controlled by the
    parameter mix.
    The position of the subsources is dependent on the moment
    distribution between the two sources. Depth, east and north
    shift are given for the centroid between the two double-couples.
    The subsources will positioned according to their moment shares
    around this centroid position.
    This is done according to their delta parameters, which are
    therefore in relation to that centroid.
    Note that depth of the subsources therefore can be
    depth+/-delta_depth. For shallow earthquakes therefore
    the depth has to be chosen deeper to avoid sampling
    above surface.
    '''

    # Focal mechanism of sub-source 1.
    strike1 = Float.T(
        default=0.0,
        help='strike direction in [deg], measured clockwise from north')

    dip1 = Float.T(
        default=90.0,
        help='dip angle in [deg], measured downward from horizontal')

    # Direction from sub-source 1 towards sub-source 2.
    azimuth = Float.T(
        default=0.0,
        help='azimuth to second double-couple [deg], '
             'measured at first, clockwise from north')

    rake1 = Float.T(
        default=0.0,
        help='rake angle in [deg], '
             'measured counter-clockwise from right-horizontal '
             'in on-plane view')

    # Focal mechanism of sub-source 2.
    strike2 = Float.T(
        default=0.0,
        help='strike direction in [deg], measured clockwise from north')

    dip2 = Float.T(
        default=90.0,
        help='dip angle in [deg], measured downward from horizontal')

    rake2 = Float.T(
        default=0.0,
        help='rake angle in [deg], '
             'measured counter-clockwise from right-horizontal '
             'in on-plane view')

    # Spatio-temporal separation of the two sub-sources.
    delta_time = Float.T(
        default=0.0,
        help='separation of double-couples in time (t2-t1) [s]')

    delta_depth = Float.T(
        default=0.0,
        help='difference in depth (z2-z1) [m]')

    distance = Float.T(
        default=0.0,
        help='distance between the two double-couples [m]')

    # Moment partitioning between the sub-sources: a1 = 1 - mix, a2 = mix.
    mix = Float.T(
        default=0.5,
        help='how to distribute the moment to the two doublecouples '
             'mix=0 -> m1=1 and m2=0; mix=1 -> m1=0, m2=1')

    stf1 = STF.T(
        optional=True,
        help='Source time function of subsource 1 '
             '(if given, overrides STF from attribute :py:gattr:`Source.stf`)')

    stf2 = STF.T(
        optional=True,
        help='Source time function of subsource 2 '
             '(if given, overrides STF from attribute :py:gattr:`Source.stf`)')

    discretized_source_class = meta.DiscretizedMTSource

    def base_key(self):
        # Cache key: every parameter influencing the pre-STF GF response.
        return (
            self.time, self.depth, self.lat, self.north_shift,
            self.lon, self.east_shift, type(self).__name__) + \
            self.effective_stf1_pre().base_key() + \
            self.effective_stf2_pre().base_key() + (
                self.strike1, self.dip1, self.rake1,
                self.strike2, self.dip2, self.rake2,
                self.delta_time, self.delta_depth,
                self.azimuth, self.distance, self.mix)

    def get_factor(self):
        # Scalar amplitude factor applied to the unit-moment response.
        return self.moment

    def effective_stf1_pre(self):
        # STF of sub-source 1: explicit stf1, else the common stf, else an
        # impulse.
        return self.stf1 or self.stf or g_unit_pulse

    def effective_stf2_pre(self):
        # STF of sub-source 2, analogous to effective_stf1_pre.
        return self.stf2 or self.stf or g_unit_pulse

    def effective_stf_post(self):
        # STFs are applied per sub-source before stacking; nothing to
        # convolve afterwards.
        return g_unit_pulse

    def split(self):
        '''
        Split into the two constituent :py:class:`DCSource` objects.

        Positions, times and moments are distributed around the centroid
        according to :py:attr:`mix` (a1 applies to source 1, a2 to source 2).
        '''
        a1 = 1.0 - self.mix
        a2 = self.mix
        # Horizontal offset vector from source 1 towards source 2.
        delta_north = math.cos(self.azimuth * d2r) * self.distance
        delta_east = math.sin(self.azimuth * d2r) * self.distance

        dc1 = DCSource(
            lat=self.lat,
            lon=self.lon,
            time=self.time - self.delta_time * a2,
            north_shift=self.north_shift - delta_north * a2,
            east_shift=self.east_shift - delta_east * a2,
            depth=self.depth - self.delta_depth * a2,
            moment=self.moment * a1,
            strike=self.strike1,
            dip=self.dip1,
            rake=self.rake1,
            stf=self.stf1 or self.stf)

        dc2 = DCSource(
            lat=self.lat,
            lon=self.lon,
            time=self.time + self.delta_time * a1,
            north_shift=self.north_shift + delta_north * a1,
            east_shift=self.east_shift + delta_east * a1,
            depth=self.depth + self.delta_depth * a1,
            moment=self.moment * a2,
            strike=self.strike2,
            dip=self.dip2,
            rake=self.rake2,
            stf=self.stf2 or self.stf)

        return [dc1, dc2]

    def discretize_basesource(self, store, target=None):
        '''
        Discretize both sub-sources into one set of moment tensor samples.
        '''
        a1 = 1.0 - self.mix
        a2 = self.mix
        # Unit moment tensors scaled by the moment share of each sub-source.
        mot1 = pmt.MomentTensor(strike=self.strike1, dip=self.dip1,
                                rake=self.rake1, scalar_moment=a1)
        mot2 = pmt.MomentTensor(strike=self.strike2, dip=self.dip2,
                                rake=self.rake2, scalar_moment=a2)

        delta_north = math.cos(self.azimuth * d2r) * self.distance
        delta_east = math.sin(self.azimuth * d2r) * self.distance

        # Each sub-source's STF is sampled at the store's sampling interval,
        # centered on its own (shifted) origin time.
        times1, amplitudes1 = self.effective_stf1_pre().discretize_t(
            store.config.deltat, self.time - self.delta_time * a2)

        times2, amplitudes2 = self.effective_stf2_pre().discretize_t(
            store.config.deltat, self.time + self.delta_time * a1)

        nt1 = times1.size
        nt2 = times2.size

        ds = meta.DiscretizedMTSource(
            lat=self.lat,
            lon=self.lon,
            times=num.concatenate((times1, times2)),
            north_shifts=num.concatenate((
                num.repeat(self.north_shift - delta_north * a2, nt1),
                num.repeat(self.north_shift + delta_north * a1, nt2))),
            east_shifts=num.concatenate((
                num.repeat(self.east_shift - delta_east * a2, nt1),
                num.repeat(self.east_shift + delta_east * a1, nt2))),
            depths=num.concatenate((
                num.repeat(self.depth - self.delta_depth * a2, nt1),
                num.repeat(self.depth + self.delta_depth * a1, nt2))),
            m6s=num.vstack((
                mot1.m6()[num.newaxis, :] * amplitudes1[:, num.newaxis],
                mot2.m6()[num.newaxis, :] * amplitudes2[:, num.newaxis])))

        return ds

    def pyrocko_moment_tensor(self, store=None, target=None):
        # Combined moment tensor: moment-weighted sum of both sub-sources.
        a1 = 1.0 - self.mix
        a2 = self.mix
        mot1 = pmt.MomentTensor(strike=self.strike1, dip=self.dip1,
                                rake=self.rake1,
                                scalar_moment=a1 * self.moment)
        mot2 = pmt.MomentTensor(strike=self.strike2, dip=self.dip2,
                                rake=self.rake2,
                                scalar_moment=a2 * self.moment)
        return pmt.MomentTensor(m=mot1.m() + mot2.m())

    def pyrocko_event(self, store=None, target=None, **kwargs):
        # Export as event with the combined moment tensor attached.
        return SourceWithMagnitude.pyrocko_event(
            self, store, target,
            moment_tensor=self.pyrocko_moment_tensor(store, target),
            **kwargs)

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        '''
        Create a :py:class:`DoubleDCSource` from a pyrocko event.

        Both sub-sources are initialized with the same mechanism from the
        event's moment tensor; mix=0 puts all moment into sub-source 1.
        '''
        d = {}
        mt = ev.moment_tensor
        if mt:
            (strike, dip, rake), _ = mt.both_strike_dip_rake()
            d.update(
                strike1=float(strike),
                dip1=float(dip),
                rake1=float(rake),
                strike2=float(strike),
                dip2=float(dip),
                rake2=float(rake),
                mix=0.0,
                magnitude=float(mt.moment_magnitude()))

        d.update(kwargs)
        source = super(DoubleDCSource, cls).from_pyrocko_event(ev, **d)
        source.stf1 = source.stf
        # Sub-source 2 carries no moment (mix=0); give it a zero-duration STF.
        source.stf2 = HalfSinusoidSTF(effective_duration=0.)
        source.stf = None
        return source

4045 

class RingfaultSource(SourceWithMagnitude):
    '''
    A ring fault with vertical doublecouples.

    The ring is discretized into ``npointsources`` vertical double-couple
    point sources arranged on a circle, each rotated so its mechanism is
    tangential to the ring, then tilted into the ring plane given by
    ``strike`` and ``dip``.
    '''

    diameter = Float.T(
        default=1.0,
        help='diameter of the ring in [m]')

    sign = Float.T(
        default=1.0,
        help='inside of the ring moves up (+1) or down (-1)')

    strike = Float.T(
        default=0.0,
        help='strike direction of the ring plane, clockwise from north,'
             ' in [deg]')

    dip = Float.T(
        default=0.0,
        help='dip angle of the ring plane from horizontal in [deg]')

    npointsources = Int.T(
        default=360,
        help='number of point sources to use')

    discretized_source_class = meta.DiscretizedMTSource

    def base_key(self):
        # Geometry parameters only; moment and sign enter via get_factor().
        return Source.base_key(self) + (
            self.strike, self.dip, self.diameter, self.npointsources)

    def get_factor(self):
        # Overall scaling applied to the unit-moment discretization below.
        return self.sign * self.moment

    def discretize_basesource(self, store=None, target=None):
        '''
        Discretize the ring into ``n`` rotated double-couple point sources.
        '''
        n = self.npointsources
        # Angular positions around the ring (no duplicate at 2*pi).
        phi = num.linspace(0, 2.0 * num.pi, n, endpoint=False)

        points = num.zeros((n, 3))
        points[:, 0] = num.cos(phi) * 0.5 * self.diameter
        points[:, 1] = num.sin(phi) * 0.5 * self.diameter

        # Tilt the ring plane by (dip, strike).
        rotmat = pmt.euler_to_matrix(self.dip * d2r, self.strike * d2r, 0.0)
        # NOTE(review): rotmat.T is applied to the points here — looks like
        # the inverse (passive) rotation is intended; confirm the Euler
        # convention of pmt.euler_to_matrix before touching this.
        points = num.dot(rotmat.T, points.T).T

        points[:, 0] += self.north_shift
        points[:, 1] += self.east_shift
        points[:, 2] += self.depth

        # Reference vertical double-couple, unit total moment split over n.
        m = num.array(pmt.MomentTensor(strike=90., dip=90., rake=-90.,
                                       scalar_moment=1.0 / n).m())

        # Per-point rotation about the vertical axis by phi.
        rotmats = num.transpose(
            [[num.cos(phi), num.sin(phi), num.zeros(n)],
             [-num.sin(phi), num.cos(phi), num.zeros(n)],
             [num.zeros(n), num.zeros(n), num.ones(n)]], (2, 0, 1))

        ms = num.zeros((n, 3, 3))
        for i in range(n):
            # Rotate mechanism around the ring, then tilt into ring plane.
            mtemp = num.dot(rotmats[i].T, num.dot(m, rotmats[i]))
            ms[i, :, :] = num.dot(rotmat.T, num.dot(mtemp, rotmat))

        # Pack the symmetric tensors into 6-component rows (nn, ee, dd,
        # ne, nd, ed ordering as used by DiscretizedMTSource).
        m6s = num.vstack((ms[:, 0, 0], ms[:, 1, 1], ms[:, 2, 2],
                          ms[:, 0, 1], ms[:, 0, 2], ms[:, 1, 2])).T

        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)

        nt = times.size

        # Repeat each point per STF sample; scale each MT by the STF
        # amplitude of its time sample.
        return meta.DiscretizedMTSource(
            times=num.tile(times, n),
            lat=self.lat,
            lon=self.lon,
            north_shifts=num.repeat(points[:, 0], nt),
            east_shifts=num.repeat(points[:, 1], nt),
            depths=num.repeat(points[:, 2], nt),
            m6s=num.repeat(m6s, nt, axis=0) * num.tile(
                amplitudes, n)[:, num.newaxis])

4126 

4127 

class CombiSource(Source):
    '''
    Composite source model.

    Wraps a list of sub-sources. All sub-sources are moved to a common
    geographic origin (that of the first sub-source) and the combined
    source's time, depth and shifts are the means over the sub-sources.
    '''

    discretized_source_class = meta.DiscretizedMTSource

    subsources = List.T(Source.T())

    def __init__(self, subsources=[], **kwargs):
        '''
        :param subsources: non-empty list of :py:class:`Source` objects
        :raises BadRequest: if ``subsources`` is empty
        '''
        if not subsources:
            raise BadRequest(
                'Need at least one sub-source to create a CombiSource object.')

        lats = num.array(
            [subsource.lat for subsource in subsources], dtype=float)
        lons = num.array(
            [subsource.lon for subsource in subsources], dtype=float)

        lat, lon = lats[0], lons[0]

        # Re-origin the sub-sources if they do not all share the same
        # (lat, lon). Fix: the condition was previously
        # `not num.all(lats == lat) and num.all(lons == lon)` which, due to
        # operator precedence, only triggered when latitudes differed AND
        # longitudes were all equal — differing longitudes were silently
        # left in inconsistent reference frames.
        if not (num.all(lats == lat) and num.all(lons == lon)):
            subsources = [s.clone() for s in subsources]
            for subsource in subsources[1:]:
                subsource.set_origin(lat, lon)

        # Nominal reference point: mean over the (now common-origin)
        # sub-sources.
        depth = float(num.mean([p.depth for p in subsources]))
        time = float(num.mean([p.time for p in subsources]))
        north_shift = float(num.mean([p.north_shift for p in subsources]))
        east_shift = float(num.mean([p.east_shift for p in subsources]))
        kwargs.update(
            time=time,
            lat=float(lat),
            lon=float(lon),
            north_shift=north_shift,
            east_shift=east_shift,
            depth=depth)

        Source.__init__(self, subsources=subsources, **kwargs)

    def get_factor(self):
        # Sub-sources carry their own factors (applied below).
        return 1.0

    def discretize_basesource(self, store, target=None):
        '''
        Discretize all sub-sources and combine them into one
        :py:class:`~pyrocko.gf.meta.DiscretizedMTSource`.
        '''
        dsources = []
        for sf in self.subsources:
            ds = sf.discretize_basesource(store, target)
            # Bake each sub-source's scaling factor into its MT samples.
            ds.m6s *= sf.get_factor()
            dsources.append(ds)

        return meta.DiscretizedMTSource.combine(dsources)

4178 

4179 

class SFSource(Source):
    '''
    A single force point source.

    Supported GF schemes: `'elastic5'`.
    '''

    discretized_source_class = meta.DiscretizedSFSource

    fn = Float.T(
        default=0.,
        help='northward component of single force [N]')

    fe = Float.T(
        default=0.,
        help='eastward component of single force [N]')

    fd = Float.T(
        default=0.,
        help='downward component of single force [N]')

    def __init__(self, **kwargs):
        Source.__init__(self, **kwargs)

    def base_key(self):
        # Force components take part in the GF caching key.
        force = (self.fn, self.fe, self.fd)
        return Source.base_key(self) + force

    def get_factor(self):
        return 1.0

    def discretize_basesource(self, store, target=None):
        '''
        Sample the source time function and scale the NED force vector by
        each STF amplitude sample.
        '''
        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)

        force_ned = num.array(
            [[self.fn, self.fe, self.fd]], dtype=float)
        forces = amplitudes[:, num.newaxis] * force_ned

        return meta.DiscretizedSFSource(
            forces=forces, **self._dparams_base_repeated(times))

    def pyrocko_event(self, store=None, target=None, **kwargs):
        '''
        Convert to a plain Pyrocko event (no mechanism attached).
        '''
        return Source.pyrocko_event(self, store, target, **kwargs)

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        '''
        Build a single-force source from a Pyrocko event.
        '''
        params = dict(kwargs)
        return super(SFSource, cls).from_pyrocko_event(ev, **params)

4229 

4230 

class PorePressurePointSource(Source):
    '''
    Excess pore pressure point source.

    For poro-elastic initial value problem where an excess pore pressure is
    brought into a small source volume.
    '''

    discretized_source_class = meta.DiscretizedPorePressureSource

    pp = Float.T(
        default=1.0,
        help='initial excess pore pressure in [Pa]')

    def base_key(self):
        # pp does not enter the key; it is applied via get_factor().
        return Source.base_key(self)

    def get_factor(self):
        return self.pp

    def discretize_basesource(self, store, target=None):
        # Single unit-pressure sample at the source location; the actual
        # pressure is applied through get_factor().
        # NOTE(review): `arr` is presumably a module-level array helper
        # defined elsewhere in this file — confirm.
        return meta.DiscretizedPorePressureSource(pp=arr(1.0),
                                                  **self._dparams_base())

4254 

4255 

class PorePressureLineSource(Source):
    '''
    Excess pore pressure line source.

    The line source is centered at (north_shift, east_shift, depth).
    '''

    discretized_source_class = meta.DiscretizedPorePressureSource

    pp = Float.T(
        default=1.0,
        help='initial excess pore pressure in [Pa]')

    length = Float.T(
        default=0.0,
        help='length of the line source [m]')

    azimuth = Float.T(
        default=0.0,
        help='azimuth direction, clockwise from north [deg]')

    dip = Float.T(
        default=90.,
        help='dip direction, downward from horizontal [deg]')

    def base_key(self):
        geometry = (self.azimuth, self.dip, self.length)
        return Source.base_key(self) + geometry

    def get_factor(self):
        return self.pp

    def discretize_basesource(self, store, target=None):
        '''
        Sample the line with an odd number of equally spaced points so
        that its center is hit exactly; point count scales with the
        store's finest grid spacing.
        '''
        npoints = 2 * int(
            math.ceil(self.length / num.min(store.config.deltas))) + 1

        offsets = num.linspace(
            -0.5 * self.length, 0.5 * self.length, npoints)

        sin_azi = math.sin(self.azimuth * d2r)
        cos_azi = math.cos(self.azimuth * d2r)
        sin_dip = math.sin(self.dip * d2r)
        cos_dip = math.cos(self.dip * d2r)

        # Points along the line direction in NED coordinates.
        points = num.zeros((npoints, 3))
        points[:, 0] = self.north_shift + offsets * cos_azi * cos_dip
        points[:, 1] = self.east_shift + offsets * sin_azi * cos_dip
        points[:, 2] = self.depth + offsets * sin_dip

        # Unit total pressure, split evenly over the points.
        return meta.DiscretizedPorePressureSource(
            times=util.num_full(npoints, self.time),
            lat=self.lat,
            lon=self.lon,
            north_shifts=points[:, 0],
            east_shifts=points[:, 1],
            depths=points[:, 2],
            pp=num.ones(npoints) / npoints)

4311 

4312 

class Request(Object):
    '''
    Synthetic seismogram computation request.

    ::

        Request(**kwargs)
        Request(sources, targets, **kwargs)
    '''

    sources = List.T(
        Source.T(),
        help='list of sources for which to produce synthetics.')

    targets = List.T(
        Target.T(),
        help='list of targets for which to produce synthetics.')

    @classmethod
    def args2kwargs(cls, args):
        '''
        Translate positional constructor arguments to keyword arguments.

        Accepts no positional arguments or the pair ``(sources, targets)``.

        :raises BadRequest: for any other argument count
        '''
        if len(args) not in (0, 2, 3):
            raise BadRequest('Invalid arguments.')

        if len(args) == 2:
            return dict(sources=args[0], targets=args[1])
        else:
            # NOTE(review): three positional arguments pass the check but
            # are discarded here — looks intentional for backward
            # compatibility; confirm before tightening.
            return {}

    def __init__(self, *args, **kwargs):
        kwargs.update(self.args2kwargs(args))
        sources = kwargs.pop('sources', [])
        targets = kwargs.pop('targets', [])

        # Convenience: allow single source/target instances.
        if isinstance(sources, Source):
            sources = [sources]

        # Idiom: single isinstance with a tuple instead of chained `or`.
        if isinstance(targets, (Target, StaticTarget)):
            targets = [targets]

        Object.__init__(self, sources=sources, targets=targets, **kwargs)

    @property
    def targets_dynamic(self):
        # Dynamic (waveform) targets only.
        return [t for t in self.targets if isinstance(t, Target)]

    @property
    def targets_static(self):
        # Static displacement targets only.
        return [t for t in self.targets if isinstance(t, StaticTarget)]

    @property
    def has_dynamic(self):
        # Idiom: bool() instead of `True if len(...) > 0 else False`.
        return bool(self.targets_dynamic)

    @property
    def has_statics(self):
        return bool(self.targets_static)

    def subsources_map(self):
        '''
        Group sources by their GF-relevant base key.
        '''
        m = defaultdict(list)
        for source in self.sources:
            m[source.base_key()].append(source)

        return m

    def subtargets_map(self):
        '''
        Group targets by their base key.
        '''
        m = defaultdict(list)
        for target in self.targets:
            m[target.base_key()].append(target)

        return m

    def subrequest_map(self):
        '''
        Cartesian product of source groups and target groups, keyed by
        the pair of base keys.
        '''
        ms = self.subsources_map()
        mt = self.subtargets_map()
        m = {}
        for (ks, ls) in ms.items():
            for (kt, lt) in mt.items():
                m[ks, kt] = (ls, lt)

        return m

4393 

4394 

class ProcessingStats(Object):
    '''
    Timing and counting statistics collected while processing a request.

    The ``t_perc_*`` attributes are percentages of wallclock time spent in
    the respective processing stage; ``t_wallclock``/``t_cpu`` are absolute
    times, the ``n_*`` attributes are event counters.
    '''
    # Dynamic (waveform) processing stages:
    t_perc_get_store_and_receiver = Float.T(default=0.)
    t_perc_discretize_source = Float.T(default=0.)
    t_perc_make_base_seismogram = Float.T(default=0.)
    t_perc_make_same_span = Float.T(default=0.)
    t_perc_post_process = Float.T(default=0.)
    t_perc_optimize = Float.T(default=0.)
    t_perc_stack = Float.T(default=0.)
    # Static processing stages:
    t_perc_static_get_store = Float.T(default=0.)
    t_perc_static_discretize_basesource = Float.T(default=0.)
    t_perc_static_sum_statics = Float.T(default=0.)
    t_perc_static_post_process = Float.T(default=0.)
    t_wallclock = Float.T(default=0.)
    t_cpu = Float.T(default=0.)
    n_read_blocks = Int.T(default=0)
    n_results = Int.T(default=0)
    n_subrequests = Int.T(default=0)
    n_stores = Int.T(default=0)
    n_records_stacked = Int.T(default=0)

4414 

4415 

class Response(Object):
    '''
    Response object to a synthetic seismogram computation request.
    '''

    request = Request.T()
    # One result list per source, each with one entry per target.
    results_list = List.T(List.T(meta.SeismosizerResult.T()))
    stats = ProcessingStats.T()

    def pyrocko_traces(self):
        '''
        Return a list of requested
        :class:`~pyrocko.trace.Trace` instances.

        Non-trace results (e.g. static or error results) are skipped.
        '''

        traces = []
        for results in self.results_list:
            for result in results:
                if not isinstance(result, meta.Result):
                    continue
                traces.append(result.trace.pyrocko_trace())

        return traces

    def kite_scenes(self):
        '''
        Return a list of requested
        :class:`~kite.scenes` instances.
        '''
        kite_scenes = []
        for results in self.results_list:
            for result in results:
                if isinstance(result, meta.KiteSceneResult):
                    sc = result.get_scene()
                    kite_scenes.append(sc)

        return kite_scenes

    def static_results(self):
        '''
        Return a list of requested
        :class:`~pyrocko.gf.meta.StaticResult` instances.
        '''
        statics = []
        for results in self.results_list:
            for result in results:
                if not isinstance(result, meta.StaticResult):
                    continue
                statics.append(result)

        return statics

    def iter_results(self, get='pyrocko_traces'):
        '''
        Generator function to iterate over results of request.

        :param get: ``'pyrocko_traces'`` to yield converted traces,
            ``'results'`` to yield the raw result objects

        Yields associated :py:class:`Source`,
        :class:`~pyrocko.gf.targets.Target`,
        :class:`~pyrocko.trace.Trace` instances in each iteration.
        '''

        for isource, source in enumerate(self.request.sources):
            for itarget, target in enumerate(self.request.targets):
                result = self.results_list[isource][itarget]
                if get == 'pyrocko_traces':
                    yield source, target, result.trace.pyrocko_trace()
                elif get == 'results':
                    yield source, target, result

    def snuffle(self, **kwargs):
        '''
        Open *snuffler* with requested traces.
        '''

        trace.snuffle(self.pyrocko_traces(), **kwargs)

4491 

4492 

class Engine(Object):
    '''
    Base class for synthetic seismogram calculators.
    '''

    def get_store_ids(self):
        '''
        Get list of available GF store IDs.

        Base implementation knows no stores; subclasses override.
        '''

        return []

4504 

4505 

class Rule(object):
    '''
    Base class for rules mapping GF components to output quantities.
    '''

4508 

4509 

class VectorRule(Rule):
    '''
    Rule to project a 3-component (NED) vector quantity onto a target's
    measurement direction, optionally differentiating the result.
    '''

    def __init__(self, quantity, differentiate=0, integrate=0):
        # GF component names, e.g. 'displacement.n', '.e', '.d'.
        self.components = [quantity + '.' + c for c in 'ned']
        self.differentiate = differentiate
        self.integrate = integrate

    def required_components(self, target):
        '''
        Return only the components whose projection factor is non-zero
        for this target's orientation.
        '''
        n, e, d = self.components
        sa, ca, sd, cd = target.get_sin_cos_factors()

        comps = []
        if nonzero(ca * cd):
            comps.append(n)

        if nonzero(sa * cd):
            comps.append(e)

        if nonzero(sd):
            comps.append(d)

        return tuple(comps)

    def apply_(self, target, base_seismogram):
        '''
        Project the base seismogram components onto the target direction.
        '''
        n, e, d = self.components
        sa, ca, sd, cd = target.get_sin_cos_factors()

        # Only components with non-zero factors are present in
        # base_seismogram (see required_components), so each access is
        # guarded by the same nonzero() test.
        if nonzero(ca * cd):
            data = base_seismogram[n].data * (ca * cd)
            deltat = base_seismogram[n].deltat
        else:
            data = 0.0

        if nonzero(sa * cd):
            data = data + base_seismogram[e].data * (sa * cd)
            deltat = base_seismogram[e].deltat

        if nonzero(sd):
            data = data + base_seismogram[d].data * sd
            deltat = base_seismogram[d].deltat

        if self.differentiate:
            # NOTE(review): if all three factors were zero, deltat would be
            # unbound here — presumably that cannot happen for a valid
            # target orientation; confirm.
            data = util.diff_fd(self.differentiate, 4, deltat, data)

        if self.integrate:
            raise NotImplementedError('Integration is not implemented yet.')

        return data

4558 

4559 

class HorizontalVectorRule(Rule):
    '''
    Rule to project a horizontal (NE) vector quantity onto a target's
    measurement azimuth, optionally differentiating the result.
    '''

    def __init__(self, quantity, differentiate=0, integrate=0):
        # GF component names, e.g. 'vertical_tilt.n', '.e'.
        self.components = [quantity + '.' + c for c in 'ne']
        self.differentiate = differentiate
        self.integrate = integrate

    def required_components(self, target):
        '''
        Return only the components with non-zero azimuthal factor.
        '''
        n, e = self.components
        sa, ca, _, _ = target.get_sin_cos_factors()

        comps = []
        if nonzero(ca):
            comps.append(n)

        if nonzero(sa):
            comps.append(e)

        return tuple(comps)

    def apply_(self, target, base_seismogram):
        '''
        Project the base seismogram components onto the target azimuth.
        '''
        n, e = self.components
        sa, ca, _, _ = target.get_sin_cos_factors()

        if nonzero(ca):
            data = base_seismogram[n].data * ca
            deltat = base_seismogram[n].deltat
        else:
            data = 0.0

        if nonzero(sa):
            data = data + base_seismogram[e].data * sa
            deltat = base_seismogram[e].deltat

        if self.differentiate:
            # Fix: deltat was previously read unconditionally from the
            # east component, which is absent from base_seismogram when
            # sin(azimuth) is zero (required_components omits it) and
            # raised KeyError. Now deltat is taken from whichever
            # component actually contributed.
            data = util.diff_fd(self.differentiate, 4, deltat, data)

        if self.integrate:
            raise NotImplementedError('Integration is not implemented yet.')

        return data

4600 

4601 

class ScalarRule(Rule):
    '''
    Rule for scalar quantities: pass the single GF component through,
    optionally differentiating it.
    '''

    def __init__(self, quantity, differentiate=0):
        self.c = quantity
        # Fix: `differentiate` was accepted but never stored, so apply_()
        # raised AttributeError on accessing self.differentiate.
        self.differentiate = differentiate

    def required_components(self, target):
        return (self.c, )

    def apply_(self, target, base_seismogram):
        # Copy so the caller cannot mutate the cached base seismogram.
        data = base_seismogram[self.c].data.copy()
        deltat = base_seismogram[self.c].deltat
        if self.differentiate:
            data = util.diff_fd(self.differentiate, 4, deltat, data)

        return data

4617 

4618 

class StaticDisplacement(Rule):
    '''
    Rule for static displacement targets; for satellite targets, a
    line-of-sight component is derived from the NED components.
    '''

    def required_components(self, target):
        # All three NED displacement components are always needed.
        return tuple('displacement.%s' % c for c in 'ned')

    def apply_(self, target, base_statics):
        if isinstance(target, SatelliteTarget):
            los_fac = target.get_los_factors()
            down = base_statics['displacement.d']
            east = base_statics['displacement.e']
            north = base_statics['displacement.n']
            # LOS = f0 * up + f1 * east + f2 * north  (up = -down).
            base_statics['displacement.los'] = (
                los_fac[:, 0] * -down
                + los_fac[:, 1] * east
                + los_fac[:, 2] * north)
        return base_statics

4632 

4633 

# Mapping of requested dynamic quantity -> ordered list of candidate rules.
# The first rule whose required GF components are provided by the store is
# used; e.g. 'velocity' falls back to differentiating displacement GFs.
channel_rules = {
    'displacement': [VectorRule('displacement')],
    'rotation': [VectorRule('rotation')],
    'velocity': [
        VectorRule('velocity'),
        VectorRule('displacement', differentiate=1)],
    'acceleration': [
        VectorRule('acceleration'),
        VectorRule('velocity', differentiate=1),
        VectorRule('displacement', differentiate=2)],
    'pore_pressure': [ScalarRule('pore_pressure')],
    'vertical_tilt': [HorizontalVectorRule('vertical_tilt')],
    'darcy_velocity': [VectorRule('darcy_velocity')],
}

# Same idea for static targets.
static_rules = {
    'displacement': [StaticDisplacement()]
}

4652 

4653 

class OutOfBoundsContext(Object):
    '''
    Diagnostic context attached to out-of-bounds / seismosizer errors,
    identifying the offending source/target pair.
    '''
    source = Source.T()
    target = Target.T()
    distance = Float.T()
    components = List.T(String.T())

4659 

4660 

def process_dynamic_timeseries(work, psources, ptargets, engine, nthreads=0):
    '''
    Process dynamic targets in batches via the engine's vectorized
    ``base_seismograms`` path.

    :param work: iterable of work items ``(_, _, isources, itargets)``
        with indices into ``psources``/``ptargets``
    :param engine: engine providing GF access and post-processing
    :param nthreads: number of threads passed down to the GF stacking

    Yields ``(isource, itarget, result), tcounters`` tuples; ``result`` is
    either a processed result or a ``SeismosizerError``.
    '''
    dsource_cache = {}
    # Placeholder stage counters (this fast path does not time stages).
    tcounters = list(range(6))

    store_ids = set()
    sources = set()
    targets = set()

    # Remember each target's original position, so results can be keyed
    # by it after targets have been regrouped into sets below.
    for itarget, target in enumerate(ptargets):
        target._id = itarget

    for w in work:
        _, _, isources, itargets = w

        sources.update([psources[isource] for isource in isources])
        targets.update([ptargets[itarget] for itarget in itargets])

    store_ids = set([t.store_id for t in targets])

    for isource, source in enumerate(psources):

        # Union of GF components needed by any target for this source.
        components = set()
        for itarget, target in enumerate(targets):
            rule = engine.get_rule(source, target)
            components.update(rule.required_components(target))

        for store_id in store_ids:
            store_targets = [t for t in targets if t.store_id == store_id]

            sample_rates = set([t.sample_rate for t in store_targets])
            interpolations = set([t.interpolation for t in store_targets])

            base_seismograms = []
            store_targets_out = []

            # One engine call per homogeneous (sample_rate, interpolation)
            # sub-batch.
            for samp_rate in sample_rates:
                for interp in interpolations:
                    engine_targets = [
                        t for t in store_targets if t.sample_rate == samp_rate
                        and t.interpolation == interp]

                    if not engine_targets:
                        continue

                    store_targets_out += engine_targets

                    base_seismograms += engine.base_seismograms(
                        source,
                        engine_targets,
                        components,
                        dsource_cache,
                        nthreads)

            for iseis, seismogram in enumerate(base_seismograms):
                for tr in seismogram.values():
                    if tr.err != store.SeismosizerErrorEnum.SUCCESS:
                        # NOTE(review): the error context indexes
                        # store_targets[iseis], but base_seismograms is
                        # ordered like store_targets_out — when several
                        # sub-batches exist the reported target may be the
                        # wrong one; confirm before relying on it.
                        e = SeismosizerError(
                            'Seismosizer failed with return code %i\n%s' % (
                                tr.err, str(
                                    OutOfBoundsContext(
                                        source=source,
                                        target=store_targets[iseis],
                                        distance=source.distance_to(
                                            store_targets[iseis]),
                                        components=components))))
                        raise e

            for seismogram, target in zip(base_seismograms, store_targets_out):

                try:
                    result = engine._post_process_dynamic(
                        seismogram, source, target)
                except SeismosizerError as e:
                    # Report per-target failures as results, do not abort
                    # the whole batch.
                    result = e

                yield (isource, target._id, result), tcounters

4737 

4738 

def process_dynamic(work, psources, ptargets, engine, nthreads=0):
    '''
    Process dynamic targets one source/target pair at a time.

    :param work: iterable of work items ``(_, _, isources, itargets)``
        with indices into ``psources``/``ptargets``

    Yields ``(isource, itarget, result), tcounters``; ``result`` is a
    processed result (with stacking statistics attached) or a
    ``SeismosizerError``.
    '''
    dsource_cache = {}

    for w in work:
        _, _, isources, itargets = w

        sources = [psources[isource] for isource in isources]
        targets = [ptargets[itarget] for itarget in itargets]

        # Union of GF components needed by any target. Work items group
        # sources by base_key, so the first source is representative.
        components = set()
        for target in targets:
            rule = engine.get_rule(sources[0], target)
            components.update(rule.required_components(target))

        for isource, source in zip(isources, sources):
            for itarget, target in zip(itargets, targets):

                try:
                    base_seismogram, tcounters = engine.base_seismogram(
                        source, target, components, dsource_cache, nthreads)
                except meta.OutOfBounds as e:
                    # Attach diagnostic context before propagating.
                    e.context = OutOfBoundsContext(
                        source=sources[0],
                        target=targets[0],
                        distance=sources[0].distance_to(targets[0]),
                        components=components)
                    raise

                # Aggregate stacking statistics over all GF components.
                n_records_stacked = 0
                t_optimize = 0.0
                t_stack = 0.0

                for _, tr in base_seismogram.items():
                    n_records_stacked += tr.n_records_stacked
                    t_optimize += tr.t_optimize
                    t_stack += tr.t_stack

                try:
                    result = engine._post_process_dynamic(
                        base_seismogram, source, target)
                    result.n_records_stacked = n_records_stacked
                    result.n_shared_stacking = len(sources) *\
                        len(targets)
                    result.t_optimize = t_optimize
                    result.t_stack = t_stack
                except SeismosizerError as e:
                    # Per-pair failures become results instead of aborting.
                    result = e

                tcounters.append(xtime())
                yield (isource, itarget, result), tcounters

4789 

4790 

def process_static(work, psources, ptargets, engine, nthreads=0):
    '''
    Process static displacement targets one source/target pair at a time.

    :param work: iterable of work items ``(_, _, isources, itargets)``
        with indices into ``psources``/``ptargets``

    Yields ``(isource, itarget, result), tcounters`` tuples.
    '''
    for w in work:
        _, _, isources, itargets = w

        sources = [psources[isource] for isource in isources]
        targets = [ptargets[itarget] for itarget in itargets]

        for isource, source in zip(isources, sources):
            for itarget, target in zip(itargets, targets):
                components = engine.get_rule(source, target)\
                    .required_components(target)

                try:
                    base_statics, tcounters = engine.base_statics(
                        source, target, components, nthreads)
                except meta.OutOfBounds as e:
                    # Attach diagnostic context before propagating;
                    # distance is not meaningful for static grids.
                    e.context = OutOfBoundsContext(
                        source=sources[0],
                        target=targets[0],
                        distance=float('nan'),
                        components=components)
                    raise
                result = engine._post_process_statics(
                    base_statics, source, target)
                tcounters.append(xtime())

                yield (isource, itarget, result), tcounters

4818 

4819 

4820class LocalEngine(Engine): 

4821 ''' 

4822 Offline synthetic seismogram calculator. 

4823 

4824 :param use_env: if ``True``, fill :py:attr:`store_superdirs` and 

4825 :py:attr:`store_dirs` with paths set in environment variables 

4826 GF_STORE_SUPERDIRS and GF_STORE_DIRS. 

4827 :param use_config: if ``True``, fill :py:attr:`store_superdirs` and 

4828 :py:attr:`store_dirs` with paths set in the user's config file. 

4829 

4830 The config file can be found at :file:`~/.pyrocko/config.pf` 

4831 

4832 .. code-block :: python 

4833 

4834 gf_store_dirs: ['/home/pyrocko/gf_stores/ak135/'] 

4835 gf_store_superdirs: ['/home/pyrocko/gf_stores/'] 

4836 ''' 

4837 

4838 store_superdirs = List.T( 

4839 String.T(), 

4840 help='directories which are searched for Green\'s function stores') 

4841 

4842 store_dirs = List.T( 

4843 String.T(), 

4844 help='additional individual Green\'s function store directories') 

4845 

4846 default_store_id = String.T( 

4847 optional=True, 

4848 help='default store ID to be used when a request does not provide ' 

4849 'one') 

4850 

4851 def __init__(self, **kwargs): 

4852 use_env = kwargs.pop('use_env', False) 

4853 use_config = kwargs.pop('use_config', False) 

4854 Engine.__init__(self, **kwargs) 

4855 if use_env: 

4856 env_store_superdirs = os.environ.get('GF_STORE_SUPERDIRS', '') 

4857 env_store_dirs = os.environ.get('GF_STORE_DIRS', '') 

4858 if env_store_superdirs: 

4859 self.store_superdirs.extend(env_store_superdirs.split(':')) 

4860 

4861 if env_store_dirs: 

4862 self.store_dirs.extend(env_store_dirs.split(':')) 

4863 

4864 if use_config: 

4865 c = config.config() 

4866 self.store_superdirs.extend(c.gf_store_superdirs) 

4867 self.store_dirs.extend(c.gf_store_dirs) 

4868 

4869 self._check_store_dirs_type() 

4870 self._id_to_store_dir = {} 

4871 self._open_stores = {} 

4872 self._effective_default_store_id = None 

4873 

4874 def _check_store_dirs_type(self): 

4875 for sdir in ['store_dirs', 'store_superdirs']: 

4876 if not isinstance(self.__getattribute__(sdir), list): 

4877 raise TypeError("{} of {} is not of type list".format( 

4878 sdir, self.__class__.__name__)) 

4879 

4880 def _get_store_id(self, store_dir): 

4881 store_ = store.Store(store_dir) 

4882 store_id = store_.config.id 

4883 store_.close() 

4884 return store_id 

4885 

4886 def _looks_like_store_dir(self, store_dir): 

4887 return os.path.isdir(store_dir) and \ 

4888 all(os.path.isfile(pjoin(store_dir, x)) for x in 

4889 ('index', 'traces', 'config')) 

4890 

4891 def iter_store_dirs(self): 

4892 store_dirs = set() 

4893 for d in self.store_superdirs: 

4894 if not os.path.exists(d): 

4895 logger.warning('store_superdir not available: %s' % d) 

4896 continue 

4897 

4898 for entry in os.listdir(d): 

4899 store_dir = os.path.realpath(pjoin(d, entry)) 

4900 if self._looks_like_store_dir(store_dir): 

4901 store_dirs.add(store_dir) 

4902 

4903 for store_dir in self.store_dirs: 

4904 store_dirs.add(os.path.realpath(store_dir)) 

4905 

4906 return store_dirs 

4907 

4908 def _scan_stores(self): 

4909 for store_dir in self.iter_store_dirs(): 

4910 store_id = self._get_store_id(store_dir) 

4911 if store_id not in self._id_to_store_dir: 

4912 self._id_to_store_dir[store_id] = store_dir 

4913 else: 

4914 if store_dir != self._id_to_store_dir[store_id]: 

4915 raise DuplicateStoreId( 

4916 'GF store ID %s is used in (at least) two ' 

4917 'different stores. Locations are: %s and %s' % 

4918 (store_id, self._id_to_store_dir[store_id], store_dir)) 

4919 

4920 def get_store_dir(self, store_id): 

4921 ''' 

4922 Lookup directory given a GF store ID. 

4923 ''' 

4924 

4925 if store_id not in self._id_to_store_dir: 

4926 self._scan_stores() 

4927 

4928 if store_id not in self._id_to_store_dir: 

4929 raise NoSuchStore(store_id, self.iter_store_dirs()) 

4930 

4931 return self._id_to_store_dir[store_id] 

4932 

4933 def get_store_ids(self): 

4934 ''' 

4935 Get list of available store IDs. 

4936 ''' 

4937 

4938 self._scan_stores() 

4939 return sorted(self._id_to_store_dir.keys()) 

4940 

4941 def effective_default_store_id(self): 

4942 if self._effective_default_store_id is None: 

4943 if self.default_store_id is None: 

4944 store_ids = self.get_store_ids() 

4945 if len(store_ids) == 1: 

4946 self._effective_default_store_id = self.get_store_ids()[0] 

4947 else: 

4948 raise NoDefaultStoreSet() 

4949 else: 

4950 self._effective_default_store_id = self.default_store_id 

4951 

4952 return self._effective_default_store_id 

4953 

4954 def get_store(self, store_id=None): 

4955 ''' 

4956 Get a store from the engine. 

4957 

4958 :param store_id: identifier of the store (optional) 

4959 :returns: :py:class:`~pyrocko.gf.store.Store` object 

4960 

4961 If no ``store_id`` is provided the store 

4962 associated with the :py:gattr:`default_store_id` is returned. 

4963 Raises :py:exc:`NoDefaultStoreSet` if :py:gattr:`default_store_id` is 

4964 undefined. 

4965 ''' 

4966 

4967 if store_id is None: 

4968 store_id = self.effective_default_store_id() 

4969 

4970 if store_id not in self._open_stores: 

4971 store_dir = self.get_store_dir(store_id) 

4972 self._open_stores[store_id] = store.Store(store_dir) 

4973 

4974 return self._open_stores[store_id] 

4975 

4976 def get_store_config(self, store_id): 

4977 store = self.get_store(store_id) 

4978 return store.config 

4979 

4980 def get_store_extra(self, store_id, key): 

4981 store = self.get_store(store_id) 

4982 return store.get_extra(key) 

4983 

4984 def close_cashed_stores(self): 

4985 ''' 

4986 Close and remove ids from cashed stores. 

4987 ''' 

4988 store_ids = [] 

4989 for store_id, store_ in self._open_stores.items(): 

4990 store_.close() 

4991 store_ids.append(store_id) 

4992 

4993 for store_id in store_ids: 

4994 self._open_stores.pop(store_id) 

4995 

    def get_rule(self, source, target):
        '''
        Select the first rule for the target's quantity whose required GF
        components are all provided by the target's store.

        :raises BadRequest: if no applicable rule exists
        '''
        cprovided = self.get_store(target.store_id).get_provided_components()

        # NOTE(review): if target is neither StaticTarget nor Target,
        # `quantity`/`available_rules` stay unbound and the try below
        # raises UnboundLocalError rather than BadRequest — presumably
        # targets are always one of the two; confirm.
        if isinstance(target, StaticTarget):
            quantity = target.quantity
            available_rules = static_rules
        elif isinstance(target, Target):
            quantity = target.effective_quantity()
            available_rules = channel_rules

        try:
            # Rules are ordered by preference (e.g. native velocity GFs
            # before differentiated displacement GFs).
            for rule in available_rules[quantity]:
                cneeded = rule.required_components(target)
                if all(c in cprovided for c in cneeded):
                    return rule

        except KeyError:
            # Unknown quantity: fall through to BadRequest below.
            pass

        raise BadRequest(
            'No rule to calculate "%s" with GFs from store "%s" '
            'for source model "%s".' % (
                target.effective_quantity(),
                target.store_id,
                source.__class__.__name__))

5021 

5022 def _cached_discretize_basesource(self, source, store, cache, target): 

5023 if (source, store) not in cache: 

5024 cache[source, store] = source.discretize_basesource(store, target) 

5025 

5026 return cache[source, store] 

5027 

    def base_seismograms(self, source, targets, components, dsource_cache,
                         nthreads=0):
        '''
        Compute raw (un-projected) seismograms for one source and a batch
        of targets sharing store, sample rate and interpolation.

        :param components: GF component names to compute
        :param dsource_cache: memoization dict for source discretization
        :raises BadRequest: if the targets are not homogeneous in
            interpolation scheme or sample rate
        '''
        target = targets[0]

        interp = set([t.interpolation for t in targets])
        if len(interp) > 1:
            raise BadRequest('Targets have different interpolation schemes.')

        rates = set([t.sample_rate for t in targets])
        if len(rates) > 1:
            raise BadRequest('Targets have different sample rates.')

        store_ = self.get_store(target.store_id)
        receivers = [t.receiver(store_) for t in targets]

        # Resample to the target rate if one is requested, otherwise use
        # the store's native rate (deltat=None means no resampling).
        if target.sample_rate is not None:
            deltat = 1. / target.sample_rate
            rate = target.sample_rate
        else:
            deltat = None
            rate = store_.config.sample_rate

        tmin = num.fromiter(
            (t.tmin for t in targets), dtype=float, count=len(targets))
        tmax = num.fromiter(
            (t.tmax for t in targets), dtype=float, count=len(targets))

        # Convert time windows to inclusive sample index ranges.
        itmin = num.floor(tmin * rate).astype(num.int64)
        itmax = num.ceil(tmax * rate).astype(num.int64)
        nsamples = itmax - itmin + 1

        # Targets without a time window (NaN tmin): signal "full span"
        # to calc_seismograms via nsamples=-1.
        mask = num.isnan(tmin)
        itmin[mask] = 0
        nsamples[mask] = -1

        base_source = self._cached_discretize_basesource(
            source, store_, dsource_cache, target)

        base_seismograms = store_.calc_seismograms(
            base_source, receivers, components,
            deltat=deltat,
            itmin=itmin, nsamples=nsamples,
            interpolation=target.interpolation,
            optimization=target.optimization,
            nthreads=nthreads)

        # Align all component traces of each seismogram to one time span.
        for i, base_seismogram in enumerate(base_seismograms):
            base_seismograms[i] = store.make_same_span(base_seismogram)

        return base_seismograms

5079 

def base_seismogram(self, source, target, components, dsource_cache,
                    nthreads):
    '''
    Compute the raw base seismogram for a single target.

    :returns: tuple of ``(base_seismogram, tcounters)`` where
        ``tcounters`` holds wallclock timestamps bracketing the
        processing stages (for the ProcessingStats percentages).
    '''
    tcounters = [xtime()]

    store_ = self.get_store(target.store_id)
    receiver = target.receiver(store_)

    # Bugfix: was `if target.tmin and target.tmax is not None:` which
    # treated a valid tmin of 0.0 as "not set" due to truthiness.
    if target.tmin is not None and target.tmax is not None:
        rate = store_.config.sample_rate
        itmin = int(num.floor(target.tmin * rate))
        itmax = int(num.ceil(target.tmax * rate))
        nsamples = itmax - itmin + 1
    else:
        itmin = None
        nsamples = None

    tcounters.append(xtime())
    base_source = self._cached_discretize_basesource(
        source, store_, dsource_cache, target)

    tcounters.append(xtime())

    if target.sample_rate is not None:
        deltat = 1. / target.sample_rate
    else:
        deltat = None

    base_seismogram = store_.seismogram(
        base_source, receiver, components,
        deltat=deltat,
        itmin=itmin, nsamples=nsamples,
        interpolation=target.interpolation,
        optimization=target.optimization,
        nthreads=nthreads)

    tcounters.append(xtime())

    base_seismogram = store.make_same_span(base_seismogram)

    tcounters.append(xtime())

    return base_seismogram, tcounters

5123 

def base_statics(self, source, target, components, nthreads):
    '''
    Compute raw static displacements for a single static target.

    :returns: tuple of ``(base_statics, tcounters)`` where ``tcounters``
        holds wallclock timestamps bracketing the processing stages.
    '''
    tcounters = [xtime()]
    store_ = self.get_store(target.store_id)

    itsnapshot = None
    if target.tsnapshot is not None:
        itsnapshot = int(num.floor(
            target.tsnapshot * store_.config.sample_rate))

    tcounters.append(xtime())

    base_source = source.discretize_basesource(store_, target=target)

    tcounters.append(xtime())

    base_statics = store_.statics(
        base_source,
        target,
        itsnapshot,
        components,
        target.interpolation,
        nthreads)

    tcounters.append(xtime())

    return base_statics, tcounters

5150 

def _post_process_dynamic(self, base_seismogram, source, target):
    '''
    Turn a raw base seismogram into a target result.

    Applies the component-combination rule, scales by the source and
    target factors, convolves with the post-stacking source time
    function and finally hands the trace to the target's post_process.
    '''
    # Sampling parameters are identical for all components; take any one.
    base_any = next(iter(base_seismogram.values()))
    deltat = base_any.deltat
    itmin = base_any.itmin

    rule = self.get_rule(source, target)
    data = rule.apply_(target, base_seismogram)

    factor = source.get_factor() * target.get_factor()
    if factor != 1.0:
        data = data * factor

    stf = source.effective_stf_post()

    times, amplitudes = stf.discretize_t(
        deltat, 0.0)

    # repeat end point to prevent boundary effects
    padded_data = num.empty(data.size + amplitudes.size, dtype=float)
    padded_data[:data.size] = data
    padded_data[data.size:] = data[-1]
    data = num.convolve(amplitudes, padded_data)

    # Shift trace onset by the STF's discretization offset.
    tmin = itmin * deltat + times[0]

    tr = meta.SeismosizerTrace(
        codes=target.codes,
        # Drop the padding samples appended above.
        data=data[:-amplitudes.size],
        deltat=deltat,
        tmin=tmin)

    return target.post_process(self, source, tr)

5183 

def _post_process_statics(self, base_statics, source, starget):
    '''
    Scale raw static results and hand them to the target for
    post-processing.
    '''
    rule = self.get_rule(source, starget)
    data = rule.apply_(starget, base_statics)

    factor = source.get_factor()
    if factor != 1.0:
        # In-place scaling. NOTE(review): `base_statics` (not `data`) is
        # passed on below, so this presumably relies on the values in
        # `data` being views into `base_statics` -- TODO confirm.
        for v in data.values():
            v *= factor

    return starget.post_process(self, source, base_statics)

5194 

def process(self, *args, **kwargs):
    '''
    Process a request.

    ::

        process(**kwargs)
        process(request, **kwargs)
        process(sources, targets, **kwargs)

    The request can be given as a :py:class:`Request` object, or such an
    object is created using ``Request(**kwargs)`` for convenience.

    :returns: :py:class:`Response` object
    '''

    if len(args) not in (0, 1, 2):
        raise BadRequest('Invalid arguments.')

    if len(args) == 1:
        kwargs['request'] = args[0]

    elif len(args) == 2:
        kwargs.update(Request.args2kwargs(args))

    request = kwargs.pop('request', None)
    status_callback = kwargs.pop('status_callback', None)
    calc_timeseries = kwargs.pop('calc_timeseries', True)

    # `nprocs` is accepted as a legacy alias for `nthreads`.
    nprocs = kwargs.pop('nprocs', None)
    nthreads = kwargs.pop('nthreads', 1)
    if nprocs is not None:
        nthreads = nprocs

    if request is None:
        request = Request(**kwargs)

    if resource:
        rs0 = resource.getrusage(resource.RUSAGE_SELF)
        rc0 = resource.getrusage(resource.RUSAGE_CHILDREN)
    tt0 = xtime()

    # make sure stores are open before fork()
    store_ids = set(target.store_id for target in request.targets)
    for store_id in store_ids:
        self.get_store(store_id)

    source_index = dict((x, i) for (i, x) in
                        enumerate(request.sources))
    target_index = dict((x, i) for (i, x) in
                        enumerate(request.targets))

    m = request.subrequest_map()

    skeys = sorted(m.keys(), key=cmp_to_key(cmp_none_aware))
    results_list = []

    for i in range(len(request.sources)):
        results_list.append([None] * len(request.targets))

    tcounters_dyn_list = []
    tcounters_static_list = []
    nsub = len(skeys)
    isub = 0

    # Processing dynamic targets through
    # parimap(process_subrequest_dynamic)

    if calc_timeseries:
        _process_dynamic = process_dynamic_timeseries
    else:
        _process_dynamic = process_dynamic

    if request.has_dynamic:
        work_dynamic = [
            (i, nsub,
             [source_index[source] for source in m[k][0]],
             [target_index[target] for target in m[k][1]
              if not isinstance(target, StaticTarget)])
            for (i, k) in enumerate(skeys)]

        for ii_results, tcounters_dyn in _process_dynamic(
                work_dynamic, request.sources, request.targets, self,
                nthreads):

            tcounters_dyn_list.append(num.diff(tcounters_dyn))
            isource, itarget, result = ii_results
            results_list[isource][itarget] = result

            if status_callback:
                status_callback(isub, nsub)

            isub += 1

    # Processing static targets through process_static
    if request.has_statics:
        work_static = [
            (i, nsub,
             [source_index[source] for source in m[k][0]],
             [target_index[target] for target in m[k][1]
              if isinstance(target, StaticTarget)])
            for (i, k) in enumerate(skeys)]

        for ii_results, tcounters_static in process_static(
                work_static, request.sources, request.targets, self,
                nthreads=nthreads):

            tcounters_static_list.append(num.diff(tcounters_static))
            isource, itarget, result = ii_results
            results_list[isource][itarget] = result

            if status_callback:
                status_callback(isub, nsub)

            isub += 1

    if status_callback:
        status_callback(nsub, nsub)

    # Bugfix: was `time.time()` while tt0 used `xtime()`; use the same
    # clock for a consistent wallclock difference.
    tt1 = xtime()
    if resource:
        rs1 = resource.getrusage(resource.RUSAGE_SELF)
        rc1 = resource.getrusage(resource.RUSAGE_CHILDREN)

    s = ProcessingStats()

    if request.has_dynamic:
        tcumu_dyn = num.sum(num.vstack(tcounters_dyn_list), axis=0)
        t_dyn = float(num.sum(tcumu_dyn))
        perc_dyn = map(float, tcumu_dyn / t_dyn * 100.)
        (s.t_perc_get_store_and_receiver,
         s.t_perc_discretize_source,
         s.t_perc_make_base_seismogram,
         s.t_perc_make_same_span,
         s.t_perc_post_process) = perc_dyn
    else:
        t_dyn = 0.

    if request.has_statics:
        tcumu_static = num.sum(num.vstack(tcounters_static_list), axis=0)
        t_static = num.sum(tcumu_static)
        perc_static = map(float, tcumu_static / t_static * 100.)
        (s.t_perc_static_get_store,
         s.t_perc_static_discretize_basesource,
         s.t_perc_static_sum_statics,
         s.t_perc_static_post_process) = perc_static

    s.t_wallclock = tt1 - tt0
    if resource:
        s.t_cpu = (
            (rs1.ru_utime + rs1.ru_stime + rc1.ru_utime + rc1.ru_stime) -
            (rs0.ru_utime + rs0.ru_stime + rc0.ru_utime + rc0.ru_stime))
        s.n_read_blocks = (
            (rs1.ru_inblock + rc1.ru_inblock) -
            (rs0.ru_inblock + rc0.ru_inblock))

    n_records_stacked = 0.
    for results in results_list:
        for result in results:
            if not isinstance(result, meta.Result):
                continue
            shr = float(result.n_shared_stacking)
            n_records_stacked += result.n_records_stacked / shr
            s.t_perc_optimize += result.t_optimize / shr
            s.t_perc_stack += result.t_stack / shr

    s.n_records_stacked = int(n_records_stacked)
    if t_dyn != 0.:
        # Bugfix: was `s.t_perc_optimize /= t_dyn * 100`, which divides
        # by (t_dyn * 100) and yields 1/100th of the intended
        # percentage; make it consistent with `perc_dyn` above.
        s.t_perc_optimize = s.t_perc_optimize / t_dyn * 100.
        s.t_perc_stack = s.t_perc_stack / t_dyn * 100.

    return Response(
        request=request,
        results_list=results_list,
        stats=s)

5369 

5370 

class RemoteEngine(Engine):
    '''
    Client for remote synthetic seismogram calculator.
    '''

    # Named site and URL of the web service endpoint (see defaults in
    # the `ws` module).
    site = String.T(default=ws.g_default_site, optional=True)
    url = String.T(default=ws.g_url, optional=True)

    def process(self, request=None, status_callback=None, **kwargs):
        '''
        Process a request on the remote service.

        If no ``request`` is given, one is built as ``Request(**kwargs)``.
        NOTE(review): ``status_callback`` is accepted for interface
        compatibility but not forwarded to the remote call.
        '''

        if request is None:
            request = Request(**kwargs)

        return ws.seismosizer(url=self.url, site=self.site, request=request)

5385 

5386 

# Module-level singleton engine, created lazily by get_engine().
g_engine = None


def get_engine(store_superdirs=()):
    '''
    Get the shared :py:class:`LocalEngine` instance (created on first use).

    :param store_superdirs: extra store super-directories to register on
        the shared engine (duplicates are ignored)
    :returns: the shared engine instance
    '''
    global g_engine
    if g_engine is None:
        g_engine = LocalEngine(use_env=True, use_config=True)

    # Fix: the default was a mutable `[]`; use an immutable default and
    # only extend the engine's own list.
    for d in store_superdirs:
        if d not in g_engine.store_superdirs:
            g_engine.store_superdirs.append(d)

    return g_engine

5400 

5401 

class SourceGroup(Object):
    '''
    Base class for groups of sources.

    Provides vectorized attribute access: ``group.lat`` returns a float
    array of the ``lat`` values of all member sources.
    '''

    def __getattr__(self, k):
        # Collect attribute `k` from every member source into a float
        # array. NOTE(review): assumes the attribute values are castable
        # to float -- TODO confirm for non-numeric source attributes.
        return num.fromiter((getattr(s, k) for s in self),
                            dtype=float)

    def __iter__(self):
        # Abstract: subclasses define how members are enumerated.
        raise NotImplementedError(
            'This method should be implemented in subclass.')

    def __len__(self):
        # Abstract: subclasses define the group size.
        raise NotImplementedError(
            'This method should be implemented in subclass.')

5415 

5416 

class SourceList(SourceGroup):
    '''
    Group of sources backed by a plain list.
    '''

    sources = List.T(Source.T())

    def append(self, s):
        '''
        Add source ``s`` to the group.
        '''
        self.sources.append(s)

    def __iter__(self):
        yield from self.sources

    def __len__(self):
        return len(self.sources)

5428 

5429 

class SourceGrid(SourceGroup):
    '''
    Group of sources forming a regular grid, obtained by varying selected
    parameters of a base source over given ranges.
    '''

    base = Source.T()
    variables = Dict.T(String.T(), Range.T())
    order = List.T(String.T())

    def __len__(self):
        # Grid size is the product of the lengths of all coordinate axes.
        total = 1
        for _, values in self.make_coords(self.base):
            total *= len(list(values))

        return total

    def __iter__(self):
        # Yield one regularized clone of the base source per grid node.
        for items in permudef(self.make_coords(self.base)):
            grid_source = self.base.clone(**dict(items))
            grid_source.regularize()
            yield grid_source

    def ordered_params(self):
        '''
        Yield the varied parameter names in deterministic order.

        Names listed in ``order`` come first, then remaining ones in the
        base source's declaration order.
        '''
        remaining = list(self.variables.keys())
        for name in self.order + list(self.base.keys()):
            if name in remaining:
                yield name
                remaining.remove(name)

        if remaining:
            raise Exception('Invalid parameter "%s" for source type "%s".' %
                            (remaining[0], self.base.__class__.__name__))

    def make_coords(self, base):
        '''
        Build the list of ``(parameter, values)`` coordinate axes.
        '''
        return [(param, self.variables[param].make(base=base[param]))
                for param in self.ordered_params()]

5462 

5463 

# All source model classes exported by this module.
source_classes = [
    Source,
    SourceWithMagnitude,
    SourceWithDerivedMagnitude,
    ExplosionSource,
    RectangularExplosionSource,
    DCSource,
    CLVDSource,
    VLVDSource,
    MTSource,
    RectangularSource,
    PseudoDynamicRupture,
    DoubleDCSource,
    RingfaultSource,
    CombiSource,
    SFSource,
    PorePressurePointSource,
    PorePressureLineSource,
]

# All source-time function classes exported by this module.
stf_classes = [
    STF,
    BoxcarSTF,
    TriangularSTF,
    HalfSinusoidSTF,
    ResonatorSTF,
]

# Public API: exception/helper names plus every source and STF class name.
__all__ = '''
SeismosizerError
BadRequest
NoSuchStore
DerivedMagnitudeError
STFMode
'''.split() + [S.__name__ for S in source_classes + stf_classes] + '''
Request
ProcessingStats
Response
Engine
LocalEngine
RemoteEngine
source_classes
get_engine
Range
SourceGroup
SourceList
SourceGrid
map_anchor
'''.split()