1# http://pyrocko.org - GPLv3 

2# 

3# The Pyrocko Developers, 21st Century 

4# ---|P------/S----------~Lg---------- 

5 

6''' 

7 

8 

9 

10.. _coordinate-system-names: 

11 

12Coordinate systems 

13.................. 

14 

15Coordinate system names commonly used in source models. 

16 

17================= ============================================ 

18Name Description 

19================= ============================================ 

20``'xyz'`` northing, easting, depth in [m] 

21``'xy'`` northing, easting in [m] 

22``'latlon'`` latitude, longitude in [deg] 

23``'lonlat'`` longitude, latitude in [deg] 

24``'latlondepth'`` latitude, longitude in [deg], depth in [m] 

25================= ============================================ 

26''' 

27 

28 

29from __future__ import absolute_import, division, print_function 

30 

31from collections import defaultdict 

32from functools import cmp_to_key 

33import time 

34import math 

35import os 

36import re 

37import logging 

38try: 

39 import resource 

40except ImportError: 

41 resource = None 

42from hashlib import sha1 

43 

44import numpy as num 

45from scipy.interpolate import RegularGridInterpolator 

46 

47from pyrocko.guts import (Object, Float, String, StringChoice, List, 

48 Timestamp, Int, SObject, ArgumentError, Dict, 

49 ValidationError, Bool) 

50from pyrocko.guts_array import Array 

51 

52from pyrocko import moment_tensor as pmt 

53from pyrocko import trace, util, config, model, eikonal_ext 

54from pyrocko.orthodrome import ne_to_latlon 

55from pyrocko.model import Location 

56from pyrocko.modelling import OkadaSource, make_okada_coefficient_matrix, \ 

57 okada_ext, invert_fault_dislocations_bem 

58 

59from . import meta, store, ws 

60from .tractions import TractionField, DirectedTractions 

61from .targets import Target, StaticTarget, SatelliteTarget 

62 

pjoin = os.path.join

# Prefix for guts type names defined in this module (e.g. '!pf.DCSource',
# as seen in Source.update()'s doctest below).
guts_prefix = 'pf'

# Degree <-> radian conversion factors and kilometer shorthand.
d2r = math.pi / 180.
r2d = 180. / math.pi
km = 1e3

logger = logging.getLogger('pyrocko.gf.seismosizer')

72 

73 

def cmp_none_aware(a, b):
    '''
    Three-way comparison tolerating ``None`` values and tuples.

    ``None`` sorts before any other value. Tuples are compared
    element-wise, the first differing pair deciding the order. Returns
    -1, 0 or 1 like a classic ``cmp`` function.
    '''
    if isinstance(a, tuple) and isinstance(b, tuple):
        for element_a, element_b in zip(a, b):
            result = cmp_none_aware(element_a, element_b)
            if result != 0:
                return result

        return 0

    a_missing = a is None
    b_missing = b is None

    if a_missing and b_missing:
        return 0
    elif a_missing:
        return -1
    elif b_missing:
        return 1

    return bool(a > b) - bool(a < b)

96 

97 

def xtime():
    '''
    Get the current system time in [s] (thin wrapper around
    :py:func:`time.time`).
    '''
    return time.time()

100 

101 

class SeismosizerError(Exception):
    '''
    Base class for exceptions raised by this module.
    '''
    pass

104 

105 

class BadRequest(SeismosizerError):
    '''
    Base class for errors caused by an invalid request (e.g. asking for
    an unknown GF store, see :py:class:`NoSuchStore`).
    '''
    pass

108 

109 

class DuplicateStoreId(Exception):
    '''
    Raised for duplicate GF store IDs (raised outside this excerpt).
    '''
    pass

112 

113 

class NoDefaultStoreSet(Exception):
    '''
    Raised when a default GF store is required but none is configured
    (raised outside this excerpt).
    '''
    pass

116 

117 

class ConversionError(Exception):
    '''
    Raised when a conversion between representations fails (raised
    outside this excerpt).
    '''
    pass

120 

121 

class NoSuchStore(BadRequest):
    '''
    Raised when a GF store cannot be found.

    Carries the requested store ID and the folders that were searched,
    both of which show up in the error message.
    '''

    def __init__(self, store_id=None, dirs=None):
        BadRequest.__init__(self)
        self.store_id = store_id
        self.dirs = dirs

    def __str__(self):
        if self.store_id is None:
            message = 'GF store not found.'
        else:
            message = 'no GF store with id "%s" found.' % self.store_id

        if self.dirs is not None:
            message += ' Searched folders:\n %s' % '\n '.join(
                sorted(self.dirs))

        return message

138 

139 

def ufloat(s):
    '''
    Convert a string to ``float``, supporting SI multiplier suffixes.

    :param s: string like ``'5'``, ``'100k'`` or ``'2.5M'``; the suffixes
        ``'k'`` (1e3) and ``'M'`` (1e6) are recognized
    :returns: parsed value as ``float``
    :raises ValueError: if the string is empty, consists only of a unit
        suffix, or is otherwise not a valid number
    '''
    units = {
        'k': 1e3,
        'M': 1e6,
    }

    factor = 1.0
    if s and s[-1] in units:
        factor = units[s[-1]]
        s_number = s[:-1]
        if not s_number:
            # Bugfix: report the original input. Previously the stripped
            # (empty) remainder was formatted in, always printing ''.
            raise ValueError('unit without a number: \'%s\'' % s)
    else:
        s_number = s

    return float(s_number) * factor

154 

155 

def ufloat_or_none(s):
    '''
    Like :py:func:`ufloat`, but map empty or ``None`` input to ``None``.
    '''
    return ufloat(s) if s else None

161 

162 

def int_or_none(s):
    '''
    Convert string to ``int``, mapping empty or ``None`` input to
    ``None``.
    '''
    return int(s) if s else None

168 

169 

def nonzero(x, eps=1e-15):
    '''
    Check whether ``x`` differs from zero by more than ``eps``.
    '''
    return abs(x) > eps

172 

173 

def permudef(ln, j=0):
    '''
    Generate all combinations of a list of ``(key, values)`` definitions.

    The list is modified in place: for each combination, every entry
    ``(key, values)`` from position ``j`` onward is temporarily replaced
    by ``(key, value)`` with a single value picked from ``values``, and
    the list itself is yielded. After exhaustion, the original entries
    are restored.
    '''
    if j >= len(ln):
        yield ln
        return

    key, values = ln[j]
    for choice in values:
        ln[j] = key, choice
        for combination in permudef(ln, j + 1):
            yield combination

    ln[j] = key, values

186 

187 

def arr(x):
    '''
    Coerce ``x`` into a NumPy array of at least one dimension.
    '''
    a = num.asarray(x)
    return num.atleast_1d(a)

190 

191 

def discretize_rect_source(deltas, deltat, time, north, east, depth,
                           strike, dip, length, width,
                           anchor, velocity=None, stf=None,
                           nucleation_x=None, nucleation_y=None,
                           decimation_factor=1, pointsonly=False,
                           plane_coords=False,
                           aggressive_oversampling=False):
    '''
    Discretize a rectangular source into a grid of point sub-sources.

    The plane is sampled finely enough for the given GF store spacings
    ``deltas`` and, in time mode, for the rupture ``velocity`` and the
    sampling interval ``deltat``. Sub-source onset times are derived from
    the distance to the nucleation point.

    :param anchor: anchor point name, looked up in ``map_anchor``
    :param velocity: rupture velocity [m/s]; required unless
        ``pointsonly`` is set
    :param stf: source time function, defaults to a unit pulse ``STF()``
    :param decimation_factor: reduce the number of sub-sources by this
        factor (coarser sampling)
    :param pointsonly: if ``True``, return only the geometry, no times or
        amplitudes
    :param plane_coords: if ``True``, return points in in-plane
        coordinates (no rotation or translation applied)
    :returns: ``(points, dl, dw, nl, nw)`` in pointsonly/plane_coords
        mode, else ``(points2, times2, amplitudes2, dl, dw, nl, nw)``
        with one row per (sub-source, STF sample) combination
    :raises AttributeError: if ``velocity`` is unset in time mode
    '''
    if stf is None:
        stf = STF()

    if not velocity and not pointsonly:
        raise AttributeError('velocity is required in time mode')

    # Smallest required sub-source spacing.
    mindeltagf = float(num.min(deltas))
    if velocity:
        mindeltagf = min(mindeltagf, deltat * velocity)

    ln = length
    wd = width

    if aggressive_oversampling:
        nl = int((2. / decimation_factor) * num.ceil(ln / mindeltagf)) + 1
        nw = int((2. / decimation_factor) * num.ceil(wd / mindeltagf)) + 1
    else:
        nl = int((1. / decimation_factor) * num.ceil(ln / mindeltagf)) + 1
        nw = int((1. / decimation_factor) * num.ceil(wd / mindeltagf)) + 1

    n = int(nl * nw)

    dl = ln / nl
    dw = wd / nw

    # Regular grid in plane coordinates, centered on the plane.
    xl = num.linspace(-0.5 * (ln - dl), 0.5 * (ln - dl), nl)
    xw = num.linspace(-0.5 * (wd - dw), 0.5 * (wd - dw), nw)

    points = num.zeros((n, 3))
    points[:, 0] = num.tile(xl, nw)
    points[:, 1] = num.repeat(xw, nl)

    if nucleation_x is not None:
        dist_x = num.abs(nucleation_x - points[:, 0])
    else:
        dist_x = num.zeros(n)

    if nucleation_y is not None:
        dist_y = num.abs(nucleation_y - points[:, 1])
    else:
        dist_y = num.zeros(n)

    dist = num.sqrt(dist_x**2 + dist_y**2)

    # Bugfix: in pointsonly mode ``velocity`` may be None, in which case
    # ``dist / velocity`` would raise a TypeError. Rupture times are only
    # needed in time mode, where ``velocity`` is guaranteed to be set by
    # the check above.
    if velocity:
        times = dist / velocity
    else:
        times = None

    anch_x, anch_y = map_anchor[anchor]

    points[:, 0] -= anch_x * 0.5 * length
    points[:, 1] -= anch_y * 0.5 * width

    if plane_coords:
        return points, dl, dw, nl, nw

    # Rotate into NED coordinates and shift to the source location.
    rotmat = pmt.euler_to_matrix(dip * d2r, strike * d2r, 0.0)
    points = num.dot(rotmat.T, points.T).T

    points[:, 0] += north
    points[:, 1] += east
    points[:, 2] += depth

    if pointsonly:
        return points, dl, dw, nl, nw

    # Convolve each sub-source onset with the discretized STF.
    xtau, amplitudes = stf.discretize_t(deltat, time)
    nt = xtau.size

    points2 = num.repeat(points, nt, axis=0)
    times2 = (times[:, num.newaxis] + xtau[num.newaxis, :]).ravel()
    amplitudes2 = num.tile(amplitudes, n)

    return points2, times2, amplitudes2, dl, dw, nl, nw

271 

272 

def check_rect_source_discretisation(points2, nl, nw, store):
    '''
    Check whether a rectangular source discretisation is fine enough.

    :param points2: flat array of discretised sub-source coordinates,
        reshaped here to ``(3, nl, nw)``
    :param nl: number of sub-sources along length
    :param nw: number of sub-sources along width
    :param store: GF store object providing ``config.get_vs`` and
        ``config.deltat``
    :returns: ``True`` if the sub-source spacing is considered
        sufficient, ``False`` otherwise
    '''
    # We assume a non-rotated fault plane
    N_CRITICAL = 8
    points = points2.T.reshape((3, nl, nw))
    if points.size <= N_CRITICAL:
        # Too few sub-sources to judge spacing; warn and accept.
        logger.warning('RectangularSource is defined by only %d sub-sources!'
                       % points.size)
        return True

    # Spacing between the first two rows of sub-sources (assumes a
    # regular grid, see note above).
    distances = num.sqrt(
        (points[0, 0, :] - points[0, 1, :])**2 +
        (points[1, 0, :] - points[1, 1, :])**2 +
        (points[2, 0, :] - points[2, 1, :])**2)

    depths = points[2, 0, :]
    vs_profile = store.config.get_vs(
        lat=0., lon=0.,
        points=num.repeat(depths[:, num.newaxis], 3, axis=1),
        interpolation='multilinear')

    # Shortest S-wavelength resolvable at the store's Nyquist period.
    min_wavelength = vs_profile * (store.config.deltat * 2)
    if not num.all(min_wavelength > distances / 2):
        return False
    return True

297 

298 

def outline_rect_source(strike, dip, length, width, anchor):
    '''
    Get the outline of a rectangular source as five corner points.

    The rectangle is set up in plane coordinates as a closed polygon
    (first corner repeated at the end), shifted according to ``anchor``
    and rotated by ``strike`` and ``dip``.
    '''
    half_l = 0.5 * length
    half_w = 0.5 * width
    corners = num.array(
        [[-half_l, -half_w, 0.],
         [half_l, -half_w, 0.],
         [half_l, half_w, 0.],
         [-half_l, half_w, 0.],
         [-half_l, -half_w, 0.]])

    anch_x, anch_y = map_anchor[anchor]
    corners[:, 0] -= anch_x * half_l
    corners[:, 1] -= anch_y * half_w

    rotmat = pmt.euler_to_matrix(dip * d2r, strike * d2r, 0.0)

    return num.dot(rotmat.T, corners.T).T

316 

317 

def from_plane_coords(
        strike, dip, length, width, depth, x_plane_coords, y_plane_coords,
        lat=0., lon=0.,
        north_shift=0, east_shift=0,
        anchor='top', cs='xy'):
    '''
    Convert fractional in-plane coordinates to absolute coordinates.

    ``x_plane_coords`` and ``y_plane_coords`` are fractional positions
    (scaling the plane's half-length/half-width) on a rectangular plane
    given by ``strike``, ``dip``, ``length`` and ``width``. Scalars or
    lists are accepted.

    :param cs: output coordinate system: ``'xy'`` (northing/easting),
        ``'latlon'`` or ``'lonlat'`` (see module docstring)
    :returns: two lists ``(x_abs, y_abs)``; returned as ``(y_abs,
        x_abs)`` for ``cs='lonlat'``
    '''
    ln = length
    wd = width
    x_abs = []
    y_abs = []
    if not isinstance(x_plane_coords, list):
        x_plane_coords = [x_plane_coords]
        y_plane_coords = [y_plane_coords]

    for x_plane, y_plane in zip(x_plane_coords, y_plane_coords):
        # Rectangle corners scaled by the fractional plane coordinates.
        points = num.array(
            [[-0.5 * ln * x_plane, -0.5 * wd * y_plane, 0.],
             [0.5 * ln * x_plane, -0.5 * wd * y_plane, 0.],
             [0.5 * ln * x_plane, 0.5 * wd * y_plane, 0.],
             [-0.5 * ln * x_plane, 0.5 * wd * y_plane, 0.],
             [-0.5 * ln * x_plane, -0.5 * wd * y_plane, 0.]])

        anch_x, anch_y = map_anchor[anchor]
        points[:, 0] -= anch_x * 0.5 * length
        points[:, 1] -= anch_y * 0.5 * width

        rotmat = pmt.euler_to_matrix(dip * d2r, strike * d2r, 0.0)

        points = num.dot(rotmat.T, points.T).T
        points[:, 0] += north_shift
        points[:, 1] += east_shift
        points[:, 2] += depth
        if cs in ('latlon', 'lonlat'):
            latlon = ne_to_latlon(lat, lon,
                                  points[:, 0], points[:, 1])
            latlon = num.array(latlon).T
            # NOTE(review): only single corner rows (indices 1 resp. 2)
            # of the transformed points are picked here — presumably
            # these carry the requested plane coordinate after the
            # scaling above; verify against callers.
            x_abs.append(latlon[1:2, 1])
            y_abs.append(latlon[2:3, 0])
        if cs == 'xy':
            x_abs.append(points[1:2, 1])
            y_abs.append(points[2:3, 0])

    if cs == 'lonlat':
        return y_abs, x_abs
    else:
        return x_abs, y_abs

364 

365 

def points_on_rect_source(
        strike, dip, length, width, anchor,
        discretized_basesource=None, points_x=None, points_y=None):
    '''
    Get coordinates of points on a rectangular source plane.

    Either a discretized base source is given, in which case the corner
    points of all its patches are returned, or explicit fractional
    in-plane coordinates ``points_x``/``points_y`` (scalar, list or
    array) are mapped onto the plane. The points are shifted according to
    ``anchor`` and rotated by ``strike`` and ``dip``.

    :raises ValueError: if neither ``discretized_basesource`` nor both
        ``points_x`` and ``points_y`` are given
    '''
    ln = length
    wd = width

    if isinstance(points_x, (list, float)):
        points_x = num.array([points_x])
    if isinstance(points_y, (list, float)):
        points_y = num.array([points_y])

    if discretized_basesource:
        ds = discretized_basesource

        nl_patches = ds.nl + 1
        nw_patches = ds.nw + 1

        npoints = nl_patches * nw_patches
        points = num.zeros((npoints, 3))
        ln_patches = num.arange(nl_patches)
        wd_patches = num.arange(nw_patches)

        # Normalize patch indices to the range [-1., 1.].
        points_ln = \
            2 * ((ln_patches - num.min(ln_patches)) / num.ptp(ln_patches)) - 1
        points_wd = \
            2 * ((wd_patches - num.min(wd_patches)) / num.ptp(wd_patches)) - 1

        for il in range(nl_patches):
            for iw in range(nw_patches):
                points[il * nw_patches + iw, :] = num.array([
                    points_ln[il] * ln * 0.5,
                    points_wd[iw] * wd * 0.5, 0.0])

    elif points_x is not None and points_y is not None \
            and points_x.shape[0] > 0 and points_y.shape[0] > 0:
        points = num.zeros(shape=((len(points_x), 3)))
        for i, (x, y) in enumerate(zip(points_x, points_y)):
            points[i, :] = num.array(
                [x * 0.5 * ln, y * 0.5 * wd, 0.0])

    else:
        # Bugfix: previously this case fell through with ``points``
        # undefined (NameError) or crashed on ``None.shape``
        # (AttributeError). Fail with a clear message instead.
        raise ValueError(
            'Either discretized_basesource or points_x and points_y '
            'must be given.')

    anch_x, anch_y = map_anchor[anchor]

    points[:, 0] -= anch_x * 0.5 * ln
    points[:, 1] -= anch_y * 0.5 * wd

    rotmat = pmt.euler_to_matrix(dip * d2r, strike * d2r, 0.0)

    return num.dot(rotmat.T, points.T).T

414 

415 

class InvalidGridDef(Exception):
    '''
    Raised for invalid grid or range specifications (see
    :py:class:`Range` and :py:class:`GridDef`).
    '''
    pass

418 

419 

class Range(SObject):
    '''
    Convenient range specification.

    Equivalent ways to specify the range [ 0., 1000., ... 10000. ]::

      Range('0 .. 10k : 1k')
      Range(start=0., stop=10e3, step=1e3)
      Range(0, 10e3, 1e3)
      Range('0 .. 10k @ 11')
      Range(start=0., stop=10*km, n=11)

      Range(0, 10e3, n=11)
      Range(values=[x*1e3 for x in range(11)])

    Depending on the use context, it can be possible to omit any part of
    the specification. E.g. in the context of extracting a subset of an
    already existing range, the existing range's specification values
    would be filled in where missing.

    The values are distributed with equal spacing, unless the ``spacing``
    argument is modified. The values can be created offset or relative to
    an external base value with the ``relative`` argument if the use
    context supports this.

    The range specification can be expressed with a short string
    representation::

        'start .. stop @ num | spacing, relative'
        'start .. stop : step | spacing, relative'

    most parts of the expression can be omitted if not needed. Whitespace
    is allowed for readability but can also be omitted.
    '''

    start = Float.T(optional=True)
    stop = Float.T(optional=True)
    step = Float.T(optional=True)
    n = Int.T(optional=True)
    values = Array.T(optional=True, dtype=float, shape=(None,))

    spacing = StringChoice.T(
        choices=['lin', 'log', 'symlog'],
        default='lin',
        optional=True)

    relative = StringChoice.T(
        choices=['', 'add', 'mult'],
        default='',
        optional=True)

    # Shorthand grammar: optional 'start .. stop', then '@n' or ':step',
    # then '| stuff' with spacing/relative flags.
    pattern = re.compile(r'^((?P<start>.*)\.\.(?P<stop>[^@|:]*))?'
                         r'(@(?P<n>[^|]+)|:(?P<step>[^|]+))?'
                         r'(\|(?P<stuff>.+))?$')

    def __init__(self, *args, **kwargs):
        # Accept a single shorthand string, two or three positional
        # numbers (start, stop[, step]), or pure keyword arguments.
        d = {}
        if len(args) == 1:
            d = self.parse(args[0])
        elif len(args) in (2, 3):
            d['start'], d['stop'] = [float(x) for x in args[:2]]
            if len(args) == 3:
                d['step'] = float(args[2])

        for k, v in kwargs.items():
            if k in d:
                raise ArgumentError('%s specified more than once' % k)

            d[k] = v

        SObject.__init__(self, **d)

    def __str__(self):
        # Shorthand string representation (inverse of parse()).
        def sfloat(x):
            if x is not None:
                return '%g' % x
            else:
                return ''

        if self.values:
            return ','.join('%g' % x for x in self.values)

        if self.start is None and self.stop is None:
            s0 = ''
        else:
            s0 = '%s .. %s' % (sfloat(self.start), sfloat(self.stop))

        # Compact form (no spaces) when there is no 'start .. stop' part.
        s1 = ''
        if self.step is not None:
            s1 = [' : %g', ':%g'][s0 == ''] % self.step
        elif self.n is not None:
            s1 = [' @ %i', '@%i'][s0 == ''] % self.n

        if self.spacing == 'lin' and self.relative == '':
            s2 = ''
        else:
            x = []
            if self.spacing != 'lin':
                x.append(self.spacing)

            if self.relative != '':
                x.append(self.relative)

            s2 = ' | %s' % ','.join(x)

        return s0 + s1 + s2

    @classmethod
    def parse(cls, s):
        '''
        Parse a shorthand range string into a dict of constructor
        arguments.

        :raises InvalidGridDef: if the string is not a valid range
            specification
        '''
        s = re.sub(r'\s+', '', s)
        m = cls.pattern.match(s)
        if not m:
            # Not of the 'start .. stop' form: try a plain
            # comma-separated list of values.
            try:
                vals = [ufloat(x) for x in s.split(',')]
            except Exception:
                raise InvalidGridDef(
                    '"%s" is not a valid range specification' % s)

            return dict(values=num.array(vals, dtype=float))

        d = m.groupdict()
        try:
            start = ufloat_or_none(d['start'])
            stop = ufloat_or_none(d['stop'])
            step = ufloat_or_none(d['step'])
            n = int_or_none(d['n'])
        except Exception:
            raise InvalidGridDef(
                '"%s" is not a valid range specification' % s)

        spacing = 'lin'
        relative = ''

        if d['stuff'] is not None:
            t = d['stuff'].split(',')
            for x in t:
                if x in cls.spacing.choices:
                    spacing = x
                elif x and x in cls.relative.choices:
                    relative = x
                else:
                    raise InvalidGridDef(
                        '"%s" is not a valid range specification' % s)

        return dict(start=start, stop=stop, step=step, n=n, spacing=spacing,
                    relative=relative)

    def make(self, mi=None, ma=None, inc=None, base=None, eps=1e-5):
        '''
        Create the list of values defined by this range.

        :param mi: fallback minimum, used when ``start`` is unset
        :param ma: fallback maximum, used when ``stop`` is unset
        :param inc: fallback increment, used when ``step`` is unset
        :param base: base value for relative ranges (``'add'``/``'mult'``)
        :param eps: tolerance when checking whether ``stop`` lies on the
            raster defined by ``start`` and ``step``
        :returns: list of floats (or the explicit ``values`` array, if
            given)
        :raises InvalidGridDef: on under-specified or inconsistent ranges
        '''
        if self.values:
            return self.values

        start = self.start
        stop = self.stop
        step = self.step
        n = self.n

        # A negative step means counting downward, so fallbacks swap.
        swap = step is not None and step < 0.
        if start is None:
            start = [mi, ma][swap]
        if stop is None:
            stop = [ma, mi][swap]
        if step is None and inc is not None:
            step = [inc, -inc][ma < mi]

        if start is None or stop is None:
            raise InvalidGridDef(
                'Cannot use range specification "%s" without start '
                'and stop in this context' % self)

        if step is None and n is None:
            step = stop - start

        if n is None:
            if (step < 0) != (stop - start < 0):
                raise InvalidGridDef(
                    'Range specification "%s" has inconsistent ordering '
                    '(step < 0 => stop > start)' % self)

            n = int(round((stop - start) / step)) + 1
            stop2 = start + (n - 1) * step
            if abs(stop - stop2) > eps:
                # stop does not lie on the raster: clip to the last
                # raster value not beyond stop.
                n = int(math.floor((stop - start) / step)) + 1
                stop = start + (n - 1) * step
            else:
                stop = stop2

        if start == stop:
            n = 1

        if self.spacing == 'lin':
            vals = num.linspace(start, stop, n)

        elif self.spacing in ('log', 'symlog'):
            if start > 0. and stop > 0.:
                vals = num.exp(num.linspace(num.log(start),
                                            num.log(stop), n))
            elif start < 0. and stop < 0.:
                vals = -num.exp(num.linspace(num.log(-start),
                                             num.log(-stop), n))
            else:
                raise InvalidGridDef(
                    'Log ranges should not include or cross zero '
                    '(in range specification "%s").' % self)

            if self.spacing == 'symlog':
                # Mirror the log-spaced values around zero.
                nvals = - vals
                vals = num.concatenate((nvals[::-1], vals))

        if self.relative in ('add', 'mult') and base is None:
            raise InvalidGridDef(
                'Cannot use relative range specification in this context.')

        vals = self.make_relative(base, vals)

        return list(map(float, vals))

    def make_relative(self, base, vals):
        '''
        Apply the ``relative`` mode (``'add'`` or ``'mult'``) to ``vals``
        in place, using ``base`` as the reference value.
        '''
        if self.relative == 'add':
            vals += base

        if self.relative == 'mult':
            vals *= base

        return vals

644 

645 

class GridDefElement(Object):
    '''
    One parameter range within a grid definition.

    Can be created from a shorthand string of the form
    ``'param = <range>'``, where ``<range>`` is a :py:class:`Range`
    shorthand string.
    '''

    param = meta.StringID.T()
    rs = Range.T()

    def __init__(self, shorthand=None, **kwargs):
        if shorthand is not None:
            t = shorthand.split('=')
            if len(t) != 2:
                raise InvalidGridDef(
                    'Invalid grid specification element: %s' % shorthand)

            sp, sr = t[0].strip(), t[1].strip()

            kwargs['param'] = sp
            kwargs['rs'] = Range(sr)

        Object.__init__(self, **kwargs)

    def shorthand(self):
        # Inverse of the shorthand constructor form.
        return self.param + ' = ' + str(self.rs)

667 

668 

class GridDef(Object):
    '''
    Definition of a grid of source-model variations.

    Aggregates a list of :py:class:`GridDefElement` parameter ranges. Can
    be created from a shorthand string with elements separated by
    newlines or ``';'``.
    '''

    elements = List.T(GridDefElement.T())

    def __init__(self, shorthand=None, **kwargs):
        if shorthand is not None:
            t = shorthand.splitlines()
            tt = []
            for x in t:
                x = x.strip()
                if x:
                    tt.extend(x.split(';'))

            elements = []
            for se in tt:
                # Bugfix: each entry is one GridDefElement. Constructing
                # GridDef(se) here (as before) would recurse endlessly on
                # any non-empty shorthand and violate the declared
                # List.T(GridDefElement.T()) element type.
                elements.append(GridDefElement(se))

            kwargs['elements'] = elements

        Object.__init__(self, **kwargs)

    def shorthand(self):
        # NOTE(review): joins str(x), not x.shorthand() — for guts
        # objects these differ; confirm which representation callers
        # expect before changing.
        return '; '.join(str(x) for x in self.elements)

692 

693 

class Cloneable(object):
    '''
    Mix-in providing ``dict``-like item access and a :py:meth:`clone`
    method, driven by the class's guts property names (``T.propnames``).
    '''

    def __iter__(self):
        return iter(self.T.propnames)

    def __getitem__(self, k):
        if k in self.keys():
            return getattr(self, k)

        raise KeyError(k)

    def __setitem__(self, k, v):
        if k in self.keys():
            return setattr(self, k, v)

        raise KeyError(k)

    def clone(self, **kwargs):
        '''
        Make a copy of the object.

        A new instance of the same class is created from this object's
        parameters; nested :py:class:`Cloneable` values are themselves
        cloned. Keyword arguments override individual initialization
        parameters.
        '''

        params = dict(self)
        for name in params:
            value = params[name]
            if isinstance(value, Cloneable):
                params[name] = value.clone()

        params.update(kwargs)
        return self.__class__(**params)

    @classmethod
    def keys(cls):
        '''
        Get list of the source model's parameter names.
        '''

        return cls.T.propnames

737 

738 

class STF(Object, Cloneable):

    '''
    Base class for source time functions.
    '''

    def __init__(self, effective_duration=None, **kwargs):
        # Allow specifying the STF via its effective duration; this is
        # converted to the nominal duration with the class-specific
        # factor.
        if effective_duration is not None:
            kwargs['duration'] = \
                effective_duration / self.factor_duration_to_effective()

        Object.__init__(self, **kwargs)

    @classmethod
    def factor_duration_to_effective(cls):
        # Ratio of effective to nominal duration; 1 for the base class.
        return 1.0

    def centroid_time(self, tref):
        # Unit pulse: the centroid coincides with the reference time.
        return tref

    @property
    def effective_duration(self):
        return self.duration * self.factor_duration_to_effective()

    def discretize_t(self, deltat, tref):
        '''
        Discretize the STF onto the sampling raster.

        For the base class (unit pulse), the pulse is split between the
        two raster nodes bracketing ``tref``, weighted by proximity.

        :returns: tuple ``(times, amplitudes)`` of float arrays
        '''
        t_floor = math.floor(tref / deltat) * deltat
        t_ceil = math.ceil(tref / deltat) * deltat
        if t_floor == t_ceil:
            return num.array([t_floor], dtype=float), num.ones(1)

        weights = num.array(
            [t_ceil - tref, tref - t_floor], dtype=float) / deltat
        return num.array([t_floor, t_ceil], dtype=float), weights

    def base_key(self):
        # Key identifying this STF configuration for GF caching.
        return (type(self).__name__,)

775 

776 

# A plain STF() discretizes to a unit pulse (see STF.discretize_t);
# module-level shared instance.
g_unit_pulse = STF()

778 

779 

def sshift(times, amplitudes, tshift, deltat):
    '''
    Shift a sampled sequence by a sub-sample amount.

    The shift ``tshift`` is distributed between its two bracketing
    sample offsets by linear interpolation, producing a sequence one
    sample longer. If ``tshift`` is an exact multiple of ``deltat``, the
    input arrays are returned unchanged.
    '''
    shift_floor = math.floor(tshift / deltat) * deltat
    shift_ceil = math.ceil(tshift / deltat) * deltat
    if shift_floor == shift_ceil:
        return times, amplitudes

    weight_lo = (shift_ceil - tshift) / deltat
    weight_hi = (tshift - shift_floor) / deltat

    amplitudes_shifted = num.zeros(amplitudes.size + 1, dtype=float)
    amplitudes_shifted[:-1] += weight_lo * amplitudes
    amplitudes_shifted[1:] += weight_hi * amplitudes

    times_shifted = num.arange(times.size + 1, dtype=float) * deltat \
        + times[0] + shift_floor

    return times_shifted, amplitudes_shifted

796 

797 

class BoxcarSTF(STF):

    '''
    Boxcar type source time function.

    .. figure :: /static/stf-BoxcarSTF.svg
        :width: 40%
        :align: center
        :alt: boxcar source time function
    '''

    duration = Float.T(
        default=0.0,
        help='duration of the boxcar')

    anchor = Float.T(
        default=0.0,
        help='anchor point with respect to source.time: ('
             '-1.0: left -> source duration [0, T] ~ hypocenter time, '
             ' 0.0: center -> source duration [-T/2, T/2] ~ centroid time, '
             '+1.0: right -> source duration [-T, 0] ~ rupture end time)')

    @classmethod
    def factor_duration_to_effective(cls):
        # For a boxcar, nominal and effective duration coincide.
        return 1.0

    def centroid_time(self, tref):
        # Symmetric STF: the centroid offset depends only on the anchor.
        return tref - 0.5 * self.duration * self.anchor

    def discretize_t(self, deltat, tref):
        '''
        Discretize the boxcar onto the GF sampling raster.

        :returns: ``(times, amplitudes)``; amplitudes sum to 1 and the
            sequence is sub-sample shifted (via :py:func:`sshift`) so
            that its centroid matches :py:meth:`centroid_time`.
        '''
        tmin_stf = tref - self.duration * (self.anchor + 1.) * 0.5
        tmax_stf = tref + self.duration * (1. - self.anchor) * 0.5
        tmin = round(tmin_stf / deltat) * deltat
        tmax = round(tmax_stf / deltat) * deltat
        nt = int(round((tmax - tmin) / deltat)) + 1
        times = num.linspace(tmin, tmax, nt)
        amplitudes = num.ones_like(times)
        if times.size > 1:
            # Integrate the piecewise-linear boxcar over each raster
            # cell to get the sample weights.
            t_edges = num.linspace(
                tmin - 0.5 * deltat, tmax + 0.5 * deltat, nt + 1)
            t = tmin_stf + self.duration * num.array(
                [0.0, 0.0, 1.0, 1.0], dtype=float)
            f = num.array([0., 1., 1., 0.], dtype=float)
            amplitudes = util.plf_integrate_piecewise(t_edges, t, f)
            amplitudes /= num.sum(amplitudes)

        # Residual centroid shift introduced by snapping to the raster.
        tshift = (num.sum(amplitudes * times) - self.centroid_time(tref))

        return sshift(times, amplitudes, -tshift, deltat)

    def base_key(self):
        # Key identifying this STF configuration for GF caching.
        return (type(self).__name__, self.duration, self.anchor)

850 

851 

class TriangularSTF(STF):

    '''
    Triangular type source time function.

    .. figure :: /static/stf-TriangularSTF.svg
        :width: 40%
        :align: center
        :alt: triangular source time function
    '''

    duration = Float.T(
        default=0.0,
        help='baseline of the triangle')

    peak_ratio = Float.T(
        default=0.5,
        help='fraction of time compared to duration, '
             'when the maximum amplitude is reached')

    anchor = Float.T(
        default=0.0,
        help='anchor point with respect to source.time: ('
             '-1.0: left -> source duration [0, T] ~ hypocenter time, '
             ' 0.0: center -> source duration [-T/2, T/2] ~ centroid time, '
             '+1.0: right -> source duration [-T, 0] ~ rupture end time)')

    @classmethod
    def factor_duration_to_effective(cls, peak_ratio=None):
        # Ratio between effective duration and the triangle baseline,
        # as a function of the peak position.
        if peak_ratio is None:
            peak_ratio = cls.peak_ratio.default()

        return math.sqrt((peak_ratio**2 - peak_ratio + 1.0) * 2.0 / 3.0)

    def __init__(self, effective_duration=None, **kwargs):
        # Override of STF.__init__: the duration conversion here depends
        # on peak_ratio, which must be looked up from kwargs.
        if effective_duration is not None:
            kwargs['duration'] = effective_duration / \
                self.factor_duration_to_effective(
                    kwargs.get('peak_ratio', None))

        STF.__init__(self, **kwargs)

    @property
    def centroid_ratio(self):
        # Position of the triangle's centroid as a fraction of duration.
        ra = self.peak_ratio
        rb = 1.0 - ra
        return self.peak_ratio + (rb**2 / 3. - ra**2 / 3.) / (ra + rb)

    def centroid_time(self, tref):
        ca = self.centroid_ratio
        cb = 1.0 - ca
        if self.anchor <= 0.:
            return tref - ca * self.duration * self.anchor
        else:
            return tref - cb * self.duration * self.anchor

    @property
    def effective_duration(self):
        return self.duration * self.factor_duration_to_effective(
            self.peak_ratio)

    def tminmax_stf(self, tref):
        # Begin and end times of the continuous STF relative to tref,
        # anchored according to the anchor setting.
        ca = self.centroid_ratio
        cb = 1.0 - ca
        if self.anchor <= 0.:
            tmin_stf = tref - ca * self.duration * (self.anchor + 1.)
            tmax_stf = tmin_stf + self.duration
        else:
            tmax_stf = tref + cb * self.duration * (1. - self.anchor)
            tmin_stf = tmax_stf - self.duration

        return tmin_stf, tmax_stf

    def discretize_t(self, deltat, tref):
        '''
        Discretize the triangle onto the GF sampling raster.

        :returns: ``(times, amplitudes)``, amplitudes normalized to sum
            to 1
        '''
        tmin_stf, tmax_stf = self.tminmax_stf(tref)

        tmin = round(tmin_stf / deltat) * deltat
        tmax = round(tmax_stf / deltat) * deltat
        nt = int(round((tmax - tmin) / deltat)) + 1
        if nt > 1:
            # Integrate the piecewise-linear triangle over each raster
            # cell to get the sample weights.
            t_edges = num.linspace(
                tmin - 0.5 * deltat, tmax + 0.5 * deltat, nt + 1)
            t = tmin_stf + self.duration * num.array(
                [0.0, self.peak_ratio, 1.0], dtype=float)
            f = num.array([0., 1., 0.], dtype=float)
            amplitudes = util.plf_integrate_piecewise(t_edges, t, f)
            amplitudes /= num.sum(amplitudes)
        else:
            amplitudes = num.ones(1)

        times = num.linspace(tmin, tmax, nt)
        return times, amplitudes

    def base_key(self):
        # Key identifying this STF configuration for GF caching.
        return (
            type(self).__name__, self.duration, self.peak_ratio, self.anchor)

948 

949 

class HalfSinusoidSTF(STF):

    '''
    Half sinusoid type source time function.

    .. figure :: /static/stf-HalfSinusoidSTF.svg
        :width: 40%
        :align: center
        :alt: half-sinusoid source time function
    '''

    duration = Float.T(
        default=0.0,
        help='duration of the half-sinusoid (baseline)')

    anchor = Float.T(
        default=0.0,
        help='anchor point with respect to source.time: ('
             '-1.0: left -> source duration [0, T] ~ hypocenter time, '
             ' 0.0: center -> source duration [-T/2, T/2] ~ centroid time, '
             '+1.0: right -> source duration [-T, 0] ~ rupture end time)')

    exponent = Int.T(
        default=1,
        help='set to 2 to use square of the half-period sinusoidal function.')

    def __init__(self, effective_duration=None, **kwargs):
        # Override of STF.__init__: the duration conversion here depends
        # on the exponent, which must be looked up from kwargs.
        if effective_duration is not None:
            kwargs['duration'] = effective_duration / \
                self.factor_duration_to_effective(
                    kwargs.get('exponent', 1))

        STF.__init__(self, **kwargs)

    @classmethod
    def factor_duration_to_effective(cls, exponent):
        # Ratio between effective and nominal duration, depending on the
        # exponent of the half-sinusoid.
        if exponent == 1:
            return math.sqrt(3.0 * math.pi**2 - 24.0) / math.pi
        elif exponent == 2:
            return math.sqrt(math.pi**2 - 6) / math.pi
        else:
            raise ValueError('Exponent for HalfSinusoidSTF must be 1 or 2.')

    @property
    def effective_duration(self):
        return self.duration * self.factor_duration_to_effective(self.exponent)

    def centroid_time(self, tref):
        # Symmetric STF: the centroid offset depends only on the anchor.
        return tref - 0.5 * self.duration * self.anchor

    def discretize_t(self, deltat, tref):
        '''
        Discretize the half-sinusoid onto the GF sampling raster.

        :returns: ``(times, amplitudes)``, amplitudes normalized to sum
            to 1
        '''
        tmin_stf = tref - self.duration * (self.anchor + 1.) * 0.5
        tmax_stf = tref + self.duration * (1. - self.anchor) * 0.5
        tmin = round(tmin_stf / deltat) * deltat
        tmax = round(tmax_stf / deltat) * deltat
        nt = int(round((tmax - tmin) / deltat)) + 1
        if nt > 1:
            # Clip the cell edges to the STF support, then evaluate the
            # analytic antiderivative to integrate over each raster cell.
            t_edges = num.maximum(tmin_stf, num.minimum(tmax_stf, num.linspace(
                tmin - 0.5 * deltat, tmax + 0.5 * deltat, nt + 1)))

            if self.exponent == 1:
                fint = -num.cos(
                    (t_edges - tmin_stf) * (math.pi / self.duration))

            elif self.exponent == 2:
                fint = (t_edges - tmin_stf) / self.duration \
                    - 1.0 / (2.0 * math.pi) * num.sin(
                        (t_edges - tmin_stf) * (2.0 * math.pi / self.duration))
            else:
                raise ValueError(
                    'Exponent for HalfSinusoidSTF must be 1 or 2.')

            amplitudes = fint[1:] - fint[:-1]
            amplitudes /= num.sum(amplitudes)
        else:
            amplitudes = num.ones(1)

        times = num.linspace(tmin, tmax, nt)
        return times, amplitudes

    def base_key(self):
        # Key identifying this STF configuration for GF caching.
        # NOTE(review): ``exponent`` is not part of the key although it
        # changes the discretization — confirm whether that is intended.
        return (type(self).__name__, self.duration, self.anchor)

1032 

1033 

class SmoothRampSTF(STF):
    '''
    Smooth-ramp type source time function for near-field displacement.
    Based on moment function of double-couple point source proposed by
    Bruestle and Mueller (PEPI, 1983).

    .. [1] W. Bruestle, G. Mueller (1983), Moment and duration of shallow
        earthquakes from Love-wave modelling for regional distances, PEPI 32,
        312-324.

    .. figure :: /static/stf-SmoothRampSTF.svg
        :width: 40%
        :alt: smooth ramp source time function
    '''
    duration = Float.T(
        default=0.0,
        help='duration of the ramp (baseline)')

    rise_ratio = Float.T(
        default=0.5,
        help='fraction of time compared to duration, '
             'when the maximum amplitude is reached')

    anchor = Float.T(
        default=0.0,
        help='anchor point with respect to source.time: ('
             '-1.0: left -> source duration ``[0, T]`` ~ hypocenter time, '
             '0.0: center -> source duration ``[-T/2, T/2]`` ~ centroid time, '
             '+1.0: right -> source duration ``[-T, 0]`` ~ rupture end time)')

    def discretize_t(self, deltat, tref):
        '''
        Discretize the smooth ramp onto the GF sampling raster.

        :returns: ``(times, amplitudes)``, amplitudes normalized to sum
            to 1
        '''
        tmin_stf = tref - self.duration * (self.anchor + 1.) * 0.5
        tmax_stf = tref + self.duration * (1. - self.anchor) * 0.5
        tmin = round(tmin_stf / deltat) * deltat
        tmax = round(tmax_stf / deltat) * deltat
        D = round((tmax - tmin) / deltat) * deltat
        nt = int(round(D / deltat)) + 1
        times = num.linspace(tmin, tmax, nt)
        if nt > 1:
            rise_time = self.rise_ratio * self.duration
            amplitudes = num.ones_like(times)
            tp = tmin + rise_time
            # Cosine-shaped ramp up to the rise time; constant amplitude
            # afterwards.
            ii = num.where(times <= tp)
            t_inc = times[ii]
            a = num.cos(num.pi * (t_inc - tmin_stf) / rise_time)
            b = num.cos(3 * num.pi * (t_inc - tmin_stf) / rise_time) - 1.0
            amplitudes[ii] = (9. / 16.) * (1 - a + (1. / 9.) * b)

            amplitudes /= num.sum(amplitudes)
        else:
            amplitudes = num.ones(1)

        return times, amplitudes

    def base_key(self):
        # Key identifying this STF configuration for GF caching.
        return (type(self).__name__,
                self.duration, self.rise_ratio, self.anchor)

1091 

1092 

class ResonatorSTF(STF):
    '''
    Simple resonator like source time function.

    .. math ::

        f(t) = 0 for t < 0
        f(t) = e^{-t/tau} * sin(2 * pi * f * t)


    .. figure :: /static/stf-SmoothRampSTF.svg
        :width: 40%
        :alt: smooth ramp source time function

    '''

    duration = Float.T(
        default=0.0,
        help='decay time')

    frequency = Float.T(
        default=1.0,
        help='resonance frequency')

    def discretize_t(self, deltat, tref):
        '''
        Discretize the resonator response onto the GF sampling raster.

        An exponentially decaying sinusoid is sampled from ``tref`` to
        three decay times after ``tref``. Unlike the other STFs in this
        module, the amplitudes are not normalized to sum to 1.
        '''
        tmin_stf = tref
        tmax_stf = tref + self.duration * 3
        tmin = math.floor(tmin_stf / deltat) * deltat
        tmax = math.ceil(tmax_stf / deltat) * deltat
        times = util.arange2(tmin, tmax, deltat)
        amplitudes = num.exp(-(times - tref) / self.duration) \
            * num.sin(2.0 * num.pi * self.frequency * (times - tref))

        return times, amplitudes

    def base_key(self):
        # Key identifying this STF configuration for GF caching.
        return (type(self).__name__,
                self.duration, self.frequency)

1131 

1132 

class STFMode(StringChoice):
    '''
    When the source time function is applied: ``'pre'`` during source
    discretization (before GF stacking, see
    :py:meth:`Source.effective_stf_pre`) or ``'post'`` on the stacked
    synthetics (see :py:meth:`Source.effective_stf_post`).
    '''
    choices = ['pre', 'post']

1135 

1136 

class Source(Location, Cloneable):
    '''
    Base class for all source models.
    '''

    name = String.T(optional=True, default='')

    time = Timestamp.T(
        default=Timestamp.D('1970-01-01 00:00:00'),
        help='source origin time.')

    stf = STF.T(
        optional=True,
        help='source time function.')

    stf_mode = STFMode.T(
        default='post',
        help='whether to apply source time function in pre or '
             'post-processing.')

    def __init__(self, **kwargs):
        Location.__init__(self, **kwargs)

    def update(self, **kwargs):
        '''
        Change some of the source models parameters.

        Example::

            >>> from pyrocko import gf
            >>> s = gf.DCSource()
            >>> s.update(strike=66., dip=33.)
            >>> print(s)
            --- !pf.DCSource
            depth: 0.0
            time: 1970-01-01 00:00:00
            magnitude: 6.0
            strike: 66.0
            dip: 33.0
            rake: 0.0

        '''

        # Cloneable provides item-style write access to the source's
        # guts attributes.
        for (k, v) in kwargs.items():
            self[k] = v

    def grid(self, **variables):
        '''
        Create grid of source model variations.

        :returns: :py:class:`SourceGrid` instance.

        Example::

            >>> from pyrocko import gf
            >>> base = DCSource()
            >>> R = gf.Range
            >>> for s in base.grid(strike=R(start=0., stop=90., step=15.)):
            ...     print(s.strike)

        '''
        return SourceGrid(base=self, variables=variables)

    def base_key(self):
        '''
        Get key to decide about source discretization / GF stack sharing.

        When two source models differ only in amplitude and origin time, the
        discretization and the GF stacking can be done only once for a unit
        amplitude and a zero origin time and the amplitude and origin times of
        the seismograms can be applied during post-processing of the synthetic
        seismogram.

        For any derived parameterized source model, this method is called to
        decide if discretization and stacking of the source should be shared.
        When two source models return an equal vector of values discretization
        is shared.
        '''
        return (self.depth, self.lat, self.north_shift,
                self.lon, self.east_shift, self.time, type(self).__name__) + \
            self.effective_stf_pre().base_key()

    def get_factor(self):
        '''
        Get the scaling factor to be applied during post-processing.

        Discretization of the base seismogram is usually done for a unit
        amplitude, because a common factor can be efficiently multiplied to
        final seismograms. This eliminates the need to repeat the stacking
        when creating seismograms for a series of source models only
        differing in amplitude.

        This method should return the scaling factor to apply in the
        post-processing (often this is simply the scalar moment of the source).
        '''

        return 1.0

    def effective_stf_pre(self):
        '''
        Return the STF applied before stacking of the Green's functions.

        This STF is used during discretization of the parameterized source
        models, i.e. to produce a temporal distribution of point sources.

        Handling of the STF before stacking of the GFs is less efficient but
        allows to use different source time functions for different parts of
        the source.
        '''

        if self.stf is not None and self.stf_mode == 'pre':
            return self.stf
        else:
            return g_unit_pulse

    def effective_stf_post(self):
        '''
        Return the STF applied after stacking of the Green's functions.

        This STF is used in the post-processing of the synthetic seismograms.

        Handling of the STF after stacking of the GFs is usually more efficient
        but is only possible when a common STF is used for all subsources.
        '''

        if self.stf is not None and self.stf_mode == 'post':
            return self.stf
        else:
            return g_unit_pulse

    def _dparams_base(self):
        # Keyword arguments common to all discretized source
        # representations, for a single point in space and time.
        return dict(times=arr(self.time),
                    lat=self.lat, lon=self.lon,
                    north_shifts=arr(self.north_shift),
                    east_shifts=arr(self.east_shift),
                    depths=arr(self.depth))

    def _hash(self):
        # Hex digest summarizing base_key(), usable as a cache identifier.
        sha = sha1()
        for k in self.base_key():
            sha.update(str(k).encode())
        return sha.hexdigest()

    def _dparams_base_repeated(self, times):
        # Like _dparams_base(), but with the spatial parameters repeated
        # to match the given array of source times (one entry per STF
        # sample). Falls back to _dparams_base() when times is None.
        if times is None:
            return self._dparams_base()

        nt = times.size
        north_shifts = num.repeat(self.north_shift, nt)
        east_shifts = num.repeat(self.east_shift, nt)
        depths = num.repeat(self.depth, nt)
        return dict(times=times,
                    lat=self.lat, lon=self.lon,
                    north_shifts=north_shifts,
                    east_shifts=east_shifts,
                    depths=depths)

    def pyrocko_event(self, store=None, target=None, **kwargs):
        '''
        Convert the source to a :py:class:`pyrocko.model.Event` object.

        The STF's effective duration, if an STF is set, is carried over to
        the event's ``duration`` attribute.
        '''
        duration = None
        if self.stf:
            duration = self.stf.effective_duration

        return model.Event(
            lat=self.lat,
            lon=self.lon,
            north_shift=self.north_shift,
            east_shift=self.east_shift,
            time=self.time,
            name=self.name,
            depth=self.depth,
            duration=duration,
            **kwargs)

    def outline(self, cs='xyz'):
        '''
        Get the source's outline (a single point for a point source).

        :param cs: output coordinate system, see
            :ref:`coordinate-system-names`
        '''
        # NOTE(review): 'latlondepth' (listed in the module's
        # coordinate-system table) is not handled here and unknown values
        # of ``cs`` return None — confirm whether that is intended.
        points = num.atleast_2d(num.zeros([1, 3]))

        points[:, 0] += self.north_shift
        points[:, 1] += self.east_shift
        points[:, 2] += self.depth
        if cs == 'xyz':
            return points
        elif cs == 'xy':
            return points[:, :2]
        elif cs in ('latlon', 'lonlat'):
            latlon = ne_to_latlon(
                self.lat, self.lon, points[:, 0], points[:, 1])

            latlon = num.array(latlon).T
            if cs == 'latlon':
                return latlon
            else:
                return latlon[:, ::-1]

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        '''
        Create a source from a :py:class:`pyrocko.model.Event` object.

        :raises ConversionError: if the event has no depth information.
        '''
        if ev.depth is None:
            raise ConversionError(
                'Cannot convert event object to source object: '
                'no depth information available')

        # An event duration, if available, is mapped to a half-sinusoid STF.
        stf = None
        if ev.duration is not None:
            stf = HalfSinusoidSTF(effective_duration=ev.duration)

        d = dict(
            name=ev.name,
            time=ev.time,
            lat=ev.lat,
            lon=ev.lon,
            north_shift=ev.north_shift,
            east_shift=ev.east_shift,
            depth=ev.depth,
            stf=stf)
        d.update(kwargs)
        return cls(**d)

    def get_magnitude(self):
        # Subclasses with a defined or derivable magnitude override this.
        raise NotImplementedError(
            '%s does not implement get_magnitude()'
            % self.__class__.__name__)

1356 

1357 

class SourceWithMagnitude(Source):
    '''
    Base class for sources containing a moment magnitude.
    '''

    magnitude = Float.T(
        default=6.0,
        help='Moment magnitude Mw as in [Hanks and Kanamori, 1979]')

    def __init__(self, **kwargs):
        # Accept 'moment' as an alternative to 'magnitude'; an explicitly
        # given magnitude takes precedence.
        if 'moment' in kwargs:
            mom = kwargs.pop('moment')
            if 'magnitude' not in kwargs:
                kwargs['magnitude'] = float(pmt.moment_to_magnitude(mom))

        Source.__init__(self, **kwargs)

    @property
    def moment(self):
        # Scalar moment [Nm], derived on the fly from the magnitude.
        return float(pmt.magnitude_to_moment(self.magnitude))

    @moment.setter
    def moment(self, value):
        self.magnitude = float(pmt.moment_to_magnitude(value))

    def pyrocko_event(self, store=None, target=None, **kwargs):
        '''
        Convert to a :py:class:`pyrocko.model.Event`, carrying over the
        magnitude.
        '''
        return Source.pyrocko_event(
            self, store, target,
            magnitude=self.magnitude,
            **kwargs)

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        '''
        Create a source from a :py:class:`pyrocko.model.Event` object,
        taking over its magnitude if available.
        '''
        d = {}
        # Explicit None check so that a valid magnitude of exactly 0.0 is
        # not silently dropped (bugfix: former truthiness test 'if
        # ev.magnitude:' discarded zero magnitudes).
        if ev.magnitude is not None:
            d.update(magnitude=ev.magnitude)

        d.update(kwargs)
        return super(SourceWithMagnitude, cls).from_pyrocko_event(ev, **d)

    def get_magnitude(self):
        return self.magnitude

1400 

1401 

class DerivedMagnitudeError(ValidationError):
    '''
    Raised when a source's magnitude cannot be derived from its other
    parameters, or when conflicting magnitude-defining parameters are set.
    '''
    pass

1404 

1405 

class SourceWithDerivedMagnitude(Source):
    '''
    Base class for sources whose magnitude is derived from other
    parameters (e.g. slip or volume change).
    '''

    class __T(Source.T):

        def validate_extra(self, val):
            # In addition to standard guts validation, reject conflicting
            # parameter combinations.
            Source.T.validate_extra(self, val)
            val.check_conflicts()

    def check_conflicts(self):
        '''
        Check for parameter conflicts.

        To be overloaded in subclasses. Raises :py:exc:`DerivedMagnitudeError`
        on conflicts.
        '''
        pass

    def get_magnitude(self, store=None, target=None):
        # Subclasses override this with the actual derivation.
        raise DerivedMagnitudeError('No magnitude set.')

    def get_moment(self, store=None, target=None):
        '''
        Get the scalar moment [Nm] corresponding to the derived magnitude.
        '''
        return float(pmt.magnitude_to_moment(
            self.get_magnitude(store, target)))

    def pyrocko_moment_tensor(self, store=None, target=None):
        raise NotImplementedError(
            '%s does not implement pyrocko_moment_tensor()'
            % self.__class__.__name__)

    def pyrocko_event(self, store=None, target=None, **kwargs):
        '''
        Convert to a :py:class:`pyrocko.model.Event`, attaching moment
        tensor and magnitude when they can be derived.
        '''
        try:
            mt = self.pyrocko_moment_tensor(store, target)
            # Pass store and target along so that magnitudes which need
            # the earth model (e.g. derived from slip or volume change)
            # can be computed here too (bugfix: was called without
            # arguments, forcing the fallback even when a store was given).
            magnitude = self.get_magnitude(store, target)
        except (DerivedMagnitudeError, NotImplementedError):
            mt = None
            magnitude = None

        return Source.pyrocko_event(
            self, store, target,
            moment_tensor=mt,
            magnitude=magnitude,
            **kwargs)

1448 

1449 

class ExplosionSource(SourceWithDerivedMagnitude):
    '''
    An isotropic explosion point source.

    Its strength may be given either as a magnitude or as a volume change;
    the two are mutually exclusive (see :py:meth:`check_conflicts`).
    '''

    magnitude = Float.T(
        optional=True,
        help='moment magnitude Mw as in [Hanks and Kanamori, 1979]')

    volume_change = Float.T(
        optional=True,
        help='volume change of the explosion/implosion or '
             'the contracting/extending magmatic source. [m^3]')

    discretized_source_class = meta.DiscretizedExplosionSource

    def __init__(self, **kwargs):
        # Accept 'moment' as an alternative to 'magnitude'; an explicitly
        # given magnitude takes precedence.
        if 'moment' in kwargs:
            mom = kwargs.pop('moment')
            if 'magnitude' not in kwargs:
                kwargs['magnitude'] = float(pmt.moment_to_magnitude(mom))

        SourceWithDerivedMagnitude.__init__(self, **kwargs)

    def base_key(self):
        return SourceWithDerivedMagnitude.base_key(self) + \
            (self.volume_change,)

    def check_conflicts(self):
        if self.magnitude is not None and self.volume_change is not None:
            raise DerivedMagnitudeError(
                'Magnitude and volume_change are both defined.')

    def get_magnitude(self, store=None, target=None):
        '''
        Get the magnitude, deriving it from the volume change if needed.

        :raises DerivedMagnitudeError: on conflicting parameters or when a
            required earth model is not available.
        '''
        self.check_conflicts()

        if self.magnitude is not None:
            return self.magnitude

        elif self.volume_change is not None:
            moment = self.volume_change * \
                self.get_moment_to_volume_change_ratio(store, target)

            return float(pmt.moment_to_magnitude(abs(moment)))
        else:
            # Neither given: unit moment.
            return float(pmt.moment_to_magnitude(1.0))

    def get_volume_change(self, store=None, target=None):
        '''
        Get the volume change, deriving it from the magnitude if needed.
        '''
        self.check_conflicts()

        if self.volume_change is not None:
            return self.volume_change

        elif self.magnitude is not None:
            moment = float(pmt.magnitude_to_moment(self.magnitude))
            return moment / self.get_moment_to_volume_change_ratio(
                store, target)

        else:
            # Neither given: volume change of a unit moment. Pass target
            # along so its interpolation setting is honored (bugfix: was
            # dropped in this branch, inconsistent with the branch above).
            return 1.0 / self.get_moment_to_volume_change_ratio(
                store, target)

    def get_moment_to_volume_change_ratio(self, store, target=None):
        '''
        Conversion factor between scalar moment and volume change, based
        on the shear modulus at the source position in the store's earth
        model.

        :raises DerivedMagnitudeError: if no store is given or the source
            position is outside the model.
        '''
        if store is None:
            raise DerivedMagnitudeError(
                'Need earth model to convert between volume change and '
                'magnitude.')

        points = num.array(
            [[self.north_shift, self.east_shift, self.depth]], dtype=float)

        interpolation = target.interpolation if target else 'multilinear'
        try:
            shear_moduli = store.config.get_shear_moduli(
                self.lat, self.lon,
                points=points,
                interpolation=interpolation)[0]
        except meta.OutOfBounds:
            raise DerivedMagnitudeError(
                'Could not get shear modulus at source position.')

        return float(3. * shear_moduli)

    def get_factor(self):
        # Full scaling is applied during discretization; no common
        # post-processing factor.
        return 1.0

    def discretize_basesource(self, store, target=None):
        '''
        Discretize to a temporal distribution of isotropic point sources.
        '''
        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)

        # Scale diagonal entries so the isotropic tensor carries the
        # desired scalar moment (same sqrt(2/3) factor as in
        # pyrocko_moment_tensor below).
        amplitudes *= self.get_moment(store, target) * math.sqrt(2. / 3.)

        if self.volume_change is not None:
            if self.volume_change < 0.:
                # Implosion: flip the sign of the source.
                amplitudes *= -1

        return meta.DiscretizedExplosionSource(
            m0s=amplitudes,
            **self._dparams_base_repeated(times))

    def pyrocko_moment_tensor(self, store=None, target=None):
        a = self.get_moment(store, target) * math.sqrt(2. / 3.)
        return pmt.MomentTensor(m=pmt.symmat6(a, a, a, 0., 0., 0.))

1552 

1553 

class RectangularExplosionSource(ExplosionSource):
    '''
    Rectangular or line explosion source.

    An extended explosion source discretized into a distribution of point
    explosions over a rectangular plane, with an explosion front expanding
    at constant speed from an optional nucleation point.
    '''

    discretized_source_class = meta.DiscretizedExplosionSource

    strike = Float.T(
        default=0.0,
        help='strike direction in [deg], measured clockwise from north')

    dip = Float.T(
        default=90.0,
        help='dip angle in [deg], measured downward from horizontal')

    length = Float.T(
        default=0.,
        help='length of rectangular source area [m]')

    width = Float.T(
        default=0.,
        help='width of rectangular source area [m]')

    # NOTE(review): the help text mentions 'center_left' and 'center
    # right', but those values are not in 'choices' (compare
    # RectangularSource) — confirm the intended choice set. The help
    # string is runtime data and is left unchanged here.
    anchor = StringChoice.T(
        choices=['top', 'top_left', 'top_right', 'center', 'bottom',
                 'bottom_left', 'bottom_right'],
        default='center',
        optional=True,
        help='Anchor point for positioning the plane, can be: top, center or'
             'bottom and also top_left, top_right,bottom_left,'
             'bottom_right, center_left and center right')

    nucleation_x = Float.T(
        optional=True,
        help='horizontal position of rupture nucleation in normalized fault '
             'plane coordinates (-1 = left edge, +1 = right edge)')

    nucleation_y = Float.T(
        optional=True,
        help='down-dip position of rupture nucleation in normalized fault '
             'plane coordinates (-1 = upper edge, +1 = lower edge)')

    velocity = Float.T(
        default=3500.,
        help='speed of explosion front [m/s]')

    aggressive_oversampling = Bool.T(
        default=False,
        help='Aggressive oversampling for basesource discretization. '
             'When using \'multilinear\' interpolation oversampling has'
             ' practically no effect.')

    def base_key(self):
        # Plane geometry and rupture parameters extend the base key.
        return Source.base_key(self) + (self.strike, self.dip, self.length,
                                        self.width, self.nucleation_x,
                                        self.nucleation_y, self.velocity,
                                        self.anchor)

    def discretize_basesource(self, store, target=None):
        '''
        Discretize into a distribution of point explosions over the plane.

        Amplitudes are normalized to unit sum and then scaled with the
        source's total moment.
        '''

        # Convert normalized nucleation coordinates [-1, 1] to offsets in
        # meters from the plane center.
        if self.nucleation_x is not None:
            nucx = self.nucleation_x * 0.5 * self.length
        else:
            nucx = None

        if self.nucleation_y is not None:
            nucy = self.nucleation_y * 0.5 * self.width
        else:
            nucy = None

        stf = self.effective_stf_pre()

        points, times, amplitudes, dl, dw, nl, nw = discretize_rect_source(
            store.config.deltas, store.config.deltat,
            self.time, self.north_shift, self.east_shift, self.depth,
            self.strike, self.dip, self.length, self.width, self.anchor,
            self.velocity, stf=stf, nucleation_x=nucx, nucleation_y=nucy)

        amplitudes /= num.sum(amplitudes)
        amplitudes *= self.get_moment(store, target)

        return meta.DiscretizedExplosionSource(
            lat=self.lat,
            lon=self.lon,
            times=times,
            north_shifts=points[:, 0],
            east_shifts=points[:, 1],
            depths=points[:, 2],
            m0s=amplitudes)

    def outline(self, cs='xyz'):
        '''
        Get the corner points of the source plane.

        :param cs: output coordinate system, see
            :ref:`coordinate-system-names`
        '''
        points = outline_rect_source(self.strike, self.dip, self.length,
                                     self.width, self.anchor)

        points[:, 0] += self.north_shift
        points[:, 1] += self.east_shift
        points[:, 2] += self.depth
        if cs == 'xyz':
            return points
        elif cs == 'xy':
            return points[:, :2]
        elif cs in ('latlon', 'lonlat'):
            latlon = ne_to_latlon(
                self.lat, self.lon, points[:, 0], points[:, 1])

            latlon = num.array(latlon).T
            if cs == 'latlon':
                return latlon
            else:
                return latlon[:, ::-1]

    def get_nucleation_abs_coord(self, cs='xy'):
        '''
        Get the nucleation point in absolute coordinates.

        Returns ``(None, None)`` when no nucleation point is set.
        '''

        if self.nucleation_x is None:
            return None, None

        coords = from_plane_coords(self.strike, self.dip, self.length,
                                   self.width, self.depth, self.nucleation_x,
                                   self.nucleation_y, lat=self.lat,
                                   lon=self.lon, north_shift=self.north_shift,
                                   east_shift=self.east_shift, cs=cs)
        return coords

1676 

1677 

class DCSource(SourceWithMagnitude):
    '''
    A double-couple point source.
    '''

    strike = Float.T(
        default=0.0,
        help='strike direction in [deg], measured clockwise from north')

    dip = Float.T(
        default=90.0,
        help='dip angle in [deg], measured downward from horizontal')

    rake = Float.T(
        default=0.0,
        help='rake angle in [deg], '
             'measured counter-clockwise from right-horizontal '
             'in on-plane view')

    discretized_source_class = meta.DiscretizedMTSource

    def base_key(self):
        '''
        Key deciding about sharing of discretization / GF stacking.
        '''
        mechanism = (self.strike, self.dip, self.rake)
        return Source.base_key(self) + mechanism

    def get_factor(self):
        '''
        Scalar moment [Nm], applied as common factor in post-processing.
        '''
        return float(pmt.magnitude_to_moment(self.magnitude))

    def discretize_basesource(self, store, target=None):
        '''
        Discretize to a temporal distribution of unit moment tensors.
        '''
        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)

        mechanism = pmt.MomentTensor(
            strike=self.strike, dip=self.dip, rake=self.rake)

        m6s = mechanism.m6()[num.newaxis, :] * amplitudes[:, num.newaxis]
        return meta.DiscretizedMTSource(
            m6s=m6s, **self._dparams_base_repeated(times))

    def pyrocko_moment_tensor(self, store=None, target=None):
        '''
        Moment tensor representation of this double couple.
        '''
        return pmt.MomentTensor(
            strike=self.strike, dip=self.dip, rake=self.rake,
            scalar_moment=self.moment)

    def pyrocko_event(self, store=None, target=None, **kwargs):
        '''
        Convert to a :py:class:`pyrocko.model.Event`.
        '''
        mt = self.pyrocko_moment_tensor(store, target)
        return SourceWithMagnitude.pyrocko_event(
            self, store, target, moment_tensor=mt, **kwargs)

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        '''
        Create a DCSource from a :py:class:`pyrocko.model.Event`,
        extracting strike, dip, rake and magnitude from its moment tensor
        when available.
        '''
        d = {}
        mt = ev.moment_tensor
        if mt:
            plane, _ = mt.both_strike_dip_rake()
            strike, dip, rake = plane
            d['strike'] = float(strike)
            d['dip'] = float(dip)
            d['rake'] = float(rake)
            d['magnitude'] = float(mt.moment_magnitude())

        d.update(kwargs)
        return super(DCSource, cls).from_pyrocko_event(ev, **d)

1742 

1743 

class CLVDSource(SourceWithMagnitude):
    '''
    A pure CLVD (compensated linear vector dipole) point source.
    '''

    discretized_source_class = meta.DiscretizedMTSource

    azimuth = Float.T(
        default=0.0,
        help='azimuth direction of largest dipole, clockwise from north [deg]')

    dip = Float.T(
        default=90.,
        help='dip direction of largest dipole, downward from horizontal [deg]')

    def base_key(self):
        return Source.base_key(self) + (self.azimuth, self.dip)

    def get_factor(self):
        # Scalar moment [Nm], applied as common factor in post-processing.
        return float(pmt.magnitude_to_moment(self.magnitude))

    @property
    def m6(self):
        '''
        Moment tensor of the CLVD as a 6-component vector.
        '''
        # Unrotated CLVD with the largest eigenvalue on the down-down
        # component, scaled to the source's moment.
        a = math.sqrt(4. / 3.) * self.get_factor()
        m = pmt.symmat6(-0.5 * a, -0.5 * a, a, 0., 0., 0.)
        # Rotate the symmetry axis to the configured dip and azimuth.
        rotmat1 = pmt.euler_to_matrix(
            d2r * (self.dip - 90.),
            d2r * (self.azimuth - 90.),
            0.)
        m = num.dot(rotmat1.T, num.dot(m, rotmat1))
        return pmt.to6(m)

    @property
    def m6_astuple(self):
        return tuple(self.m6.tolist())

    def discretize_basesource(self, store, target=None):
        '''
        Discretize to a temporal distribution of moment tensors.

        Tensors are normalized by the factor here; it is re-applied during
        post-processing (see :py:meth:`get_factor`).
        '''
        factor = self.get_factor()
        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)
        return meta.DiscretizedMTSource(
            m6s=self.m6[num.newaxis, :] * amplitudes[:, num.newaxis] / factor,
            **self._dparams_base_repeated(times))

    def pyrocko_moment_tensor(self, store=None, target=None):
        return pmt.MomentTensor(m=pmt.symmat6(*self.m6_astuple))

    def pyrocko_event(self, store=None, target=None, **kwargs):
        '''
        Convert to a :py:class:`pyrocko.model.Event`.
        '''
        mt = self.pyrocko_moment_tensor(store, target)
        # Reuse the computed tensor for the event (bugfix: it was rebuilt
        # a second time for the moment_tensor argument).
        return Source.pyrocko_event(
            self, store, target,
            moment_tensor=mt,
            magnitude=float(mt.moment_magnitude()),
            **kwargs)

1798 

1799 

class VLVDSource(SourceWithDerivedMagnitude):
    '''
    Volumetric linear vector dipole source.

    This source is a parameterization for a restricted moment tensor point
    source, useful to represent dyke or sill like inflation or deflation
    sources. The restriction is such that the moment tensor is rotational
    symmetric. It can be represented by a superposition of a linear vector
    dipole (here we use a CLVD for convenience) and an isotropic component.
    The restricted moment tensor has 4 degrees of freedom: 2 independent
    eigenvalues and 2 rotation angles orienting the symmetry axis.

    In this parameterization, the isotropic component is controlled by
    ``volume_change``. To define the moment tensor, it must be converted to
    the scalar moment of the MT's isotropic component. For the conversion,
    the shear modulus at the source's position must be known. This value is
    extracted from the earth model defined in the GF store in use.

    The CLVD part is controlled by its scalar moment :math:`M_0`:
    ``clvd_moment``. The sign of ``clvd_moment`` is used to switch between a
    positive or negative CLVD (the sign of the largest eigenvalue).
    '''

    discretized_source_class = meta.DiscretizedMTSource

    azimuth = Float.T(
        default=0.0,
        help='azimuth direction of symmetry axis, clockwise from north [deg].')

    dip = Float.T(
        default=90.,
        help='dip direction of symmetry axis, downward from horizontal [deg].')

    volume_change = Float.T(
        default=0.,
        help='volume change of the inflation/deflation [m^3].')

    clvd_moment = Float.T(
        default=0.,
        help='scalar moment :math:`M_0` of the CLVD component [Nm]. The sign '
             'controls the sign of the CLVD (the sign of its largest '
             'eigenvalue).')

    def get_moment_to_volume_change_ratio(self, store, target):
        '''
        Conversion factor between scalar moment and volume change, based
        on the shear modulus at the source position.

        :raises DerivedMagnitudeError: if store or target are missing, or
            the source position is outside the earth model.
        '''
        if store is None or target is None:
            raise DerivedMagnitudeError(
                'Need earth model to convert between volume change and '
                'magnitude.')

        points = num.array(
            [[self.north_shift, self.east_shift, self.depth]], dtype=float)

        try:
            shear_moduli = store.config.get_shear_moduli(
                self.lat, self.lon,
                points=points,
                interpolation=target.interpolation)[0]
        except meta.OutOfBounds:
            raise DerivedMagnitudeError(
                'Could not get shear modulus at source position.')

        return float(3. * shear_moduli)

    def base_key(self):
        return Source.base_key(self) + \
            (self.azimuth, self.dip, self.volume_change, self.clvd_moment)

    def get_magnitude(self, store=None, target=None):
        '''
        Magnitude of the combined CLVD + isotropic moment tensor.
        '''
        mt = self.pyrocko_moment_tensor(store, target)
        return float(pmt.moment_to_magnitude(mt.moment))

    def get_m6(self, store, target):
        '''
        Get the full moment tensor (CLVD plus isotropic part) as a
        6-component vector.
        '''
        # CLVD part: largest eigenvalue along 'down', then rotated to the
        # configured symmetry axis orientation.
        a = math.sqrt(4. / 3.) * self.clvd_moment
        m_clvd = pmt.symmat6(-0.5 * a, -0.5 * a, a, 0., 0., 0.)

        rotmat1 = pmt.euler_to_matrix(
            d2r * (self.dip - 90.),
            d2r * (self.azimuth - 90.),
            0.)
        m_clvd = num.dot(rotmat1.T, num.dot(m_clvd, rotmat1))

        # Isotropic part: volume change converted to scalar moment via the
        # shear modulus from the store's earth model.
        m_iso = self.volume_change * \
            self.get_moment_to_volume_change_ratio(store, target)

        m_iso = pmt.symmat6(m_iso, m_iso, m_iso, 0.,
                            0., 0.,) * math.sqrt(2. / 3)

        m = pmt.to6(m_clvd) + pmt.to6(m_iso)
        return m

    def get_moment(self, store=None, target=None):
        return float(pmt.magnitude_to_moment(
            self.get_magnitude(store, target)))

    def get_m6_astuple(self, store, target):
        m6 = self.get_m6(store, target)
        return tuple(m6.tolist())

    def discretize_basesource(self, store, target=None):
        '''
        Discretize to a temporal distribution of moment tensors.
        '''
        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)

        m6 = self.get_m6(store, target)

        # Broadcast the 6-component tensor over the STF samples (bugfix:
        # the former in-place 'm6 *= amplitudes' only worked for the
        # single-sample unit pulse and raised a shape error for pre-stack
        # STFs with more than one sample).
        m6s = m6[num.newaxis, :] * amplitudes[:, num.newaxis] \
            / self.get_factor()

        return meta.DiscretizedMTSource(
            m6s=m6s,
            **self._dparams_base_repeated(times))

    def pyrocko_moment_tensor(self, store=None, target=None):
        m6_astuple = self.get_m6_astuple(store, target)
        return pmt.MomentTensor(m=pmt.symmat6(*m6_astuple))

1912 

1913 

class MTSource(Source):
    '''
    A moment tensor point source.
    '''

    discretized_source_class = meta.DiscretizedMTSource

    mnn = Float.T(
        default=1.,
        help='north-north component of moment tensor in [Nm]')

    mee = Float.T(
        default=1.,
        help='east-east component of moment tensor in [Nm]')

    mdd = Float.T(
        default=1.,
        help='down-down component of moment tensor in [Nm]')

    mne = Float.T(
        default=0.,
        help='north-east component of moment tensor in [Nm]')

    mnd = Float.T(
        default=0.,
        help='north-down component of moment tensor in [Nm]')

    med = Float.T(
        default=0.,
        help='east-down component of moment tensor in [Nm]')

    def __init__(self, **kwargs):
        # Accept a 6-component vector 'm6' as a shortcut for the
        # individual components, in the order mnn, mee, mdd, mne, mnd, med.
        if 'm6' in kwargs:
            for (k, v) in zip('mnn mee mdd mne mnd med'.split(),
                              kwargs.pop('m6')):
                kwargs[k] = float(v)

        Source.__init__(self, **kwargs)

    @property
    def m6(self):
        # The tensor components as a NumPy 6-vector.
        return num.array(self.m6_astuple)

    @property
    def m6_astuple(self):
        return (self.mnn, self.mee, self.mdd, self.mne, self.mnd, self.med)

    @m6.setter
    def m6(self, value):
        self.mnn, self.mee, self.mdd, self.mne, self.mnd, self.med = value

    def base_key(self):
        return Source.base_key(self) + self.m6_astuple

    def discretize_basesource(self, store, target=None):
        '''
        Discretize to a temporal distribution of moment tensors.
        '''
        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)
        return meta.DiscretizedMTSource(
            m6s=self.m6[num.newaxis, :] * amplitudes[:, num.newaxis],
            **self._dparams_base_repeated(times))

    def get_magnitude(self, store=None, target=None):
        '''
        Moment magnitude derived from the tensor's scalar moment.
        '''
        m6 = self.m6
        # Scalar moment: Frobenius norm of the symmetric tensor (diagonal
        # once, off-diagonal twice) divided by sqrt(2).
        return pmt.moment_to_magnitude(
            math.sqrt(num.sum(m6[0:3]**2) + 2.0 * num.sum(m6[3:6]**2)) /
            math.sqrt(2.))

    def pyrocko_moment_tensor(self, store=None, target=None):
        return pmt.MomentTensor(m=pmt.symmat6(*self.m6_astuple))

    def pyrocko_event(self, store=None, target=None, **kwargs):
        '''
        Convert to a :py:class:`pyrocko.model.Event`.
        '''
        mt = self.pyrocko_moment_tensor(store, target)
        # Reuse the computed tensor for the event (bugfix: it was rebuilt
        # a second time for the moment_tensor argument).
        return Source.pyrocko_event(
            self, store, target,
            moment_tensor=mt,
            magnitude=float(mt.moment_magnitude()),
            **kwargs)

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        '''
        Create an MTSource from a :py:class:`pyrocko.model.Event`.

        The event's moment tensor is used when available; otherwise an
        isotropic tensor scaled to the event's magnitude is constructed.
        '''
        d = {}
        mt = ev.moment_tensor
        if mt:
            d.update(m6=tuple(map(float, mt.m6())))
        else:
            if ev.magnitude is not None:
                mom = pmt.magnitude_to_moment(ev.magnitude)
                v = math.sqrt(2. / 3.) * mom
                d.update(m6=(v, v, v, 0., 0., 0.))

        d.update(kwargs)
        return super(MTSource, cls).from_pyrocko_event(ev, **d)

2006 

2007 

# Mapping of textual anchor names to normalized in-plane coordinates
# (x, y), with x in [-1., 1.] running left to right and y in [-1., 1.]
# running top to bottom.
map_anchor = {
    'center': (0.0, 0.0),
    'center_left': (-1.0, 0.0),
    'center_right': (1.0, 0.0),
    'top': (0.0, -1.0),
    'top_left': (-1.0, -1.0),
    'top_right': (1.0, -1.0),
    'bottom': (0.0, 1.0),
    'bottom_left': (-1.0, 1.0),
    'bottom_right': (1.0, 1.0)}

2018 

2019 

2020class RectangularSource(SourceWithDerivedMagnitude): 

2021 ''' 

2022 Classical Haskell source model modified for bilateral rupture. 

2023 ''' 

2024 

    discretized_source_class = meta.DiscretizedMTSource

    # Magnitude and slip are mutually exclusive, see check_conflicts().
    magnitude = Float.T(
        optional=True,
        help='moment magnitude Mw as in [Hanks and Kanamori, 1979]')

    strike = Float.T(
        default=0.0,
        help='strike direction in [deg], measured clockwise from north')

    dip = Float.T(
        default=90.0,
        help='dip angle in [deg], measured downward from horizontal')

    rake = Float.T(
        default=0.0,
        help='rake angle in [deg], '
             'measured counter-clockwise from right-horizontal '
             'in on-plane view')

    length = Float.T(
        default=0.,
        help='length of rectangular source area [m]')

    width = Float.T(
        default=0.,
        help='width of rectangular source area [m]')

    anchor = StringChoice.T(
        choices=['top', 'top_left', 'top_right', 'center', 'bottom',
                 'bottom_left', 'bottom_right'],
        default='center',
        optional=True,
        help='Anchor point for positioning the plane, can be: ``top, center '
             'bottom, top_left, top_right,bottom_left,'
             'bottom_right, center_left, center right``.')

    nucleation_x = Float.T(
        optional=True,
        help='horizontal position of rupture nucleation in normalized fault '
             'plane coordinates (``-1.`` = left edge, ``+1.`` = right edge)')

    nucleation_y = Float.T(
        optional=True,
        help='down-dip position of rupture nucleation in normalized fault '
             'plane coordinates (``-1.`` = upper edge, ``+1.`` = lower edge)')

    velocity = Float.T(
        default=3500.,
        help='speed of rupture front [m/s]')

    slip = Float.T(
        optional=True,
        help='Slip on the rectangular source area [m]')

    # Splits slip into shear and tensile parts, see get_slip_tensile()
    # and get_slip_shear().
    opening_fraction = Float.T(
        default=0.,
        help='Determines fraction of slip related to opening. '
             '(``-1``: pure tensile closing, '
             '``0``: pure shear, '
             '``1``: pure tensile opening)')

    # NOTE(review): 'puposes' typo in the help text below; runtime string,
    # left unchanged here.
    decimation_factor = Int.T(
        optional=True,
        default=1,
        help='Sub-source decimation factor, a larger decimation will'
             ' make the result inaccurate but shorten the necessary'
             ' computation time (use for testing puposes only).')

    aggressive_oversampling = Bool.T(
        default=False,
        help='Aggressive oversampling for basesource discretization. '
             'When using \'multilinear\' interpolation oversampling has'
             ' practically no effect.')

2099 

2100 def __init__(self, **kwargs): 

2101 if 'moment' in kwargs: 

2102 mom = kwargs.pop('moment') 

2103 if 'magnitude' not in kwargs: 

2104 kwargs['magnitude'] = float(pmt.moment_to_magnitude(mom)) 

2105 

2106 SourceWithDerivedMagnitude.__init__(self, **kwargs) 

2107 

2108 def base_key(self): 

2109 return SourceWithDerivedMagnitude.base_key(self) + ( 

2110 self.magnitude, 

2111 self.slip, 

2112 self.strike, 

2113 self.dip, 

2114 self.rake, 

2115 self.length, 

2116 self.width, 

2117 self.nucleation_x, 

2118 self.nucleation_y, 

2119 self.velocity, 

2120 self.decimation_factor, 

2121 self.anchor) 

2122 

2123 def check_conflicts(self): 

2124 if self.magnitude is not None and self.slip is not None: 

2125 raise DerivedMagnitudeError( 

2126 'Magnitude and slip are both defined.') 

2127 

    def get_magnitude(self, store=None, target=None):
        '''
        Get the moment magnitude of this source.

        If :py:attr:`magnitude` is set it is returned directly. If
        :py:attr:`slip` is set instead, the magnitude is derived from the
        discretized amplitudes, which requires ``store`` and ``target`` to
        be given. With neither set, the magnitude of a unit moment (1 Nm)
        is returned.
        '''
        self.check_conflicts()
        if self.magnitude is not None:
            return self.magnitude

        elif self.slip is not None:
            if None in (store, target):
                raise DerivedMagnitudeError(
                    'Magnitude for a rectangular source with slip defined '
                    'can only be derived when earth model and target '
                    'interpolation method are available.')

            # third return value of _discretize holds the amplitudes
            amplitudes = self._discretize(store, target)[2]
            if amplitudes.ndim == 2:
                # CLVD component has no net moment, leave out
                return float(pmt.moment_to_magnitude(
                    num.sum(num.abs(amplitudes[0:2, :]).sum())))
            else:
                return float(pmt.moment_to_magnitude(num.sum(amplitudes)))

        else:
            # neither magnitude nor slip given: magnitude of unit moment
            return float(pmt.moment_to_magnitude(1.0))

2150 

2151 def get_factor(self): 

2152 return 1.0 

2153 

2154 def get_slip_tensile(self): 

2155 return self.slip * self.opening_fraction 

2156 

2157 def get_slip_shear(self): 

2158 return self.slip - abs(self.get_slip_tensile) 

2159 

    def _discretize(self, store, target):
        '''
        Discretize the rectangular plane into point-source samples.

        Returns the sub-source coordinates, rupture onset times, per-point
        amplitudes (moment or slip-scaled moduli), the patch dimensions
        ``dl``/``dw`` and the patch counts ``nl``/``nw``.
        '''
        # Convert normalized nucleation coordinates (-1..1) to meters
        # relative to the plane center.
        if self.nucleation_x is not None:
            nucx = self.nucleation_x * 0.5 * self.length
        else:
            nucx = None

        if self.nucleation_y is not None:
            nucy = self.nucleation_y * 0.5 * self.width
        else:
            nucy = None

        stf = self.effective_stf_pre()

        points, times, amplitudes, dl, dw, nl, nw = discretize_rect_source(
            store.config.deltas, store.config.deltat,
            self.time, self.north_shift, self.east_shift, self.depth,
            self.strike, self.dip, self.length, self.width, self.anchor,
            self.velocity, stf=stf, nucleation_x=nucx, nucleation_y=nucy,
            decimation_factor=self.decimation_factor,
            aggressive_oversampling=self.aggressive_oversampling)

        if self.slip is not None:
            if target is not None:
                interpolation = target.interpolation
            else:
                interpolation = 'nearest_neighbor'
                logger.warning(
                    'no target information available, will use '
                    '"nearest_neighbor" interpolation when extracting shear '
                    'modulus from earth model')

            shear_moduli = store.config.get_shear_moduli(
                self.lat, self.lon,
                points=points,
                interpolation=interpolation)

            tensile_slip = self.get_slip_tensile()
            shear_slip = self.slip - abs(tensile_slip)

            # First entry: double-couple (shear) amplitude per point.
            amplitudes_total = [shear_moduli * shear_slip]
            if tensile_slip != 0:
                bulk_moduli = store.config.get_bulk_moduli(
                    self.lat, self.lon,
                    points=points,
                    interpolation=interpolation)

                # Opening adds isotropic and CLVD contributions.
                tensile_iso = bulk_moduli * tensile_slip
                tensile_clvd = (2. / 3.) * shear_moduli * tensile_slip

                amplitudes_total.extend([tensile_iso, tensile_clvd])

            # Scale by patch area and the kinematic weighting.
            amplitudes_total = num.vstack(amplitudes_total).squeeze() * \
                amplitudes * dl * dw

        else:
            # normalization to retain total moment
            amplitudes_norm = amplitudes / num.sum(amplitudes)
            moment = self.get_moment(store, target)

            amplitudes_total = [
                amplitudes_norm * moment * (1 - abs(self.opening_fraction))]
            if self.opening_fraction != 0.:
                amplitudes_total.append(
                    amplitudes_norm * self.opening_fraction * moment)

            amplitudes_total = num.vstack(amplitudes_total).squeeze()

        return points, times, num.atleast_1d(amplitudes_total), dl, dw, nl, nw

2228 

2229 def discretize_basesource(self, store, target=None): 

2230 

2231 points, times, amplitudes, dl, dw, nl, nw = self._discretize( 

2232 store, target) 

2233 

2234 mot = pmt.MomentTensor( 

2235 strike=self.strike, dip=self.dip, rake=self.rake) 

2236 m6s = num.repeat(mot.m6()[num.newaxis, :], times.size, axis=0) 

2237 

2238 if amplitudes.ndim == 1: 

2239 m6s[:, :] *= amplitudes[:, num.newaxis] 

2240 elif amplitudes.ndim == 2: 

2241 # shear MT components 

2242 rotmat1 = pmt.euler_to_matrix( 

2243 d2r * self.dip, d2r * self.strike, d2r * -self.rake) 

2244 m6s[:, :] *= amplitudes[0, :][:, num.newaxis] 

2245 

2246 if amplitudes.shape[0] == 2: 

2247 # tensile MT components - moment/magnitude input 

2248 tensile = pmt.symmat6(1., 1., 3., 0., 0., 0.) 

2249 rot_tensile = pmt.to6( 

2250 num.dot(rotmat1.T, num.dot(tensile, rotmat1))) 

2251 

2252 m6s_tensile = rot_tensile[ 

2253 num.newaxis, :] * amplitudes[1, :][:, num.newaxis] 

2254 m6s += m6s_tensile 

2255 

2256 elif amplitudes.shape[0] == 3: 

2257 # tensile MT components - slip input 

2258 iso = pmt.symmat6(1., 1., 1., 0., 0., 0.) 

2259 clvd = pmt.symmat6(-1., -1., 2., 0., 0., 0.) 

2260 

2261 rot_iso = pmt.to6( 

2262 num.dot(rotmat1.T, num.dot(iso, rotmat1))) 

2263 rot_clvd = pmt.to6( 

2264 num.dot(rotmat1.T, num.dot(clvd, rotmat1))) 

2265 

2266 m6s_iso = rot_iso[ 

2267 num.newaxis, :] * amplitudes[1, :][:, num.newaxis] 

2268 m6s_clvd = rot_clvd[ 

2269 num.newaxis, :] * amplitudes[2, :][:, num.newaxis] 

2270 m6s += m6s_iso + m6s_clvd 

2271 else: 

2272 raise ValueError('Unknwown amplitudes shape!') 

2273 else: 

2274 raise ValueError( 

2275 'Unexpected dimension of {}'.format(amplitudes.ndim)) 

2276 

2277 ds = meta.DiscretizedMTSource( 

2278 lat=self.lat, 

2279 lon=self.lon, 

2280 times=times, 

2281 north_shifts=points[:, 0], 

2282 east_shifts=points[:, 1], 

2283 depths=points[:, 2], 

2284 m6s=m6s, 

2285 dl=dl, 

2286 dw=dw, 

2287 nl=nl, 

2288 nw=nw) 

2289 

2290 return ds 

2291 

2292 def outline(self, cs='xyz'): 

2293 points = outline_rect_source(self.strike, self.dip, self.length, 

2294 self.width, self.anchor) 

2295 

2296 points[:, 0] += self.north_shift 

2297 points[:, 1] += self.east_shift 

2298 points[:, 2] += self.depth 

2299 if cs == 'xyz': 

2300 return points 

2301 elif cs == 'xy': 

2302 return points[:, :2] 

2303 elif cs in ('latlon', 'lonlat', 'latlondepth'): 

2304 latlon = ne_to_latlon( 

2305 self.lat, self.lon, points[:, 0], points[:, 1]) 

2306 

2307 latlon = num.array(latlon).T 

2308 if cs == 'latlon': 

2309 return latlon 

2310 elif cs == 'lonlat': 

2311 return latlon[:, ::-1] 

2312 else: 

2313 return num.concatenate( 

2314 (latlon, points[:, 2].reshape((len(points), 1))), 

2315 axis=1) 

2316 

2317 def points_on_source(self, cs='xyz', **kwargs): 

2318 

2319 points = points_on_rect_source( 

2320 self.strike, self.dip, self.length, self.width, 

2321 self.anchor, **kwargs) 

2322 

2323 points[:, 0] += self.north_shift 

2324 points[:, 1] += self.east_shift 

2325 points[:, 2] += self.depth 

2326 if cs == 'xyz': 

2327 return points 

2328 elif cs == 'xy': 

2329 return points[:, :2] 

2330 elif cs in ('latlon', 'lonlat', 'latlondepth'): 

2331 latlon = ne_to_latlon( 

2332 self.lat, self.lon, points[:, 0], points[:, 1]) 

2333 

2334 latlon = num.array(latlon).T 

2335 if cs == 'latlon': 

2336 return latlon 

2337 elif cs == 'lonlat': 

2338 return latlon[:, ::-1] 

2339 else: 

2340 return num.concatenate( 

2341 (latlon, points[:, 2].reshape((len(points), 1))), 

2342 axis=1) 

2343 

2344 def get_nucleation_abs_coord(self, cs='xy'): 

2345 

2346 if self.nucleation_x is None: 

2347 return None, None 

2348 

2349 coords = from_plane_coords(self.strike, self.dip, self.length, 

2350 self.width, self.depth, self.nucleation_x, 

2351 self.nucleation_y, lat=self.lat, 

2352 lon=self.lon, north_shift=self.north_shift, 

2353 east_shift=self.east_shift, cs=cs) 

2354 return coords 

2355 

2356 def pyrocko_moment_tensor(self, store=None, target=None): 

2357 return pmt.MomentTensor( 

2358 strike=self.strike, 

2359 dip=self.dip, 

2360 rake=self.rake, 

2361 scalar_moment=self.get_moment(store, target)) 

2362 

2363 def pyrocko_event(self, store=None, target=None, **kwargs): 

2364 return SourceWithDerivedMagnitude.pyrocko_event( 

2365 self, store, target, 

2366 **kwargs) 

2367 

2368 @classmethod 

2369 def from_pyrocko_event(cls, ev, **kwargs): 

2370 d = {} 

2371 mt = ev.moment_tensor 

2372 if mt: 

2373 (strike, dip, rake), _ = mt.both_strike_dip_rake() 

2374 d.update( 

2375 strike=float(strike), 

2376 dip=float(dip), 

2377 rake=float(rake), 

2378 magnitude=float(mt.moment_magnitude())) 

2379 

2380 d.update(kwargs) 

2381 return super(RectangularSource, cls).from_pyrocko_event(ev, **d) 

2382 

2383 

2384class PseudoDynamicRupture(SourceWithDerivedMagnitude): 

2385 ''' 

2386 Combined Eikonal and Okada quasi-dynamic rupture model. 

2387 

2388 Details are described in :doc:`/topics/pseudo-dynamic-rupture`. 

2389 Note: attribute `stf` is not used so far, but kept for future applications. 

2390 ''' 

2391 

2392 discretized_source_class = meta.DiscretizedMTSource 

2393 

2394 strike = Float.T( 

2395 default=0.0, 

2396 help='Strike direction in [deg], measured clockwise from north.') 

2397 

2398 dip = Float.T( 

2399 default=0.0, 

2400 help='Dip angle in [deg], measured downward from horizontal.') 

2401 

2402 length = Float.T( 

2403 default=10. * km, 

2404 help='Length of rectangular source area in [m].') 

2405 

2406 width = Float.T( 

2407 default=5. * km, 

2408 help='Width of rectangular source area in [m].') 

2409 

2410 anchor = StringChoice.T( 

2411 choices=['top', 'top_left', 'top_right', 'center', 'bottom', 

2412 'bottom_left', 'bottom_right'], 

2413 default='center', 

2414 optional=True, 

2415 help='Anchor point for positioning the plane, can be: ``top, center, ' 

2416 'bottom, top_left, top_right, bottom_left, ' 

2417 'bottom_right, center_left, center_right``.') 

2418 

2419 nucleation_x__ = Array.T( 

2420 default=num.array([0.]), 

2421 dtype=num.float64, 

2422 serialize_as='list', 

2423 help='Horizontal position of rupture nucleation in normalized fault ' 

2424 'plane coordinates (``-1.`` = left edge, ``+1.`` = right edge).') 

2425 

2426 nucleation_y__ = Array.T( 

2427 default=num.array([0.]), 

2428 dtype=num.float64, 

2429 serialize_as='list', 

2430 help='Down-dip position of rupture nucleation in normalized fault ' 

2431 'plane coordinates (``-1.`` = upper edge, ``+1.`` = lower edge).') 

2432 

2433 nucleation_time__ = Array.T( 

2434 optional=True, 

2435 help='Time in [s] after origin, when nucleation points defined by ' 

2436 '``nucleation_x`` and ``nucleation_y`` rupture.', 

2437 dtype=num.float64, 

2438 serialize_as='list') 

2439 

2440 gamma = Float.T( 

2441 default=0.8, 

2442 help='Scaling factor between rupture velocity and S-wave velocity: ' 

2443 r':math:`v_r = \gamma * v_s`.') 

2444 

2445 nx = Int.T( 

2446 default=2, 

2447 help='Number of discrete source patches in x direction (along ' 

2448 'strike).') 

2449 

2450 ny = Int.T( 

2451 default=2, 

2452 help='Number of discrete source patches in y direction (down dip).') 

2453 

2454 slip = Float.T( 

2455 optional=True, 

2456 help='Maximum slip of the rectangular source [m]. ' 

2457 'Setting the slip the tractions/stress field ' 

2458 'will be normalized to accomodate the desired maximum slip.') 

2459 

2460 rake = Float.T( 

2461 optional=True, 

2462 help='Rake angle in [deg], ' 

2463 'measured counter-clockwise from right-horizontal ' 

2464 'in on-plane view. Rake is translated into homogenous tractions ' 

2465 'in strike and up-dip direction. ``rake`` is mutually exclusive ' 

2466 'with tractions parameter.') 

2467 

2468 patches = List.T( 

2469 OkadaSource.T(), 

2470 optional=True, 

2471 help='List of all boundary elements/sub faults/fault patches.') 

2472 

2473 patch_mask__ = Array.T( 

2474 dtype=bool, 

2475 serialize_as='list', 

2476 shape=(None,), 

2477 optional=True, 

2478 help='Mask for all boundary elements/sub faults/fault patches. True ' 

2479 'leaves the patch in the calculation, False excludes the patch.') 

2480 

2481 tractions = TractionField.T( 

2482 optional=True, 

2483 help='Traction field the rupture plane is exposed to. See the' 

2484 ':py:mod:`pyrocko.gf.tractions` module for more details. ' 

2485 'If ``tractions=None`` and ``rake`` is given' 

2486 ' :py:class:`~pyrocko.gf.tractions.DirectedTractions` will' 

2487 ' be used.') 

2488 

2489 coef_mat = Array.T( 

2490 optional=True, 

2491 help='Coefficient matrix linking traction and dislocation field.', 

2492 dtype=num.float64, 

2493 shape=(None, None)) 

2494 

2495 eikonal_decimation = Int.T( 

2496 optional=True, 

2497 default=1, 

2498 help='Sub-source eikonal factor, a smaller eikonal factor will' 

2499 ' increase the accuracy of rupture front calculation but' 

2500 ' increases also the computation time.') 

2501 

2502 decimation_factor = Int.T( 

2503 optional=True, 

2504 default=1, 

2505 help='Sub-source decimation factor, a larger decimation will' 

2506 ' make the result inaccurate but shorten the necessary' 

2507 ' computation time (use for testing puposes only).') 

2508 

2509 nthreads = Int.T( 

2510 optional=True, 

2511 default=1, 

2512 help='Number of threads for Okada forward modelling, ' 

2513 'matrix inversion and calculation of point subsources. ' 

2514 'Note: for small/medium matrices 1 thread is most efficient.') 

2515 

2516 pure_shear = Bool.T( 

2517 optional=True, 

2518 default=False, 

2519 help='Calculate only shear tractions and omit tensile tractions.') 

2520 

2521 smooth_rupture = Bool.T( 

2522 default=True, 

2523 help='Smooth the tractions by weighting partially ruptured' 

2524 ' fault patches.') 

2525 

2526 aggressive_oversampling = Bool.T( 

2527 default=False, 

2528 help='Aggressive oversampling for basesource discretization. ' 

2529 'When using \'multilinear\' interpolation oversampling has' 

2530 ' practically no effect.') 

2531 

2532 def __init__(self, **kwargs): 

2533 SourceWithDerivedMagnitude.__init__(self, **kwargs) 

2534 self._interpolators = {} 

2535 self.check_conflicts() 

2536 

2537 @property 

2538 def nucleation_x(self): 

2539 return self.nucleation_x__ 

2540 

2541 @nucleation_x.setter 

2542 def nucleation_x(self, nucleation_x): 

2543 if isinstance(nucleation_x, list): 

2544 nucleation_x = num.array(nucleation_x) 

2545 

2546 elif not isinstance( 

2547 nucleation_x, num.ndarray) and nucleation_x is not None: 

2548 

2549 nucleation_x = num.array([nucleation_x]) 

2550 self.nucleation_x__ = nucleation_x 

2551 

2552 @property 

2553 def nucleation_y(self): 

2554 return self.nucleation_y__ 

2555 

2556 @nucleation_y.setter 

2557 def nucleation_y(self, nucleation_y): 

2558 if isinstance(nucleation_y, list): 

2559 nucleation_y = num.array(nucleation_y) 

2560 

2561 elif not isinstance(nucleation_y, num.ndarray) \ 

2562 and nucleation_y is not None: 

2563 nucleation_y = num.array([nucleation_y]) 

2564 

2565 self.nucleation_y__ = nucleation_y 

2566 

2567 @property 

2568 def nucleation(self): 

2569 nucl_x, nucl_y = self.nucleation_x, self.nucleation_y 

2570 

2571 if (nucl_x is None) or (nucl_y is None): 

2572 return None 

2573 

2574 assert nucl_x.shape[0] == nucl_y.shape[0] 

2575 

2576 return num.concatenate( 

2577 (nucl_x[:, num.newaxis], nucl_y[:, num.newaxis]), axis=1) 

2578 

2579 @nucleation.setter 

2580 def nucleation(self, nucleation): 

2581 if isinstance(nucleation, list): 

2582 nucleation = num.array(nucleation) 

2583 

2584 assert nucleation.shape[1] == 2 

2585 

2586 self.nucleation_x = nucleation[:, 0] 

2587 self.nucleation_y = nucleation[:, 1] 

2588 

2589 @property 

2590 def nucleation_time(self): 

2591 return self.nucleation_time__ 

2592 

2593 @nucleation_time.setter 

2594 def nucleation_time(self, nucleation_time): 

2595 if not isinstance(nucleation_time, num.ndarray) \ 

2596 and nucleation_time is not None: 

2597 nucleation_time = num.array([nucleation_time]) 

2598 

2599 self.nucleation_time__ = nucleation_time 

2600 

2601 @property 

2602 def patch_mask(self): 

2603 if (self.patch_mask__ is not None and 

2604 self.patch_mask__.shape == (self.nx * self.ny,)): 

2605 

2606 return self.patch_mask__ 

2607 else: 

2608 return num.ones(self.nx * self.ny, dtype=bool) 

2609 

2610 @patch_mask.setter 

2611 def patch_mask(self, patch_mask): 

2612 if isinstance(patch_mask, list): 

2613 patch_mask = num.array(patch_mask) 

2614 

2615 self.patch_mask__ = patch_mask 

2616 

2617 def get_tractions(self): 

2618 ''' 

2619 Get source traction vectors. 

2620 

2621 If :py:attr:`rake` is given, unit length directed traction vectors 

2622 (:py:class:`~pyrocko.gf.tractions.DirectedTractions`) are returned, 

2623 else the given :py:attr:`tractions` are used. 

2624 

2625 :returns: 

2626 Traction vectors per patch. 

2627 :rtype: 

2628 :py:class:`~numpy.ndarray`: ``(n_patches, 3)``. 

2629 ''' 

2630 

2631 if self.rake is not None: 

2632 if num.isnan(self.rake): 

2633 raise ValueError('Rake must be a real number, not NaN.') 

2634 

2635 logger.warning( 

2636 'Tractions are derived based on the given source rake.') 

2637 tractions = DirectedTractions(rake=self.rake) 

2638 else: 

2639 tractions = self.tractions 

2640 return tractions.get_tractions(self.nx, self.ny, self.patches) 

2641 

2642 def base_key(self): 

2643 return SourceWithDerivedMagnitude.base_key(self) + ( 

2644 self.slip, 

2645 self.strike, 

2646 self.dip, 

2647 self.rake, 

2648 self.length, 

2649 self.width, 

2650 float(self.nucleation_x.mean()), 

2651 float(self.nucleation_y.mean()), 

2652 self.decimation_factor, 

2653 self.anchor, 

2654 self.pure_shear, 

2655 self.gamma, 

2656 tuple(self.patch_mask)) 

2657 

2658 def check_conflicts(self): 

2659 if self.tractions and self.rake: 

2660 raise AttributeError( 

2661 'Tractions and rake are mutually exclusive.') 

2662 if self.tractions is None and self.rake is None: 

2663 self.rake = 0. 

2664 

    def get_magnitude(self, store=None, target=None):
        '''
        Get the derived moment magnitude of this source.

        With ``slip`` or ``tractions`` set, the magnitude is integrated
        from the discretized moment rate function (requires ``store``).
        Otherwise the magnitude of a unit moment (1 Nm) is returned.
        '''
        self.check_conflicts()
        if self.slip is not None or self.tractions is not None:
            if store is None:
                raise DerivedMagnitudeError(
                    'Magnitude for a rectangular source with slip or '
                    'tractions defined can only be derived when earth model '
                    'is set.')

            moment_rate, calc_times = self.discretize_basesource(
                store, target=target).get_moment_rate(store.config.deltat)

            # Sample intervals; the first interval is duplicated so that
            # deltat has the same length as calc_times.
            deltat = num.concatenate((
                (num.diff(calc_times)[0],),
                num.diff(calc_times)))

            # Total moment = time-integral of the moment rate.
            return float(pmt.moment_to_magnitude(
                num.sum(moment_rate * deltat)))

        else:
            return float(pmt.moment_to_magnitude(1.0))

2686 

2687 def get_factor(self): 

2688 return 1.0 

2689 

2690 def outline(self, cs='xyz'): 

2691 ''' 

2692 Get source outline corner coordinates. 

2693 

2694 :param cs: 

2695 :ref:`Output coordinate system <coordinate-system-names>`. 

2696 :type cs: 

2697 optional, str 

2698 

2699 :returns: 

2700 Corner points in desired coordinate system. 

2701 :rtype: 

2702 :py:class:`~numpy.ndarray`: ``(5, [2, 3])``. 

2703 ''' 

2704 points = outline_rect_source(self.strike, self.dip, self.length, 

2705 self.width, self.anchor) 

2706 

2707 points[:, 0] += self.north_shift 

2708 points[:, 1] += self.east_shift 

2709 points[:, 2] += self.depth 

2710 if cs == 'xyz': 

2711 return points 

2712 elif cs == 'xy': 

2713 return points[:, :2] 

2714 elif cs in ('latlon', 'lonlat', 'latlondepth'): 

2715 latlon = ne_to_latlon( 

2716 self.lat, self.lon, points[:, 0], points[:, 1]) 

2717 

2718 latlon = num.array(latlon).T 

2719 if cs == 'latlon': 

2720 return latlon 

2721 elif cs == 'lonlat': 

2722 return latlon[:, ::-1] 

2723 else: 

2724 return num.concatenate( 

2725 (latlon, points[:, 2].reshape((len(points), 1))), 

2726 axis=1) 

2727 

2728 def points_on_source(self, cs='xyz', **kwargs): 

2729 ''' 

2730 Convert relative plane coordinates to geographical coordinates. 

2731 

2732 Given x and y coordinates (relative source coordinates between -1. 

2733 and 1.) are converted to desired geographical coordinates. Coordinates 

2734 need to be given as :py:class:`~numpy.ndarray` arguments ``points_x`` 

2735 and ``points_y``. 

2736 

2737 :param cs: 

2738 :ref:`Output coordinate system <coordinate-system-names>`. 

2739 :type cs: 

2740 optional, str 

2741 

2742 :returns: 

2743 Point coordinates in desired coordinate system. 

2744 :rtype: 

2745 :py:class:`~numpy.ndarray`: ``(n_points, [2, 3])``. 

2746 ''' 

2747 points = points_on_rect_source( 

2748 self.strike, self.dip, self.length, self.width, 

2749 self.anchor, **kwargs) 

2750 

2751 points[:, 0] += self.north_shift 

2752 points[:, 1] += self.east_shift 

2753 points[:, 2] += self.depth 

2754 if cs == 'xyz': 

2755 return points 

2756 elif cs == 'xy': 

2757 return points[:, :2] 

2758 elif cs in ('latlon', 'lonlat', 'latlondepth'): 

2759 latlon = ne_to_latlon( 

2760 self.lat, self.lon, points[:, 0], points[:, 1]) 

2761 

2762 latlon = num.array(latlon).T 

2763 if cs == 'latlon': 

2764 return latlon 

2765 elif cs == 'lonlat': 

2766 return latlon[:, ::-1] 

2767 else: 

2768 return num.concatenate( 

2769 (latlon, points[:, 2].reshape((len(points), 1))), 

2770 axis=1) 

2771 

    def pyrocko_moment_tensor(self, store=None, target=None):
        '''
        Build an equivalent moment tensor for this rupture.

        With ``store`` given, the rake is averaged from the modelled slip
        on the patches; otherwise from the traction vectors. The average
        is weighted by the vector norms.
        '''
        if store is not None:
            if not self.patches:
                self.discretize_patches(store)

            data = self.get_slip()
        else:
            data = self.get_tractions()

        # Weight each patch by the magnitude of its slip/traction vector.
        weights = num.linalg.norm(data, axis=1)
        weights /= weights.sum()

        # Per-patch rake from the in-plane vector components, in [deg].
        rakes = num.arctan2(data[:, 1], data[:, 0]) * r2d
        rake = num.average(rakes, weights=weights)

        return pmt.MomentTensor(
            strike=self.strike,
            dip=self.dip,
            rake=rake,
            scalar_moment=self.get_moment(store, target))

2792 

2793 def pyrocko_event(self, store=None, target=None, **kwargs): 

2794 return SourceWithDerivedMagnitude.pyrocko_event( 

2795 self, store, target, 

2796 **kwargs) 

2797 

2798 @classmethod 

2799 def from_pyrocko_event(cls, ev, **kwargs): 

2800 d = {} 

2801 mt = ev.moment_tensor 

2802 if mt: 

2803 (strike, dip, rake), _ = mt.both_strike_dip_rake() 

2804 d.update( 

2805 strike=float(strike), 

2806 dip=float(dip), 

2807 rake=float(rake)) 

2808 

2809 d.update(kwargs) 

2810 return super(PseudoDynamicRupture, cls).from_pyrocko_event(ev, **d) 

2811 

    def _discretize_points(self, store, *args, **kwargs):
        '''
        Discretize source plane with equal vertical and horizontal spacing.

        Additional ``*args`` and ``**kwargs`` are passed to
        :py:meth:`points_on_source`.

        :param store:
            Green's function database (needs to cover whole region of the
            source).
        :type store:
            :py:class:`~pyrocko.gf.store.Store`

        :returns:
            Number of points in strike and dip direction, distance
            between adjacent points, coordinates (latlondepth) and coordinates
            (xy on fault) for discrete points.
        :rtype:
            (int, int, float, :py:class:`~numpy.ndarray`,
            :py:class:`~numpy.ndarray`).
        '''
        anch_x, anch_y = map_anchor[self.anchor]

        # Sample the fault's down-dip line roughly once per km to probe the
        # minimum S-wave velocity along the plane.
        npoints = int(self.width // km) + 1
        points = num.zeros((npoints, 3))
        points[:, 1] = num.linspace(-1., 1., npoints)
        points[:, 1] = (points[:, 1] - anch_y) * self.width/2

        # Rotate fault-plane coordinates into NED and shift to source depth.
        rotmat = pmt.euler_to_matrix(self.dip*d2r, self.strike*d2r, 0.0)
        points = num.dot(rotmat.T, points.T).T
        points[:, 2] += self.depth

        vs_min = store.config.get_vs(
            self.lat, self.lon, points,
            interpolation='nearest_neighbor')
        # Lower bound of 0.5 km/s on vs guards against degenerate spacing.
        vr_min = max(vs_min.min(), .5*km) * self.gamma

        oversampling = 10.
        delta_l = self.length / (self.nx * oversampling)
        delta_w = self.width / (self.ny * oversampling)

        # Grid spacing: finest of the eikonal stability limit, the patch
        # oversampling and the GF store spacings, scaled by the user factor.
        delta = self.eikonal_decimation * num.min([
            store.config.deltat * vr_min / oversampling,
            delta_l, delta_w] + [
            deltas for deltas in store.config.deltas])

        # Snap delta so the width is an integer number of cells.
        delta = delta_w / num.ceil(delta_w / delta)

        nx = int(num.ceil(self.length / delta)) + 1
        ny = int(num.ceil(self.width / delta)) + 1

        # The grid slightly overshoots the fault length; extend the
        # normalized coordinate range accordingly.
        rem_l = (nx-1)*delta - self.length
        lim_x = rem_l / self.length

        points_xy = num.zeros((nx * ny, 2))
        points_xy[:, 0] = num.repeat(
            num.linspace(-1.-lim_x, 1.+lim_x, nx), ny)
        points_xy[:, 1] = num.tile(
            num.linspace(-1., 1., ny), nx)

        points = self.points_on_source(
            points_x=points_xy[:, 0],
            points_y=points_xy[:, 1],
            **kwargs)

        return nx, ny, delta, points, points_xy

2878 

2879 def _discretize_rupture_v(self, store, interpolation='nearest_neighbor', 

2880 points=None): 

2881 ''' 

2882 Get rupture velocity for discrete points on source plane. 

2883 

2884 :param store: 

2885 Green's function database (needs to cover the whole region of the 

2886 source) 

2887 :type store: 

2888 optional, :py:class:`~pyrocko.gf.store.Store` 

2889 

2890 :param interpolation: 

2891 Interpolation method to use (choose between ``'nearest_neighbor'`` 

2892 and ``'multilinear'``). 

2893 :type interpolation: 

2894 optional, str 

2895 

2896 :param points: 

2897 Coordinates on fault (-1.:1.) of discrete points. 

2898 :type points: 

2899 optional, :py:class:`~numpy.ndarray`: ``(n_points, 2)`` 

2900 

2901 :returns: 

2902 Rupture velocity assumed as :math:`v_s * \\gamma` for discrete 

2903 points. 

2904 :rtype: 

2905 :py:class:`~numpy.ndarray`: ``(n_points, )``. 

2906 ''' 

2907 

2908 if points is None: 

2909 _, _, _, points, _ = self._discretize_points(store, cs='xyz') 

2910 

2911 return store.config.get_vs( 

2912 self.lat, self.lon, 

2913 points=points, 

2914 interpolation=interpolation) * self.gamma 

2915 

2916 def discretize_time( 

2917 self, store, interpolation='nearest_neighbor', 

2918 vr=None, times=None, *args, **kwargs): 

2919 ''' 

2920 Get rupture start time for discrete points on source plane. 

2921 

2922 :param store: 

2923 Green's function database (needs to cover whole region of the 

2924 source) 

2925 :type store: 

2926 :py:class:`~pyrocko.gf.store.Store` 

2927 

2928 :param interpolation: 

2929 Interpolation method to use (choose between ``'nearest_neighbor'`` 

2930 and ``'multilinear'``). 

2931 :type interpolation: 

2932 optional, str 

2933 

2934 :param vr: 

2935 Array, containing rupture user defined rupture velocity values. 

2936 :type vr: 

2937 optional, :py:class:`~numpy.ndarray` 

2938 

2939 :param times: 

2940 Array, containing zeros, where rupture is starting, real positive 

2941 numbers at later secondary nucleation points and -1, where time 

2942 will be calculated. If not given, rupture starts at nucleation_x, 

2943 nucleation_y. Times are given for discrete points with equal 

2944 horizontal and vertical spacing. 

2945 :type times: 

2946 optional, :py:class:`~numpy.ndarray` 

2947 

2948 :returns: 

2949 Coordinates (latlondepth), coordinates (xy), rupture velocity, 

2950 rupture propagation time of discrete points. 

2951 :rtype: 

2952 :py:class:`~numpy.ndarray`: ``(n_points, 3)``, 

2953 :py:class:`~numpy.ndarray`: ``(n_points, 2)``, 

2954 :py:class:`~numpy.ndarray`: ``(n_points_dip, n_points_strike)``, 

2955 :py:class:`~numpy.ndarray`: ``(n_points_dip, n_points_strike)``. 

2956 ''' 

2957 nx, ny, delta, points, points_xy = self._discretize_points( 

2958 store, cs='xyz') 

2959 

2960 if vr is None or vr.shape != tuple((nx, ny)): 

2961 if vr: 

2962 logger.warning( 

2963 'Given rupture velocities are not in right shape: ' 

2964 '(%i, %i), but needed is (%i, %i).', *vr.shape + (nx, ny)) 

2965 vr = self._discretize_rupture_v(store, interpolation, points)\ 

2966 .reshape(nx, ny) 

2967 

2968 if vr.shape != tuple((nx, ny)): 

2969 logger.warning( 

2970 'Given rupture velocities are not in right shape. Therefore' 

2971 ' standard rupture velocity array is used.') 

2972 

2973 def initialize_times(): 

2974 nucl_x, nucl_y = self.nucleation_x, self.nucleation_y 

2975 

2976 if nucl_x.shape != nucl_y.shape: 

2977 raise ValueError( 

2978 'Nucleation coordinates have different shape.') 

2979 

2980 dist_points = num.array([ 

2981 num.linalg.norm(points_xy - num.array([x, y]).ravel(), axis=1) 

2982 for x, y in zip(nucl_x, nucl_y)]) 

2983 nucl_indices = num.argmin(dist_points, axis=1) 

2984 

2985 if self.nucleation_time is None: 

2986 nucl_times = num.zeros_like(nucl_indices) 

2987 else: 

2988 if self.nucleation_time.shape == nucl_x.shape: 

2989 nucl_times = self.nucleation_time 

2990 else: 

2991 raise ValueError( 

2992 'Nucleation coordinates and times have different ' 

2993 'shapes') 

2994 

2995 t = num.full(nx * ny, -1.) 

2996 t[nucl_indices] = nucl_times 

2997 return t.reshape(nx, ny) 

2998 

2999 if times is None: 

3000 times = initialize_times() 

3001 elif times.shape != tuple((nx, ny)): 

3002 times = initialize_times() 

3003 logger.warning( 

3004 'Given times are not in right shape. Therefore standard time' 

3005 ' array is used.') 

3006 

3007 eikonal_ext.eikonal_solver_fmm_cartesian( 

3008 speeds=vr, times=times, delta=delta) 

3009 

3010 return points, points_xy, vr, times 

3011 

3012 def get_vr_time_interpolators( 

3013 self, store, interpolation='nearest_neighbor', force=False, 

3014 **kwargs): 

3015 ''' 

3016 Get interpolators for rupture velocity and rupture time. 

3017 

3018 Additional ``**kwargs`` are passed to :py:meth:`discretize_time`. 

3019 

3020 :param store: 

3021 Green's function database (needs to cover whole region of the 

3022 source). 

3023 :type store: 

3024 :py:class:`~pyrocko.gf.store.Store` 

3025 

3026 :param interpolation: 

3027 Interpolation method to use (choose between ``'nearest_neighbor'`` 

3028 and ``'multilinear'``). 

3029 :type interpolation: 

3030 optional, str 

3031 

3032 :param force: 

3033 Force recalculation of the interpolators (e.g. after change of 

3034 nucleation point locations/times). Default is ``False``. 

3035 :type force: 

3036 optional, bool 

3037 ''' 

3038 interp_map = {'multilinear': 'linear', 'nearest_neighbor': 'nearest'} 

3039 if interpolation not in interp_map: 

3040 raise TypeError( 

3041 'Interpolation method %s not available' % interpolation) 

3042 

3043 if not self._interpolators.get(interpolation, False) or force: 

3044 _, points_xy, vr, times = self.discretize_time( 

3045 store, **kwargs) 

3046 

3047 if self.length <= 0.: 

3048 raise ValueError( 

3049 'length must be larger then 0. not %g' % self.length) 

3050 

3051 if self.width <= 0.: 

3052 raise ValueError( 

3053 'width must be larger then 0. not %g' % self.width) 

3054 

3055 nx, ny = times.shape 

3056 anch_x, anch_y = map_anchor[self.anchor] 

3057 

3058 points_xy[:, 0] = (points_xy[:, 0] - anch_x) * self.length / 2. 

3059 points_xy[:, 1] = (points_xy[:, 1] - anch_y) * self.width / 2. 

3060 

3061 self._interpolators[interpolation] = ( 

3062 nx, ny, times, vr, 

3063 RegularGridInterpolator( 

3064 (points_xy[::ny, 0], points_xy[:ny, 1]), times, 

3065 method=interp_map[interpolation]), 

3066 RegularGridInterpolator( 

3067 (points_xy[::ny, 0], points_xy[:ny, 1]), vr, 

3068 method=interp_map[interpolation])) 

3069 return self._interpolators[interpolation] 

3070 

    def discretize_patches(
            self, store, interpolation='nearest_neighbor', force=False,
            grid_shape=(),
            **kwargs):
        '''
        Get rupture start time and OkadaSource elements for points on rupture.

        All source elements and their corresponding center points are
        calculated and stored in the :py:attr:`patches` attribute.

        Additional ``**kwargs`` are passed to :py:meth:`discretize_time`.

        :param store:
            Green's function database (needs to cover whole region of the
            source).
        :type store:
            :py:class:`~pyrocko.gf.store.Store`

        :param interpolation:
            Interpolation method to use (choose between ``'nearest_neighbor'``
            and ``'multilinear'``).
        :type interpolation:
            optional, str

        :param force:
            Force recalculation of the vr and time interpolators ( e.g. after
            change of nucleation point locations/times). Default is ``False``.
        :type force:
            optional, bool

        :param grid_shape:
            Desired sub fault patch grid size (nlength, nwidth). Either factor
            or grid_shape should be set.
        :type grid_shape:
            optional, tuple of int
        '''
        # Rupture front arrival times / velocities on the (nx, ny) eikonal
        # grid plus interpolators over the fault plane.
        nx, ny, times, vr, time_interpolator, vr_interpolator = \
            self.get_vr_time_interpolators(
                store,
                interpolation=interpolation, force=force, **kwargs)
        anch_x, anch_y = map_anchor[self.anchor]

        # Okada-convention edge coordinates relative to the anchor point:
        # al1/al2 along strike, aw1/aw2 down dip.
        al = self.length / 2.
        aw = self.width / 2.
        al1 = -(al + anch_x * al)
        al2 = al - anch_x * al
        aw1 = -aw + anch_y * aw
        aw2 = aw + anch_y * aw
        assert num.abs([al1, al2]).sum() == self.length
        assert num.abs([aw1, aw2]).sum() == self.width

        def get_lame(*a, **kw):
            # Shear modulus mu and Poisson ratio from the store's earth
            # model: lambda = vp**2 * rho - 2 * mu and
            # poisson = lambda / (2 * (lambda + mu)).
            shear_mod = store.config.get_shear_moduli(*a, **kw)
            lamb = store.config.get_vp(*a, **kw)**2 \
                * store.config.get_rho(*a, **kw) - 2. * shear_mod
            return shear_mod, lamb / (2. * (lamb + shear_mod))

        # Elastic parameters at the source reference point, used for the
        # parent (whole fault) OkadaSource.
        shear_mod, poisson = get_lame(
            self.lat, self.lon,
            num.array([[self.north_shift, self.east_shift, self.depth]]),
            interpolation=interpolation)

        okada_src = OkadaSource(
            lat=self.lat, lon=self.lon,
            strike=self.strike, dip=self.dip,
            north_shift=self.north_shift, east_shift=self.east_shift,
            depth=self.depth,
            al1=al1, al2=al2, aw1=aw1, aw2=aw2,
            poisson=poisson.mean(),
            shearmod=shear_mod.mean(),
            opening=kwargs.get('opening', 0.))

        # Patch grid size: explicit nx/ny attributes win, then grid_shape,
        # otherwise fall back to the eikonal grid dimensions.
        if not (self.nx and self.ny):
            if grid_shape:
                self.nx, self.ny = grid_shape
            else:
                self.nx = nx
                self.ny = ny

        source_disc, source_points = okada_src.discretize(self.nx, self.ny)

        # Per-patch elastic parameters at the patch center points.
        # NOTE(review): these recomputed shear_mod/poisson values appear
        # unused below — confirm whether they should be assigned to the
        # individual patches.
        shear_mod, poisson = get_lame(
            self.lat, self.lon,
            num.array([src.source_patch()[:3] for src in source_disc]),
            interpolation=interpolation)

        # Rupture onset time and velocity per patch: interpolate when the
        # patch grid differs from the eikonal grid, else reuse directly
        # (transposed, since patch ordering is x-major).
        if (self.nx, self.ny) != (nx, ny):
            times_interp = time_interpolator(source_points[:, :2])
            vr_interp = vr_interpolator(source_points[:, :2])
        else:
            times_interp = times.T.ravel()
            vr_interp = vr.T.ravel()

        for isrc, src in enumerate(source_disc):
            src.vr = vr_interp[isrc]
            src.time = times_interp[isrc] + self.time

        self.patches = source_disc

3169 

    def discretize_basesource(self, store, target=None):
        '''
        Prepare source for synthetic waveform calculation.

        The fault patches (and, if needed, the traction-dislocation
        coefficient matrix) are computed first. Each patch is then further
        subdivided into point sources whose moment tensors are derived from
        the incremental slip history of the patch.

        :param store:
            Green's function database (needs to cover whole region of the
            source).
        :type store:
            :py:class:`~pyrocko.gf.store.Store`

        :param target:
            Target information.
        :type target:
            optional, :py:class:`~pyrocko.gf.targets.Target`

        :returns:
            Source discretized by a set of moment tensors and times.
        :rtype:
            :py:class:`~pyrocko.gf.meta.DiscretizedMTSource`
        '''
        if not target:
            interpolation = 'nearest_neighbor'
        else:
            interpolation = target.interpolation

        # Lazily build patches and coefficient matrix if not done yet.
        if not self.patches:
            self.discretize_patches(store, interpolation)

        if self.coef_mat is None:
            self.calc_coef_mat()

        # Incremental slip per patch and the corresponding snapshot times.
        delta_slip, slip_times = self.get_delta_slip(store)
        npatches = self.nx * self.ny
        ntimes = slip_times.size

        anch_x, anch_y = map_anchor[self.anchor]

        # Patch extensions along strike (pln) and down dip (pwd).
        pln = self.length / self.nx
        pwd = self.width / self.ny

        patch_coords = num.array([
            (p.ix, p.iy)
            for p in self.patches]).reshape(self.nx, self.ny, 2)

        # boundary condition is zero-slip
        # is not valid to avoid unwished interpolation effects
        # (grid padded by one cell on each side; edge values replicated
        # below so interpolation near the fault border stays flat).
        slip_grid = num.zeros((self.nx + 2, self.ny + 2, ntimes, 3))
        slip_grid[1:-1, 1:-1, :, :] = \
            delta_slip.reshape(self.nx, self.ny, ntimes, 3)

        # Replicate corners ...
        slip_grid[0, 0, :, :] = slip_grid[1, 1, :, :]
        slip_grid[0, -1, :, :] = slip_grid[1, -2, :, :]
        slip_grid[-1, 0, :, :] = slip_grid[-2, 1, :, :]
        slip_grid[-1, -1, :, :] = slip_grid[-2, -2, :, :]

        # ... and edges into the padding cells.
        slip_grid[1:-1, 0, :, :] = slip_grid[1:-1, 1, :, :]
        slip_grid[1:-1, -1, :, :] = slip_grid[1:-1, -2, :, :]
        slip_grid[0, 1:-1, :, :] = slip_grid[1, 1:-1, :, :]
        slip_grid[-1, 1:-1, :, :] = slip_grid[-2, 1:-1, :, :]

        def make_grid(patch_parameter):
            # Same padded-grid construction for a scalar per-patch value.
            grid = num.zeros((self.nx + 2, self.ny + 2))
            grid[1:-1, 1:-1] = patch_parameter.reshape(self.nx, self.ny)

            grid[0, 0] = grid[1, 1]
            grid[0, -1] = grid[1, -2]
            grid[-1, 0] = grid[-2, 1]
            grid[-1, -1] = grid[-2, -2]

            grid[1:-1, 0] = grid[1:-1, 1]
            grid[1:-1, -1] = grid[1:-1, -2]
            grid[0, 1:-1] = grid[1, 1:-1]
            grid[-1, 1:-1] = grid[-2, 1:-1]

            return grid

        lamb = self.get_patch_attribute('lamb')
        mu = self.get_patch_attribute('shearmod')

        lamb_grid = make_grid(lamb)
        mu_grid = make_grid(mu)

        # Grid axes in plane coordinates, extended by half a patch on each
        # side to cover the padding cells.
        coords_x = num.zeros(self.nx + 2)
        coords_x[1:-1] = patch_coords[:, 0, 0]
        coords_x[0] = coords_x[1] - pln / 2
        coords_x[-1] = coords_x[-2] + pln / 2

        coords_y = num.zeros(self.ny + 2)
        coords_y[1:-1] = patch_coords[0, :, 1]
        coords_y[0] = coords_y[1] - pwd / 2
        coords_y[-1] = coords_y[-2] + pwd / 2

        slip_interp = RegularGridInterpolator(
            (coords_x, coords_y, slip_times),
            slip_grid, method='nearest')

        lamb_interp = RegularGridInterpolator(
            (coords_x, coords_y),
            lamb_grid, method='nearest')

        mu_interp = RegularGridInterpolator(
            (coords_x, coords_y),
            mu_grid, method='nearest')

        # discretize basesources: choose the sub-patch point-source spacing
        # from the finest of patch size and GF store spacing, reduced by
        # the decimation factor.
        mindeltagf = min(tuple(
            (self.length / self.nx, self.width / self.ny) +
            tuple(store.config.deltas)))

        nl = int((1. / self.decimation_factor) *
                 num.ceil(pln / mindeltagf)) + 1
        nw = int((1. / self.decimation_factor) *
                 num.ceil(pwd / mindeltagf)) + 1
        nsrc_patch = int(nl * nw)
        dl = pln / nl
        dw = pwd / nw

        patch_area = dl * dw

        # Point source positions within one patch, centered on the patch.
        xl = num.linspace(-0.5 * (pln - dl), 0.5 * (pln - dl), nl)
        xw = num.linspace(-0.5 * (pwd - dw), 0.5 * (pwd - dw), nw)

        base_coords = num.zeros((nsrc_patch, 3))
        base_coords[:, 0] = num.tile(xl, nw)
        base_coords[:, 1] = num.repeat(xw, nl)
        base_coords = num.tile(base_coords, (npatches, 1))

        center_coords = num.zeros((npatches, 3))
        center_coords[:, 0] = num.repeat(
            num.arange(self.nx) * pln + pln / 2, self.ny) - self.length / 2
        center_coords[:, 1] = num.tile(
            num.arange(self.ny) * pwd + pwd / 2, self.nx) - self.width / 2

        base_coords -= center_coords.repeat(nsrc_patch, axis=0)
        nbaselocs = base_coords.shape[0]

        # One row per (location, time) pair; third column carries the time
        # for the space-time slip interpolation below.
        base_interp = base_coords.repeat(ntimes, axis=0)

        base_times = num.tile(slip_times, nbaselocs)
        base_interp[:, 0] -= anch_x * self.length / 2
        base_interp[:, 1] -= anch_y * self.width / 2
        base_interp[:, 2] = base_times

        _, _, _, _, time_interpolator, _ = self.get_vr_time_interpolators(
            store, interpolation=interpolation)

        time_eikonal_max = time_interpolator.values.max()

        nbasesrcs = base_interp.shape[0]
        delta_slip = slip_interp(base_interp).reshape(nbaselocs, ntimes, 3)
        lamb = lamb_interp(base_interp[:, :2]).ravel()
        mu = mu_interp(base_interp[:, :2]).ravel()

        # Disabled debug visualization of the interpolated slip field.
        if False:
            try:
                import matplotlib.pyplot as plt
                coords = base_coords.copy()
                norm = num.sum(num.linalg.norm(delta_slip, axis=2), axis=1)
                plt.scatter(coords[:, 0], coords[:, 1], c=norm)
                plt.show()
            except AttributeError:
                pass

        # Rotate plane coordinates into the geographic NED frame and shift
        # to the source position. Time column is zeroed first since it is
        # reused as the depth coordinate.
        base_interp[:, 2] = 0.
        rotmat = pmt.euler_to_matrix(self.dip * d2r, self.strike * d2r, 0.0)
        base_interp = num.dot(rotmat.T, base_interp.T).T
        base_interp[:, 0] += self.north_shift
        base_interp[:, 1] += self.east_shift
        base_interp[:, 2] += self.depth

        slip_strike = delta_slip[:, :, 0].ravel()
        slip_dip = delta_slip[:, :, 1].ravel()
        slip_norm = delta_slip[:, :, 2].ravel()

        slip_shear = num.linalg.norm([slip_strike, slip_dip], axis=0)
        slip_rake = r2d * num.arctan2(slip_dip, slip_strike)

        # Moment tensor (m6) per point source from dislocation and elastic
        # parameters.
        m6s = okada_ext.patch2m6(
            strikes=num.full(nbasesrcs, self.strike, dtype=float),
            dips=num.full(nbasesrcs, self.dip, dtype=float),
            rakes=slip_rake,
            disl_shear=slip_shear,
            disl_norm=slip_norm,
            lamb=lamb,
            mu=mu,
            nthreads=self.nthreads)

        m6s *= patch_area

        dl = -self.patches[0].al1 + self.patches[0].al2
        dw = -self.patches[0].aw1 + self.patches[0].aw2

        # Clip onset times to the maximum rupture front arrival time.
        base_times[base_times > time_eikonal_max] = time_eikonal_max

        ds = meta.DiscretizedMTSource(
            lat=self.lat,
            lon=self.lon,
            times=base_times + self.time,
            north_shifts=base_interp[:, 0],
            east_shifts=base_interp[:, 1],
            depths=base_interp[:, 2],
            m6s=m6s,
            dl=dl,
            dw=dw,
            nl=self.nx,
            nw=self.ny)

        return ds

3378 

3379 def calc_coef_mat(self): 

3380 ''' 

3381 Calculate coefficients connecting tractions and dislocations. 

3382 ''' 

3383 if not self.patches: 

3384 raise ValueError( 

3385 'Patches are needed. Please calculate them first.') 

3386 

3387 self.coef_mat = make_okada_coefficient_matrix( 

3388 self.patches, nthreads=self.nthreads, pure_shear=self.pure_shear) 

3389 

3390 def get_patch_attribute(self, attr): 

3391 ''' 

3392 Get patch attributes. 

3393 

3394 :param attr: 

3395 Name of selected attribute (see 

3396 :py:class`pyrocko.modelling.okada.OkadaSource`). 

3397 :type attr: 

3398 str 

3399 

3400 :returns: 

3401 Array with attribute value for each fault patch. 

3402 :rtype: 

3403 :py:class:`~numpy.ndarray` 

3404 

3405 ''' 

3406 if not self.patches: 

3407 raise ValueError( 

3408 'Patches are needed. Please calculate them first.') 

3409 return num.array([getattr(p, attr) for p in self.patches]) 

3410 

3411 def get_slip( 

3412 self, 

3413 time=None, 

3414 scale_slip=True, 

3415 interpolation='nearest_neighbor', 

3416 **kwargs): 

3417 ''' 

3418 Get slip per subfault patch for given time after rupture start. 

3419 

3420 :param time: 

3421 Time after origin [s], for which slip is computed. If not 

3422 given, final static slip is returned. 

3423 :type time: 

3424 optional, float > 0. 

3425 

3426 :param scale_slip: 

3427 If ``True`` and :py:attr:`slip` given, all slip values are scaled 

3428 to fit the given maximum slip. 

3429 :type scale_slip: 

3430 optional, bool 

3431 

3432 :param interpolation: 

3433 Interpolation method to use (choose between ``'nearest_neighbor'`` 

3434 and ``'multilinear'``). 

3435 :type interpolation: 

3436 optional, str 

3437 

3438 :returns: 

3439 Inverted dislocations (:math:`u_{strike}, u_{dip}, u_{tensile}`) 

3440 for each source patch. 

3441 :rtype: 

3442 :py:class:`~numpy.ndarray`: ``(n_sources, 3)`` 

3443 ''' 

3444 

3445 if self.patches is None: 

3446 raise ValueError( 

3447 'Please discretize the source first (discretize_patches())') 

3448 npatches = len(self.patches) 

3449 tractions = self.get_tractions() 

3450 time_patch_max = self.get_patch_attribute('time').max() - self.time 

3451 

3452 time_patch = time 

3453 if time is None: 

3454 time_patch = time_patch_max 

3455 

3456 if self.coef_mat is None: 

3457 self.calc_coef_mat() 

3458 

3459 if tractions.shape != (npatches, 3): 

3460 raise AttributeError( 

3461 'The traction vector is of invalid shape.' 

3462 ' Required shape is (npatches, 3)') 

3463 

3464 patch_mask = num.ones(npatches, dtype=bool) 

3465 if self.patch_mask is not None: 

3466 patch_mask = self.patch_mask 

3467 

3468 times = self.get_patch_attribute('time') - self.time 

3469 times[~patch_mask] = time_patch + 1. # exlcude unmasked patches 

3470 relevant_sources = num.nonzero(times <= time_patch)[0] 

3471 disloc_est = num.zeros_like(tractions) 

3472 

3473 if self.smooth_rupture: 

3474 patch_activation = num.zeros(npatches) 

3475 

3476 nx, ny, times, vr, time_interpolator, vr_interpolator = \ 

3477 self.get_vr_time_interpolators( 

3478 store, interpolation=interpolation) 

3479 

3480 # Getting the native Eikonal grid, bit hackish 

3481 points_x = num.round(time_interpolator.grid[0], decimals=2) 

3482 points_y = num.round(time_interpolator.grid[1], decimals=2) 

3483 times_eikonal = time_interpolator.values 

3484 

3485 time_max = time 

3486 if time is None: 

3487 time_max = times_eikonal.max() 

3488 

3489 for ip, p in enumerate(self.patches): 

3490 ul = num.round((p.ix + p.al1, p.iy + p.aw1), decimals=2) 

3491 lr = num.round((p.ix + p.al2, p.iy + p.aw2), decimals=2) 

3492 

3493 idx_length = num.logical_and( 

3494 points_x >= ul[0], points_x <= lr[0]) 

3495 idx_width = num.logical_and( 

3496 points_y >= ul[1], points_y <= lr[1]) 

3497 

3498 times_patch = times_eikonal[num.ix_(idx_length, idx_width)] 

3499 if times_patch.size == 0: 

3500 raise AttributeError('could not use smooth_rupture') 

3501 

3502 patch_activation[ip] = \ 

3503 (times_patch <= time_max).sum() / times_patch.size 

3504 

3505 if time_patch == 0 and time_patch != time_patch_max: 

3506 patch_activation[ip] = 0. 

3507 

3508 patch_activation[~patch_mask] = 0. # exlcude unmasked patches 

3509 

3510 relevant_sources = num.nonzero(patch_activation > 0.)[0] 

3511 

3512 if relevant_sources.size == 0: 

3513 return disloc_est 

3514 

3515 indices_disl = num.repeat(relevant_sources * 3, 3) 

3516 indices_disl[1::3] += 1 

3517 indices_disl[2::3] += 2 

3518 

3519 disloc_est[relevant_sources] = invert_fault_dislocations_bem( 

3520 stress_field=tractions[relevant_sources, :].ravel(), 

3521 coef_mat=self.coef_mat[indices_disl, :][:, indices_disl], 

3522 pure_shear=self.pure_shear, nthreads=self.nthreads, 

3523 epsilon=None, 

3524 **kwargs) 

3525 

3526 if self.smooth_rupture: 

3527 disloc_est *= patch_activation[:, num.newaxis] 

3528 

3529 if scale_slip and self.slip is not None: 

3530 disloc_tmax = num.zeros(npatches) 

3531 

3532 indices_disl = num.repeat(num.nonzero(patch_mask)[0] * 3, 3) 

3533 indices_disl[1::3] += 1 

3534 indices_disl[2::3] += 2 

3535 

3536 disloc_tmax[patch_mask] = num.linalg.norm( 

3537 invert_fault_dislocations_bem( 

3538 stress_field=tractions[patch_mask, :].ravel(), 

3539 coef_mat=self.coef_mat[indices_disl, :][:, indices_disl], 

3540 pure_shear=self.pure_shear, nthreads=self.nthreads, 

3541 epsilon=None, 

3542 **kwargs), axis=1) 

3543 

3544 disloc_tmax_max = disloc_tmax.max() 

3545 if disloc_tmax_max == 0.: 

3546 logger.warning( 

3547 'slip scaling not performed. Maximum slip is 0.') 

3548 

3549 disloc_est *= self.slip / disloc_tmax_max 

3550 

3551 return disloc_est 

3552 

3553 def get_delta_slip( 

3554 self, 

3555 store=None, 

3556 deltat=None, 

3557 delta=True, 

3558 interpolation='nearest_neighbor', 

3559 **kwargs): 

3560 ''' 

3561 Get slip change snapshots. 

3562 

3563 The time interval, within which the slip changes are computed is 

3564 determined by the sampling rate of the Green's function database or 

3565 ``deltat``. Additional ``**kwargs`` are passed to :py:meth:`get_slip`. 

3566 

3567 :param store: 

3568 Green's function database (needs to cover whole region of of the 

3569 source). Its sampling interval is used as time increment for slip 

3570 difference calculation. Either ``deltat`` or ``store`` should be 

3571 given. 

3572 :type store: 

3573 optional, :py:class:`~pyrocko.gf.store.Store` 

3574 

3575 :param deltat: 

3576 Time interval for slip difference calculation [s]. Either 

3577 ``deltat`` or ``store`` should be given. 

3578 :type deltat: 

3579 optional, float 

3580 

3581 :param delta: 

3582 If ``True``, slip differences between two time steps are given. If 

3583 ``False``, cumulative slip for all time steps. 

3584 :type delta: 

3585 optional, bool 

3586 

3587 :param interpolation: 

3588 Interpolation method to use (choose between ``'nearest_neighbor'`` 

3589 and ``'multilinear'``). 

3590 :type interpolation: 

3591 optional, str 

3592 

3593 :returns: 

3594 Displacement changes(:math:`\\Delta u_{strike}, 

3595 \\Delta u_{dip} , \\Delta u_{tensile}`) for each source patch and 

3596 time; corner times, for which delta slip is computed. The order of 

3597 displacement changes array is: 

3598 

3599 .. math:: 

3600 

3601 &[[\\\\ 

3602 &[\\Delta u_{strike, patch1, t1}, 

3603 \\Delta u_{dip, patch1, t1}, 

3604 \\Delta u_{tensile, patch1, t1}],\\\\ 

3605 &[\\Delta u_{strike, patch1, t2}, 

3606 \\Delta u_{dip, patch1, t2}, 

3607 \\Delta u_{tensile, patch1, t2}]\\\\ 

3608 &], [\\\\ 

3609 &[\\Delta u_{strike, patch2, t1}, ...],\\\\ 

3610 &[\\Delta u_{strike, patch2, t2}, ...]]]\\\\ 

3611 

3612 :rtype: :py:class:`~numpy.ndarray`: ``(n_sources, n_times, 3)``, 

3613 :py:class:`~numpy.ndarray`: ``(n_times, )`` 

3614 ''' 

3615 if store and deltat: 

3616 raise AttributeError( 

3617 'Argument collision. ' 

3618 'Please define only the store or the deltat argument.') 

3619 

3620 if store: 

3621 deltat = store.config.deltat 

3622 

3623 if not deltat: 

3624 raise AttributeError('Please give a GF store or set deltat.') 

3625 

3626 npatches = len(self.patches) 

3627 

3628 _, _, _, _, time_interpolator, _ = self.get_vr_time_interpolators( 

3629 store, interpolation=interpolation) 

3630 tmax = time_interpolator.values.max() 

3631 

3632 calc_times = num.arange(0., tmax + deltat, deltat) 

3633 calc_times[calc_times > tmax] = tmax 

3634 

3635 disloc_est = num.zeros((npatches, calc_times.size, 3)) 

3636 

3637 for itime, t in enumerate(calc_times): 

3638 disloc_est[:, itime, :] = self.get_slip( 

3639 time=t, scale_slip=False, **kwargs) 

3640 

3641 if self.slip: 

3642 disloc_tmax = num.linalg.norm( 

3643 self.get_slip(scale_slip=False, time=tmax), 

3644 axis=1) 

3645 

3646 disloc_tmax_max = disloc_tmax.max() 

3647 if disloc_tmax_max == 0.: 

3648 logger.warning( 

3649 'Slip scaling not performed. Maximum slip is 0.') 

3650 else: 

3651 disloc_est *= self.slip / disloc_tmax_max 

3652 

3653 if not delta: 

3654 return disloc_est, calc_times 

3655 

3656 # if we have only one timestep there is no gradient 

3657 if calc_times.size > 1: 

3658 disloc_init = disloc_est[:, 0, :] 

3659 disloc_est = num.diff(disloc_est, axis=1) 

3660 disloc_est = num.concatenate(( 

3661 disloc_init[:, num.newaxis, :], disloc_est), axis=1) 

3662 

3663 calc_times = calc_times 

3664 

3665 return disloc_est, calc_times 

3666 

3667 def get_slip_rate(self, *args, **kwargs): 

3668 ''' 

3669 Get slip rate inverted from patches. 

3670 

3671 The time interval, within which the slip rates are computed is 

3672 determined by the sampling rate of the Green's function database or 

3673 ``deltat``. Additional ``*args`` and ``**kwargs`` are passed to 

3674 :py:meth:`get_delta_slip`. 

3675 

3676 :returns: 

3677 Slip rates (:math:`\\Delta u_{strike}/\\Delta t`, 

3678 :math:`\\Delta u_{dip}/\\Delta t, \\Delta u_{tensile}/\\Delta t`) 

3679 for each source patch and time; corner times, for which slip rate 

3680 is computed. The order of sliprate array is: 

3681 

3682 .. math:: 

3683 

3684 &[[\\\\ 

3685 &[\\Delta u_{strike, patch1, t1}/\\Delta t, 

3686 \\Delta u_{dip, patch1, t1}/\\Delta t, 

3687 \\Delta u_{tensile, patch1, t1}/\\Delta t],\\\\ 

3688 &[\\Delta u_{strike, patch1, t2}/\\Delta t, 

3689 \\Delta u_{dip, patch1, t2}/\\Delta t, 

3690 \\Delta u_{tensile, patch1, t2}/\\Delta t]], [\\\\ 

3691 &[\\Delta u_{strike, patch2, t1}/\\Delta t, ...],\\\\ 

3692 &[\\Delta u_{strike, patch2, t2}/\\Delta t, ...]]]\\\\ 

3693 

3694 :rtype: :py:class:`~numpy.ndarray`: ``(n_sources, n_times, 3)``, 

3695 :py:class:`~numpy.ndarray`: ``(n_times, )`` 

3696 ''' 

3697 ddisloc_est, calc_times = self.get_delta_slip( 

3698 *args, delta=True, **kwargs) 

3699 

3700 dt = num.concatenate( 

3701 [(num.diff(calc_times)[0], ), num.diff(calc_times)]) 

3702 slip_rate = num.linalg.norm(ddisloc_est, axis=2) / dt 

3703 

3704 return slip_rate, calc_times 

3705 

3706 def get_moment_rate_patches(self, *args, **kwargs): 

3707 ''' 

3708 Get scalar seismic moment rate for each patch individually. 

3709 

3710 Additional ``*args`` and ``**kwargs`` are passed to 

3711 :py:meth:`get_slip_rate`. 

3712 

3713 :returns: 

3714 Seismic moment rate for each source patch and time; corner times, 

3715 for which patch moment rate is computed based on slip rate. The 

3716 order of the moment rate array is: 

3717 

3718 .. math:: 

3719 

3720 &[\\\\ 

3721 &[(\\Delta M / \\Delta t)_{patch1, t1}, 

3722 (\\Delta M / \\Delta t)_{patch1, t2}, ...],\\\\ 

3723 &[(\\Delta M / \\Delta t)_{patch2, t1}, 

3724 (\\Delta M / \\Delta t)_{patch, t2}, ...],\\\\ 

3725 &[...]]\\\\ 

3726 

3727 :rtype: :py:class:`~numpy.ndarray`: ``(n_sources, n_times)``, 

3728 :py:class:`~numpy.ndarray`: ``(n_times, )`` 

3729 ''' 

3730 slip_rate, calc_times = self.get_slip_rate(*args, **kwargs) 

3731 

3732 shear_mod = self.get_patch_attribute('shearmod') 

3733 p_length = self.get_patch_attribute('length') 

3734 p_width = self.get_patch_attribute('width') 

3735 

3736 dA = p_length * p_width 

3737 

3738 mom_rate = shear_mod[:, num.newaxis] * slip_rate * dA[:, num.newaxis] 

3739 

3740 return mom_rate, calc_times 

3741 

3742 def get_moment_rate(self, store, target=None, deltat=None): 

3743 ''' 

3744 Get seismic source moment rate for the total source (STF). 

3745 

3746 :param store: 

3747 Green's function database (needs to cover whole region of of the 

3748 source). Its ``deltat`` [s] is used as time increment for slip 

3749 difference calculation. Either ``deltat`` or ``store`` should be 

3750 given. 

3751 :type store: 

3752 :py:class:`~pyrocko.gf.store.Store` 

3753 

3754 :param target: 

3755 Target information, needed for interpolation method. 

3756 :type target: 

3757 optional, :py:class:`~pyrocko.gf.targets.Target` 

3758 

3759 :param deltat: 

3760 Time increment for slip difference calculation [s]. If not given 

3761 ``store.deltat`` is used. 

3762 :type deltat: 

3763 optional, float 

3764 

3765 :return: 

3766 Seismic moment rate [Nm/s] for each time; corner times, for which 

3767 moment rate is computed. The order of the moment rate array is: 

3768 

3769 .. math:: 

3770 

3771 &[\\\\ 

3772 &(\\Delta M / \\Delta t)_{t1},\\\\ 

3773 &(\\Delta M / \\Delta t)_{t2},\\\\ 

3774 &...]\\\\ 

3775 

3776 :rtype: 

3777 :py:class:`~numpy.ndarray`: ``(n_times, )``, 

3778 :py:class:`~numpy.ndarray`: ``(n_times, )`` 

3779 ''' 

3780 if not deltat: 

3781 deltat = store.config.deltat 

3782 return self.discretize_basesource( 

3783 store, target=target).get_moment_rate(deltat) 

3784 

3785 def get_moment(self, *args, **kwargs): 

3786 ''' 

3787 Get seismic cumulative moment. 

3788 

3789 Additional ``*args`` and ``**kwargs`` are passed to 

3790 :py:meth:`get_magnitude`. 

3791 

3792 :returns: 

3793 Cumulative seismic moment in [Nm]. 

3794 :rtype: 

3795 float 

3796 ''' 

3797 return float(pmt.magnitude_to_moment(self.get_magnitude( 

3798 *args, **kwargs))) 

3799 

3800 def rescale_slip(self, magnitude=None, moment=None, **kwargs): 

3801 ''' 

3802 Rescale source slip based on given target magnitude or seismic moment. 

3803 

3804 Rescale the maximum source slip to fit the source moment magnitude or 

3805 seismic moment to the given target values. Either ``magnitude`` or 

3806 ``moment`` need to be given. Additional ``**kwargs`` are passed to 

3807 :py:meth:`get_moment`. 

3808 

3809 :param magnitude: 

3810 Target moment magnitude :math:`M_\\mathrm{w}` as in 

3811 [Hanks and Kanamori, 1979] 

3812 :type magnitude: 

3813 optional, float 

3814 

3815 :param moment: 

3816 Target seismic moment :math:`M_0` [Nm]. 

3817 :type moment: 

3818 optional, float 

3819 ''' 

3820 if self.slip is None: 

3821 self.slip = 1. 

3822 logger.warning('No slip found for rescaling. ' 

3823 'An initial slip of 1 m is assumed.') 

3824 

3825 if magnitude is None and moment is None: 

3826 raise ValueError( 

3827 'Either target magnitude or moment need to be given.') 

3828 

3829 moment_init = self.get_moment(**kwargs) 

3830 

3831 if magnitude is not None: 

3832 moment = pmt.magnitude_to_moment(magnitude) 

3833 

3834 self.slip *= moment / moment_init 

3835 

3836 def get_centroid(self, store, *args, **kwargs): 

3837 ''' 

3838 Centroid of the pseudo dynamic rupture model. 

3839 

3840 The centroid location and time are derived from the locations and times 

3841 of the individual patches weighted with their moment contribution. 

3842 Additional ``**kwargs`` are passed to :py:meth:`pyrocko_moment_tensor`. 

3843 

3844 :param store: 

3845 Green's function database (needs to cover whole region of of the 

3846 source). Its ``deltat`` [s] is used as time increment for slip 

3847 difference calculation. Either ``deltat`` or ``store`` should be 

3848 given. 

3849 :type store: 

3850 :py:class:`~pyrocko.gf.store.Store` 

3851 

3852 :returns: 

3853 The centroid location and associated moment tensor. 

3854 :rtype: 

3855 :py:class:`pyrocko.model.Event` 

3856 ''' 

3857 _, _, _, _, time, _ = self.get_vr_time_interpolators(store) 

3858 t_max = time.values.max() 

3859 

3860 moment_rate, times = self.get_moment_rate_patches(deltat=t_max) 

3861 

3862 moment = num.sum(moment_rate * times, axis=1) 

3863 weights = moment / moment.sum() 

3864 

3865 norths = self.get_patch_attribute('north_shift') 

3866 easts = self.get_patch_attribute('east_shift') 

3867 depths = self.get_patch_attribute('depth') 

3868 

3869 centroid_n = num.sum(weights * norths) 

3870 centroid_e = num.sum(weights * easts) 

3871 centroid_d = num.sum(weights * depths) 

3872 

3873 centroid_lat, centroid_lon = ne_to_latlon( 

3874 self.lat, self.lon, centroid_n, centroid_e) 

3875 

3876 moment_rate_, times = self.get_moment_rate(store) 

3877 delta_times = num.concatenate(( 

3878 [times[1] - times[0]], 

3879 num.diff(times))) 

3880 moment_src = delta_times * moment_rate 

3881 

3882 centroid_t = num.sum( 

3883 moment_src / num.sum(moment_src) * times) + self.time 

3884 

3885 mt = self.pyrocko_moment_tensor(store, *args, **kwargs) 

3886 

3887 return model.Event( 

3888 lat=centroid_lat, 

3889 lon=centroid_lon, 

3890 depth=centroid_d, 

3891 time=centroid_t, 

3892 moment_tensor=mt, 

3893 magnitude=mt.magnitude, 

3894 duration=t_max) 

3895 

3896 

class DoubleDCSource(SourceWithMagnitude):
    '''
    Two double-couple point sources separated in space and time.

    The moment share between the sub-sources is controlled by the parameter
    ``mix``. The positions of the subsources depend on the moment
    distribution between the two sources: depth, east and north shift are
    given for the centroid between the two double-couples, and the
    subsources are positioned around this centroid according to their
    moment shares (via their delta parameters, which are therefore relative
    to that centroid). Note that the depth of a subsource can thus be
    ``depth +/- delta_depth``; for shallow earthquakes the depth has to be
    chosen deep enough to avoid sampling above the surface.
    '''

    # Mechanism of subsource 1.
    strike1 = Float.T(
        default=0.0,
        help='strike direction in [deg], measured clockwise from north')

    dip1 = Float.T(
        default=90.0,
        help='dip angle in [deg], measured downward from horizontal')

    # Direction from subsource 1 towards subsource 2.
    azimuth = Float.T(
        default=0.0,
        help='azimuth to second double-couple [deg], '
             'measured at first, clockwise from north')

    rake1 = Float.T(
        default=0.0,
        help='rake angle in [deg], '
             'measured counter-clockwise from right-horizontal '
             'in on-plane view')

    # Mechanism of subsource 2.
    strike2 = Float.T(
        default=0.0,
        help='strike direction in [deg], measured clockwise from north')

    dip2 = Float.T(
        default=90.0,
        help='dip angle in [deg], measured downward from horizontal')

    rake2 = Float.T(
        default=0.0,
        help='rake angle in [deg], '
             'measured counter-clockwise from right-horizontal '
             'in on-plane view')

    # Separation of the two subsources in time, depth and horizontal
    # distance, all relative to the shared centroid.
    delta_time = Float.T(
        default=0.0,
        help='separation of double-couples in time (t2-t1) [s]')

    delta_depth = Float.T(
        default=0.0,
        help='difference in depth (z2-z1) [m]')

    distance = Float.T(
        default=0.0,
        help='distance between the two double-couples [m]')

    mix = Float.T(
        default=0.5,
        help='how to distribute the moment to the two doublecouples '
             'mix=0 -> m1=1 and m2=0; mix=1 -> m1=0, m2=1')

    # Optional per-subsource source time functions overriding Source.stf.
    stf1 = STF.T(
        optional=True,
        help='Source time function of subsource 1 '
             '(if given, overrides STF from attribute :py:gattr:`Source.stf`)')

    stf2 = STF.T(
        optional=True,
        help='Source time function of subsource 2 '
             '(if given, overrides STF from attribute :py:gattr:`Source.stf`)')

    discretized_source_class = meta.DiscretizedMTSource

3976 

3977 def base_key(self): 

3978 return ( 

3979 self.time, self.depth, self.lat, self.north_shift, 

3980 self.lon, self.east_shift, type(self).__name__) + \ 

3981 self.effective_stf1_pre().base_key() + \ 

3982 self.effective_stf2_pre().base_key() + ( 

3983 self.strike1, self.dip1, self.rake1, 

3984 self.strike2, self.dip2, self.rake2, 

3985 self.delta_time, self.delta_depth, 

3986 self.azimuth, self.distance, self.mix) 

3987 

3988 def get_factor(self): 

3989 return self.moment 

3990 

3991 def effective_stf1_pre(self): 

3992 return self.stf1 or self.stf or g_unit_pulse 

3993 

3994 def effective_stf2_pre(self): 

3995 return self.stf2 or self.stf or g_unit_pulse 

3996 

3997 def effective_stf_post(self): 

3998 return g_unit_pulse 

3999 

4000 def split(self): 

4001 a1 = 1.0 - self.mix 

4002 a2 = self.mix 

4003 delta_north = math.cos(self.azimuth * d2r) * self.distance 

4004 delta_east = math.sin(self.azimuth * d2r) * self.distance 

4005 

4006 dc1 = DCSource( 

4007 lat=self.lat, 

4008 lon=self.lon, 

4009 time=self.time - self.delta_time * a2, 

4010 north_shift=self.north_shift - delta_north * a2, 

4011 east_shift=self.east_shift - delta_east * a2, 

4012 depth=self.depth - self.delta_depth * a2, 

4013 moment=self.moment * a1, 

4014 strike=self.strike1, 

4015 dip=self.dip1, 

4016 rake=self.rake1, 

4017 stf=self.stf1 or self.stf) 

4018 

4019 dc2 = DCSource( 

4020 lat=self.lat, 

4021 lon=self.lon, 

4022 time=self.time + self.delta_time * a1, 

4023 north_shift=self.north_shift + delta_north * a1, 

4024 east_shift=self.east_shift + delta_east * a1, 

4025 depth=self.depth + self.delta_depth * a1, 

4026 moment=self.moment * a2, 

4027 strike=self.strike2, 

4028 dip=self.dip2, 

4029 rake=self.rake2, 

4030 stf=self.stf2 or self.stf) 

4031 

4032 return [dc1, dc2] 

4033 

    def discretize_basesource(self, store, target=None):
        # Discretize both subsources with their respective STFs and
        # merge them into one DiscretizedMTSource. Unit scalar moments
        # a1/a2 partition the moment; the total moment is applied later
        # via get_factor().
        a1 = 1.0 - self.mix
        a2 = self.mix
        mot1 = pmt.MomentTensor(strike=self.strike1, dip=self.dip1,
                                rake=self.rake1, scalar_moment=a1)
        mot2 = pmt.MomentTensor(strike=self.strike2, dip=self.dip2,
                                rake=self.rake2, scalar_moment=a2)

        # Horizontal offset between the two subsources, along ``azimuth``.
        delta_north = math.cos(self.azimuth * d2r) * self.distance
        delta_east = math.sin(self.azimuth * d2r) * self.distance

        # Each subsource gets its own STF discretization; time shifts are
        # weighted so the moment-weighted centroid time stays at
        # ``self.time``.
        times1, amplitudes1 = self.effective_stf1_pre().discretize_t(
            store.config.deltat, self.time - self.delta_time * a2)

        times2, amplitudes2 = self.effective_stf2_pre().discretize_t(
            store.config.deltat, self.time + self.delta_time * a1)

        nt1 = times1.size
        nt2 = times2.size

        # Positions are repeated per STF sample; moment tensors are
        # scaled by the STF amplitudes.
        ds = meta.DiscretizedMTSource(
            lat=self.lat,
            lon=self.lon,
            times=num.concatenate((times1, times2)),
            north_shifts=num.concatenate((
                num.repeat(self.north_shift - delta_north * a2, nt1),
                num.repeat(self.north_shift + delta_north * a1, nt2))),
            east_shifts=num.concatenate((
                num.repeat(self.east_shift - delta_east * a2, nt1),
                num.repeat(self.east_shift + delta_east * a1, nt2))),
            depths=num.concatenate((
                num.repeat(self.depth - self.delta_depth * a2, nt1),
                num.repeat(self.depth + self.delta_depth * a1, nt2))),
            m6s=num.vstack((
                mot1.m6()[num.newaxis, :] * amplitudes1[:, num.newaxis],
                mot2.m6()[num.newaxis, :] * amplitudes2[:, num.newaxis])))

        return ds

4072 

    def pyrocko_moment_tensor(self, store=None, target=None):
        # Combined moment tensor: moment-weighted sum of the two
        # double-couple tensors (in general not a pure double-couple).
        a1 = 1.0 - self.mix
        a2 = self.mix
        mot1 = pmt.MomentTensor(strike=self.strike1, dip=self.dip1,
                                rake=self.rake1,
                                scalar_moment=a1 * self.moment)
        mot2 = pmt.MomentTensor(strike=self.strike2, dip=self.dip2,
                                rake=self.rake2,
                                scalar_moment=a2 * self.moment)
        return pmt.MomentTensor(m=mot1.m() + mot2.m())

4083 

    def pyrocko_event(self, store=None, target=None, **kwargs):
        # Delegate to the common implementation, attaching the combined
        # moment tensor of both subsources.
        return SourceWithMagnitude.pyrocko_event(
            self, store, target,
            moment_tensor=self.pyrocko_moment_tensor(store, target),
            **kwargs)

4089 

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        # Build a degenerate DoubleDCSource from a single event: both
        # subsources get the same mechanism and mix=0.0 puts the whole
        # moment on subsource 1.
        d = {}
        mt = ev.moment_tensor
        if mt:
            (strike, dip, rake), _ = mt.both_strike_dip_rake()
            d.update(
                strike1=float(strike),
                dip1=float(dip),
                rake1=float(rake),
                strike2=float(strike),
                dip2=float(dip),
                rake2=float(rake),
                mix=0.0,
                magnitude=float(mt.moment_magnitude()))

        d.update(kwargs)
        source = super(DoubleDCSource, cls).from_pyrocko_event(ev, **d)
        # Move the event STF to subsource 1; subsource 2 gets a
        # zero-duration STF. NOTE(review): a zero-duration
        # HalfSinusoidSTF is presumably equivalent to an impulse here —
        # confirm against the STF implementation.
        source.stf1 = source.stf
        source.stf2 = HalfSinusoidSTF(effective_duration=0.)
        source.stf = None
        return source

4112 

4113 

class RingfaultSource(SourceWithMagnitude):
    '''
    A ring fault with vertical doublecouples.

    The ring is discretized into ``npointsources`` vertical
    double-couple point sources arranged on a circle of the given
    diameter, rotated into the plane defined by ``strike`` and ``dip``.
    '''

    diameter = Float.T(
        default=1.0,
        help='diameter of the ring in [m]')

    sign = Float.T(
        default=1.0,
        help='inside of the ring moves up (+1) or down (-1)')

    strike = Float.T(
        default=0.0,
        help='strike direction of the ring plane, clockwise from north,'
             ' in [deg]')

    dip = Float.T(
        default=0.0,
        help='dip angle of the ring plane from horizontal in [deg]')

    npointsources = Int.T(
        default=360,
        help='number of point sources to use')

    discretized_source_class = meta.DiscretizedMTSource

    def base_key(self):
        # ``sign`` and the moment are excluded: both enter only through
        # get_factor() and thus do not change the stacking pattern.
        return Source.base_key(self) + (
            self.strike, self.dip, self.diameter, self.npointsources)

    def get_factor(self):
        return self.sign * self.moment

    def discretize_basesource(self, store=None, target=None):
        n = self.npointsources
        phi = num.linspace(0, 2.0 * num.pi, n, endpoint=False)

        # Points on a horizontal circle around the origin.
        points = num.zeros((n, 3))
        points[:, 0] = num.cos(phi) * 0.5 * self.diameter
        points[:, 1] = num.sin(phi) * 0.5 * self.diameter

        # Rotate the circle into the (strike, dip) plane.
        # NOTE(review): the transposed application below was already
        # flagged with "!!! ?" by the original author — confirm the
        # rotation convention against pmt.euler_to_matrix.
        rotmat = pmt.euler_to_matrix(self.dip * d2r, self.strike * d2r, 0.0)
        points = num.dot(rotmat.T, points.T).T  # !!! ?

        points[:, 0] += self.north_shift
        points[:, 1] += self.east_shift
        points[:, 2] += self.depth

        # Prototype mechanism: vertical double-couple, unit total moment
        # shared equally among the point sources.
        m = num.array(pmt.MomentTensor(strike=90., dip=90., rake=-90.,
                                       scalar_moment=1.0 / n).m())

        # Per-point rotation about the vertical axis so each mechanism
        # follows the ring.
        rotmats = num.transpose(
            [[num.cos(phi), num.sin(phi), num.zeros(n)],
             [-num.sin(phi), num.cos(phi), num.zeros(n)],
             [num.zeros(n), num.zeros(n), num.ones(n)]], (2, 0, 1))

        ms = num.zeros((n, 3, 3))
        for i in range(n):
            mtemp = num.dot(rotmats[i].T, num.dot(m, rotmats[i]))
            ms[i, :, :] = num.dot(rotmat.T, num.dot(mtemp, rotmat))

        # Pack the symmetric tensors into 6-component form.
        m6s = num.vstack((ms[:, 0, 0], ms[:, 1, 1], ms[:, 2, 2],
                          ms[:, 0, 1], ms[:, 0, 2], ms[:, 1, 2])).T

        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)

        nt = times.size

        # Repeat each point for every STF sample; amplitudes scale the
        # mechanisms.
        return meta.DiscretizedMTSource(
            times=num.tile(times, n),
            lat=self.lat,
            lon=self.lon,
            north_shifts=num.repeat(points[:, 0], nt),
            east_shifts=num.repeat(points[:, 1], nt),
            depths=num.repeat(points[:, 2], nt),
            m6s=num.repeat(m6s, nt, axis=0) * num.tile(
                amplitudes, n)[:, num.newaxis])

4194 

4195 

class CombiSource(Source):
    '''
    Composite source model.

    Wraps a list of sub-sources; the container's own coordinates are set
    to the sub-sources' common origin and mean position. The discretized
    representation is the combination of the discretized sub-sources.
    '''

    discretized_source_class = meta.DiscretizedMTSource

    subsources = List.T(Source.T())

    def __init__(self, subsources=[], **kwargs):
        '''
        :param subsources: sub-sources to combine; at least one required
        :raises BadRequest: if ``subsources`` is empty
        '''
        if not subsources:
            raise BadRequest(
                'Need at least one sub-source to create a CombiSource object.')

        lats = num.array(
            [subsource.lat for subsource in subsources], dtype=float)
        lons = num.array(
            [subsource.lon for subsource in subsources], dtype=float)

        lat, lon = lats[0], lons[0]

        # Move all sub-sources to a common origin if they do not already
        # share one. Bug fix: the original condition
        # ``not num.all(lats == lat) and num.all(lons == lon)`` bound as
        # ``(not ...) and ...`` due to operator precedence, so the
        # relocation only happened when all longitudes were equal.
        if not (num.all(lats == lat) and num.all(lons == lon)):
            subsources = [s.clone() for s in subsources]
            for subsource in subsources[1:]:
                subsource.set_origin(lat, lon)

        depth = float(num.mean([p.depth for p in subsources]))
        time = float(num.mean([p.time for p in subsources]))
        north_shift = float(num.mean([p.north_shift for p in subsources]))
        east_shift = float(num.mean([p.east_shift for p in subsources]))
        kwargs.update(
            time=time,
            lat=float(lat),
            lon=float(lon),
            north_shift=north_shift,
            east_shift=east_shift,
            depth=depth)

        Source.__init__(self, subsources=subsources, **kwargs)

    def get_factor(self):
        # Sub-sources carry their own factors (applied in
        # discretize_basesource), so the container scales by one.
        return 1.0

    def discretize_basesource(self, store, target=None):
        '''
        Combine the discretized representations of all sub-sources.
        '''
        dsources = []
        for sf in self.subsources:
            ds = sf.discretize_basesource(store, target)
            ds.m6s *= sf.get_factor()
            dsources.append(ds)

        return meta.DiscretizedMTSource.combine(dsources)

4246 

4247 

class SFSource(Source):
    '''
    A single force point source.

    Supported GF schemes: `'elastic5'`.
    '''

    discretized_source_class = meta.DiscretizedSFSource

    fn = Float.T(
        default=0.,
        help='northward component of single force [N]')

    fe = Float.T(
        default=0.,
        help='eastward component of single force [N]')

    fd = Float.T(
        default=0.,
        help='downward component of single force [N]')

    def __init__(self, **kwargs):
        Source.__init__(self, **kwargs)

    def base_key(self):
        # The force components are part of the stacking key.
        return Source.base_key(self) + (self.fn, self.fe, self.fd)

    def get_factor(self):
        return 1.0

    def discretize_basesource(self, store, target=None):
        # Spread the constant NED force vector over the discretized
        # source time function samples.
        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)
        force_ned = num.array(
            [[self.fn, self.fe, self.fd]], dtype=float)
        return meta.DiscretizedSFSource(
            forces=amplitudes[:, num.newaxis] * force_ned,
            **self._dparams_base_repeated(times))

    def pyrocko_event(self, store=None, target=None, **kwargs):
        return Source.pyrocko_event(self, store, target, **kwargs)

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        extra = dict(kwargs)
        return super(SFSource, cls).from_pyrocko_event(ev, **extra)

4297 

4298 

class PorePressurePointSource(Source):
    '''
    Excess pore pressure point source.

    For poro-elastic initial value problem where an excess pore pressure is
    brought into a small source volume.
    '''

    discretized_source_class = meta.DiscretizedPorePressureSource

    pp = Float.T(
        default=1.0,
        help='initial excess pore pressure in [Pa]')

    def base_key(self):
        # Only the base source location matters for stacking; ``pp``
        # enters as an amplitude via get_factor().
        return Source.base_key(self)

    def get_factor(self):
        return self.pp

    def discretize_basesource(self, store, target=None):
        # Unit pore pressure at a single point, scaled by get_factor().
        return meta.DiscretizedPorePressureSource(pp=arr(1.0),
                                                  **self._dparams_base())

4322 

4323 

class PorePressureLineSource(Source):
    '''
    Excess pore pressure line source.

    The line source is centered at (north_shift, east_shift, depth).
    '''

    discretized_source_class = meta.DiscretizedPorePressureSource

    pp = Float.T(
        default=1.0,
        help='initial excess pore pressure in [Pa]')

    length = Float.T(
        default=0.0,
        help='length of the line source [m]')

    azimuth = Float.T(
        default=0.0,
        help='azimuth direction, clockwise from north [deg]')

    dip = Float.T(
        default=90.,
        help='dip direction, downward from horizontal [deg]')

    def base_key(self):
        return Source.base_key(self) + (self.azimuth, self.dip, self.length)

    def get_factor(self):
        return self.pp

    def discretize_basesource(self, store, target=None):
        # At least one point per GF grid spacing, always an odd count so
        # the center point is included.
        npoints = 2 * int(math.ceil(
            self.length / num.min(store.config.deltas))) + 1

        offsets = num.linspace(
            -0.5 * self.length, 0.5 * self.length, npoints)

        azi = self.azimuth * d2r
        dip = self.dip * d2r

        # Distribute the points along the unit direction vector given by
        # azimuth and dip.
        locations = num.zeros((npoints, 3))
        locations[:, 0] = self.north_shift \
            + offsets * math.cos(azi) * math.cos(dip)
        locations[:, 1] = self.east_shift \
            + offsets * math.sin(azi) * math.cos(dip)
        locations[:, 2] = self.depth + offsets * math.sin(dip)

        # Pore pressure is split evenly; the total amplitude enters via
        # get_factor().
        return meta.DiscretizedPorePressureSource(
            times=util.num_full(npoints, self.time),
            lat=self.lat,
            lon=self.lon,
            north_shifts=locations[:, 0],
            east_shifts=locations[:, 1],
            depths=locations[:, 2],
            pp=num.ones(npoints) / npoints)

4379 

4380 

class Request(Object):
    '''
    Synthetic seismogram computation request.

    ::

        Request(**kwargs)
        Request(sources, targets, **kwargs)
    '''

    sources = List.T(
        Source.T(),
        help='list of sources for which to produce synthetics.')

    targets = List.T(
        Target.T(),
        help='list of targets for which to produce synthetics.')

    @classmethod
    def args2kwargs(cls, args):
        # Map the documented positional call forms to keyword arguments;
        # three positional arguments are tolerated (and ignored beyond
        # the first two forms).
        if len(args) not in (0, 2, 3):
            raise BadRequest('Invalid arguments.')

        if len(args) == 2:
            return dict(sources=args[0], targets=args[1])
        else:
            return {}

    def __init__(self, *args, **kwargs):
        kwargs.update(self.args2kwargs(args))
        sources = kwargs.pop('sources', [])
        targets = kwargs.pop('targets', [])

        # For convenience, allow single objects instead of lists.
        if isinstance(sources, Source):
            sources = [sources]

        if isinstance(targets, (Target, StaticTarget)):
            targets = [targets]

        Object.__init__(self, sources=sources, targets=targets, **kwargs)

    @property
    def targets_dynamic(self):
        # Subset of targets which are waveform (dynamic) targets.
        return [t for t in self.targets if isinstance(t, Target)]

    @property
    def targets_static(self):
        # Subset of targets which are static displacement targets.
        return [t for t in self.targets if isinstance(t, StaticTarget)]

    @property
    def has_dynamic(self):
        return bool(self.targets_dynamic)

    @property
    def has_statics(self):
        return bool(self.targets_static)

    def subsources_map(self):
        '''
        Group sources by their stacking key (``base_key()``).
        '''
        m = defaultdict(list)
        for source in self.sources:
            m[source.base_key()].append(source)

        return m

    def subtargets_map(self):
        '''
        Group targets by their stacking key (``base_key()``).
        '''
        m = defaultdict(list)
        for target in self.targets:
            m[target.base_key()].append(target)

        return m

    def subrequest_map(self):
        '''
        Cartesian product of the source groups and the target groups.
        '''
        ms = self.subsources_map()
        mt = self.subtargets_map()
        m = {}
        for (ks, ls) in ms.items():
            for (kt, lt) in mt.items():
                m[ks, kt] = (ls, lt)

        return m

4461 

4462 

class ProcessingStats(Object):
    # Timing and counting statistics collected while processing a
    # request. The ``t_perc_*`` attributes hold the share of time spent
    # in the respective processing stage; ``t_wallclock``/``t_cpu`` are
    # totals and the ``n_*`` attributes are event counters.
    t_perc_get_store_and_receiver = Float.T(default=0.)
    t_perc_discretize_source = Float.T(default=0.)
    t_perc_make_base_seismogram = Float.T(default=0.)
    t_perc_make_same_span = Float.T(default=0.)
    t_perc_post_process = Float.T(default=0.)
    t_perc_optimize = Float.T(default=0.)
    t_perc_stack = Float.T(default=0.)
    t_perc_static_get_store = Float.T(default=0.)
    t_perc_static_discretize_basesource = Float.T(default=0.)
    t_perc_static_sum_statics = Float.T(default=0.)
    t_perc_static_post_process = Float.T(default=0.)
    t_wallclock = Float.T(default=0.)
    t_cpu = Float.T(default=0.)
    n_read_blocks = Int.T(default=0)
    n_results = Int.T(default=0)
    n_subrequests = Int.T(default=0)
    n_stores = Int.T(default=0)
    n_records_stacked = Int.T(default=0)

4482 

4483 

class Response(Object):
    '''
    Response object to a synthetic seismogram computation request.
    '''

    request = Request.T()
    results_list = List.T(List.T(meta.SeismosizerResult.T()))
    stats = ProcessingStats.T()

    def pyrocko_traces(self):
        '''
        Return a list of requested
        :class:`~pyrocko.trace.Trace` instances.
        '''

        traces = []
        for results in self.results_list:
            for result in results:
                # Skip error placeholders and non-trace results.
                if not isinstance(result, meta.Result):
                    continue
                traces.append(result.trace.pyrocko_trace())

        return traces

    def kite_scenes(self):
        '''
        Return a list of requested
        :class:`~kite.scenes` instances.
        '''
        kite_scenes = []
        for results in self.results_list:
            for result in results:
                if isinstance(result, meta.KiteSceneResult):
                    sc = result.get_scene()
                    kite_scenes.append(sc)

        return kite_scenes

    def static_results(self):
        '''
        Return a list of requested
        :class:`~pyrocko.gf.meta.StaticResult` instances.
        '''
        statics = []
        for results in self.results_list:
            for result in results:
                if not isinstance(result, meta.StaticResult):
                    continue
                statics.append(result)

        return statics

    def iter_results(self, get='pyrocko_traces'):
        '''
        Generator function to iterate over results of request.

        :param get: ``'pyrocko_traces'`` to yield traces,
            ``'results'`` to yield raw result objects; any other value
            yields nothing.

        Yields associated :py:class:`Source`,
        :class:`~pyrocko.gf.targets.Target`,
        :class:`~pyrocko.trace.Trace` instances in each iteration.
        '''

        for isource, source in enumerate(self.request.sources):
            for itarget, target in enumerate(self.request.targets):
                result = self.results_list[isource][itarget]
                if get == 'pyrocko_traces':
                    yield source, target, result.trace.pyrocko_trace()
                elif get == 'results':
                    yield source, target, result

    def snuffle(self, **kwargs):
        '''
        Open *snuffler* with requested traces.
        '''

        trace.snuffle(self.pyrocko_traces(), **kwargs)

4559 

4560 

class Engine(Object):
    '''
    Base class for synthetic seismogram calculators.
    '''

    def get_store_ids(self):
        '''
        Get list of available GF store IDs.

        Subclasses override this; the base implementation knows none.
        '''

        return []

4572 

4573 

class Rule(object):
    '''
    Base class for rules converting GF components into target quantities.

    Subclasses implement ``required_components(target)`` and
    ``apply_(target, base_seismogram)``.
    '''
    pass

4576 

4577 

class VectorRule(Rule):
    '''
    Rule for a three-component (north, east, down) vector quantity.

    The NED components are combined with the projection factors obtained
    from the target.
    '''

    def __init__(self, quantity, differentiate=0, integrate=0):
        # Component names like 'displacement.n', '.e', '.d'.
        self.components = [quantity + '.' + c for c in 'ned']
        self.differentiate = differentiate
        self.integrate = integrate

    def required_components(self, target):
        # Only components whose projection factor is non-vanishing need
        # to be computed by the engine.
        n, e, d = self.components
        sa, ca, sd, cd = target.get_sin_cos_factors()

        comps = []
        if nonzero(ca * cd):
            comps.append(n)

        if nonzero(sa * cd):
            comps.append(e)

        if nonzero(sd):
            comps.append(d)

        return tuple(comps)

    def apply_(self, target, base_seismogram):
        # Linear combination of the available components, weighted with
        # the target's projection factors.
        n, e, d = self.components
        sa, ca, sd, cd = target.get_sin_cos_factors()

        if nonzero(ca * cd):
            data = base_seismogram[n].data * (ca * cd)
            deltat = base_seismogram[n].deltat
        else:
            data = 0.0

        if nonzero(sa * cd):
            data = data + base_seismogram[e].data * (sa * cd)
            deltat = base_seismogram[e].deltat

        if nonzero(sd):
            data = data + base_seismogram[d].data * sd
            deltat = base_seismogram[d].deltat

        # NOTE(review): if all three projection factors vanish,
        # ``deltat`` is unbound here and differentiation would raise —
        # presumably such targets never reach this rule; confirm.
        if self.differentiate:
            data = util.diff_fd(self.differentiate, 4, deltat, data)

        if self.integrate:
            raise NotImplementedError('Integration is not implemented yet.')

        return data

4626 

4627 

class HorizontalVectorRule(Rule):
    '''
    Rule for a horizontal (north, east) vector quantity.

    Like :py:class:`VectorRule` but without a vertical component.
    '''

    def __init__(self, quantity, differentiate=0, integrate=0):
        self.components = [quantity + '.' + c for c in 'ne']
        self.differentiate = differentiate
        self.integrate = integrate

    def required_components(self, target):
        # Only components with non-vanishing projection factors are
        # requested from the engine.
        n, e = self.components
        sa, ca, _, _ = target.get_sin_cos_factors()

        comps = []
        if nonzero(ca):
            comps.append(n)

        if nonzero(sa):
            comps.append(e)

        return tuple(comps)

    def apply_(self, target, base_seismogram):
        n, e = self.components
        sa, ca, _, _ = target.get_sin_cos_factors()

        deltat = None
        if nonzero(ca):
            data = base_seismogram[n].data * ca
            deltat = base_seismogram[n].deltat
        else:
            data = 0.0

        if nonzero(sa):
            data = data + base_seismogram[e].data * sa
            deltat = base_seismogram[e].deltat

        if self.differentiate:
            # Bug fix: take the sampling interval from whichever
            # component is actually present. The original read
            # ``base_seismogram[e].deltat`` unconditionally, which raised
            # KeyError when the east component was not among the required
            # components (sa == 0).
            data = util.diff_fd(self.differentiate, 4, deltat, data)

        if self.integrate:
            raise NotImplementedError('Integration is not implemented yet.')

        return data

4668 

4669 

class ScalarRule(Rule):
    '''
    Rule for a scalar quantity taken directly from a single GF component.
    '''

    def __init__(self, quantity, differentiate=0):
        self.c = quantity
        # Bug fix: ``differentiate`` was accepted but never stored, so
        # ``apply_`` crashed with AttributeError on
        # ``self.differentiate``.
        self.differentiate = differentiate

    def required_components(self, target):
        return (self.c, )

    def apply_(self, target, base_seismogram):
        # Copy so the caller's buffer is not modified downstream.
        data = base_seismogram[self.c].data.copy()
        deltat = base_seismogram[self.c].deltat
        if self.differentiate:
            data = util.diff_fd(self.differentiate, 4, deltat, data)

        return data

4685 

4686 

class StaticDisplacement(Rule):
    '''
    Rule for static surface displacement.

    For satellite targets, an additional line-of-sight component
    ``displacement.los`` is derived from the NED components.
    '''

    def required_components(self, target):
        return tuple(['displacement.%s' % c for c in list('ned')])

    def apply_(self, target, base_statics):
        if isinstance(target, SatelliteTarget):
            # Project the NED displacements onto the line of sight; note
            # the sign flip on the downward component.
            # NOTE(review): the column order of los_fac (d, e, n) is
            # inferred from this expression only — confirm against
            # SatelliteTarget.get_los_factors.
            los_fac = target.get_los_factors()
            base_statics['displacement.los'] =\
                (los_fac[:, 0] * -base_statics['displacement.d'] +
                 los_fac[:, 1] * base_statics['displacement.e'] +
                 los_fac[:, 2] * base_statics['displacement.n'])
        return base_statics

4700 

4701 

# Candidate conversion rules per measurement quantity for dynamic
# (waveform) targets. For each quantity the first rule whose required
# components are provided by the GF store is used; e.g. velocity can be
# taken directly or derived by differentiating displacement.
channel_rules = {
    'displacement': [VectorRule('displacement')],
    'rotation': [VectorRule('rotation')],
    'velocity': [
        VectorRule('velocity'),
        VectorRule('displacement', differentiate=1)],
    'acceleration': [
        VectorRule('acceleration'),
        VectorRule('velocity', differentiate=1),
        VectorRule('displacement', differentiate=2)],
    'pore_pressure': [ScalarRule('pore_pressure')],
    'vertical_tilt': [HorizontalVectorRule('vertical_tilt')],
    'darcy_velocity': [VectorRule('darcy_velocity')],
}

# Candidate rules for static targets.
static_rules = {
    'displacement': [StaticDisplacement()]
}

4720 

4721 

class OutOfBoundsContext(Object):
    # Diagnostic payload attached to out-of-bounds/seismosizer errors,
    # identifying the offending source/target pair.
    source = Source.T()
    target = Target.T()
    distance = Float.T()
    components = List.T(String.T())

4727 

4728 

def process_dynamic_timeseries(work, psources, ptargets, engine, nthreads=0):
    '''
    Compute dynamic seismograms, batching targets per store, sample rate
    and interpolation so each batch is handled in a single call to the
    engine's ``base_seismograms``.

    Yields ``(isource, itarget, result), tcounters`` tuples.
    '''
    dsource_cache = {}

    # Placeholder stage counters, kept for interface compatibility with
    # process_dynamic().
    tcounters = list(range(6))

    targets = set()

    # Tag each target with its index so results can be mapped back.
    for itarget, target in enumerate(ptargets):
        target._id = itarget

    for w in work:
        _, _, isources, itargets = w
        targets.update([ptargets[itarget] for itarget in itargets])

    store_ids = set([t.store_id for t in targets])

    for isource, source in enumerate(psources):

        components = set()
        for target in targets:
            rule = engine.get_rule(source, target)
            components.update(rule.required_components(target))

        for store_id in store_ids:
            store_targets = [t for t in targets if t.store_id == store_id]

            sample_rates = set([t.sample_rate for t in store_targets])
            interpolations = set([t.interpolation for t in store_targets])

            base_seismograms = []
            store_targets_out = []

            for samp_rate in sample_rates:
                for interp in interpolations:
                    engine_targets = [
                        t for t in store_targets if t.sample_rate == samp_rate
                        and t.interpolation == interp]

                    if not engine_targets:
                        continue

                    store_targets_out += engine_targets

                    base_seismograms += engine.base_seismograms(
                        source,
                        engine_targets,
                        components,
                        dsource_cache,
                        nthreads)

            for iseis, seismogram in enumerate(base_seismograms):
                for tr in seismogram.values():
                    if tr.err != store.SeismosizerErrorEnum.SUCCESS:
                        # Bug fix: index into store_targets_out, which is
                        # ordered like base_seismograms (both follow the
                        # sample-rate/interpolation regrouping), not into
                        # store_targets, whose ordering differs.
                        raise SeismosizerError(
                            'Seismosizer failed with return code %i\n%s' % (
                                tr.err, str(
                                    OutOfBoundsContext(
                                        source=source,
                                        target=store_targets_out[iseis],
                                        distance=source.distance_to(
                                            store_targets_out[iseis]),
                                        components=components))))

            for seismogram, target in zip(base_seismograms, store_targets_out):

                try:
                    result = engine._post_process_dynamic(
                        seismogram, source, target)
                except SeismosizerError as e:
                    result = e

                yield (isource, target._id, result), tcounters

4805 

4806 

def process_dynamic(work, psources, ptargets, engine, nthreads=0):
    # Compute dynamic seismograms one (source, target) pair at a time.
    # Yields ``(isource, itarget, result), tcounters`` where tcounters
    # are stage timestamps (a post-processing timestamp is appended
    # below).
    dsource_cache = {}

    for w in work:
        _, _, isources, itargets = w

        sources = [psources[isource] for isource in isources]
        targets = [ptargets[itarget] for itarget in itargets]

        # NOTE(review): the rule of the first source is used for all
        # sources of the work unit — presumably a unit groups sources
        # sharing a base_key (see Request.subrequest_map); confirm.
        components = set()
        for target in targets:
            rule = engine.get_rule(sources[0], target)
            components.update(rule.required_components(target))

        for isource, source in zip(isources, sources):
            for itarget, target in zip(itargets, targets):

                try:
                    base_seismogram, tcounters = engine.base_seismogram(
                        source, target, components, dsource_cache, nthreads)
                except meta.OutOfBounds as e:
                    # NOTE(review): the context reports the first
                    # source/target of the unit, not necessarily the
                    # pair that failed.
                    e.context = OutOfBoundsContext(
                        source=sources[0],
                        target=targets[0],
                        distance=sources[0].distance_to(targets[0]),
                        components=components)
                    raise

                # Aggregate per-component stacking statistics.
                n_records_stacked = 0
                t_optimize = 0.0
                t_stack = 0.0

                for _, tr in base_seismogram.items():
                    n_records_stacked += tr.n_records_stacked
                    t_optimize += tr.t_optimize
                    t_stack += tr.t_stack

                try:
                    result = engine._post_process_dynamic(
                        base_seismogram, source, target)
                    result.n_records_stacked = n_records_stacked
                    result.n_shared_stacking = len(sources) *\
                        len(targets)
                    result.t_optimize = t_optimize
                    result.t_stack = t_stack
                except SeismosizerError as e:
                    # Report the failure as the result instead of
                    # aborting the whole request.
                    result = e

                tcounters.append(xtime())
                yield (isource, itarget, result), tcounters

4857 

4858 

def process_static(work, psources, ptargets, engine, nthreads=0):
    '''
    Compute static displacements for the given work units.

    Yields ``(isource, itarget, result), tcounters`` tuples, where
    tcounters are stage timestamps extended with a post-processing
    timestamp.
    '''
    for unit in work:
        _, _, isources, itargets = unit

        for isource in isources:
            source = psources[isource]

            for itarget in itargets:
                target = ptargets[itarget]

                components = engine.get_rule(
                    source, target).required_components(target)

                try:
                    base_statics, tcounters = engine.base_statics(
                        source, target, components, nthreads)
                except meta.OutOfBounds as e:
                    # Attach diagnostic context (first pair of the unit)
                    # before propagating.
                    e.context = OutOfBoundsContext(
                        source=psources[isources[0]],
                        target=ptargets[itargets[0]],
                        distance=float('nan'),
                        components=components)
                    raise

                result = engine._post_process_statics(
                    base_statics, source, target)
                tcounters.append(xtime())

                yield (isource, itarget, result), tcounters

4886 

4887 

class LocalEngine(Engine):
    '''
    Offline synthetic seismogram calculator.

    :param use_env: if ``True``, fill :py:attr:`store_superdirs` and
        :py:attr:`store_dirs` with paths set in environment variables
        GF_STORE_SUPERDIRS and GF_STORE_DIRS.
    :param use_config: if ``True``, fill :py:attr:`store_superdirs` and
        :py:attr:`store_dirs` with paths set in the user's config file.

        The config file can be found at :file:`~/.pyrocko/config.pf`

        .. code-block :: python

            gf_store_dirs: ['/home/pyrocko/gf_stores/ak135/']
            gf_store_superdirs: ['/home/pyrocko/gf_stores/']
    '''

    store_superdirs = List.T(
        String.T(),
        help='directories which are searched for Green\'s function stores')

    store_dirs = List.T(
        String.T(),
        help='additional individual Green\'s function store directories')

    default_store_id = String.T(
        optional=True,
        help='default store ID to be used when a request does not provide '
             'one')

4918 

    def __init__(self, **kwargs):
        # ``use_env`` and ``use_config`` are consumed here (see class
        # docstring); everything else goes to Engine.__init__.
        use_env = kwargs.pop('use_env', False)
        use_config = kwargs.pop('use_config', False)
        Engine.__init__(self, **kwargs)
        if use_env:
            # Colon-separated path lists, analogous to PATH.
            env_store_superdirs = os.environ.get('GF_STORE_SUPERDIRS', '')
            env_store_dirs = os.environ.get('GF_STORE_DIRS', '')
            if env_store_superdirs:
                self.store_superdirs.extend(env_store_superdirs.split(':'))

            if env_store_dirs:
                self.store_dirs.extend(env_store_dirs.split(':'))

        if use_config:
            c = config.config()
            self.store_superdirs.extend(c.gf_store_superdirs)
            self.store_dirs.extend(c.gf_store_dirs)

        self._check_store_dirs_type()
        # Lazily populated caches: store ID -> directory, and store ID ->
        # open Store object.
        self._id_to_store_dir = {}
        self._open_stores = {}
        self._effective_default_store_id = None

4941 

4942 def _check_store_dirs_type(self): 

4943 for sdir in ['store_dirs', 'store_superdirs']: 

4944 if not isinstance(self.__getattribute__(sdir), list): 

4945 raise TypeError("{} of {} is not of type list".format( 

4946 sdir, self.__class__.__name__)) 

4947 

    def _get_store_id(self, store_dir):
        # Open the store briefly just to read its configured ID, then
        # close it again so scanning does not hold files open.
        store_ = store.Store(store_dir)
        store_id = store_.config.id
        store_.close()
        return store_id

4953 

4954 def _looks_like_store_dir(self, store_dir): 

4955 return os.path.isdir(store_dir) and \ 

4956 all(os.path.isfile(pjoin(store_dir, x)) for x in 

4957 ('index', 'traces', 'config')) 

4958 

4959 def iter_store_dirs(self): 

4960 store_dirs = set() 

4961 for d in self.store_superdirs: 

4962 if not os.path.exists(d): 

4963 logger.warning('store_superdir not available: %s' % d) 

4964 continue 

4965 

4966 for entry in os.listdir(d): 

4967 store_dir = os.path.realpath(pjoin(d, entry)) 

4968 if self._looks_like_store_dir(store_dir): 

4969 store_dirs.add(store_dir) 

4970 

4971 for store_dir in self.store_dirs: 

4972 store_dirs.add(os.path.realpath(store_dir)) 

4973 

4974 return store_dirs 

4975 

    def _scan_stores(self):
        # Populate the store-ID -> directory mapping from all known
        # locations, refusing ambiguous setups where one ID points to
        # two different directories.
        for store_dir in self.iter_store_dirs():
            store_id = self._get_store_id(store_dir)
            if store_id not in self._id_to_store_dir:
                self._id_to_store_dir[store_id] = store_dir
            else:
                if store_dir != self._id_to_store_dir[store_id]:
                    raise DuplicateStoreId(
                        'GF store ID %s is used in (at least) two '
                        'different stores. Locations are: %s and %s' %
                        (store_id, self._id_to_store_dir[store_id], store_dir))

4987 

4988 def get_store_dir(self, store_id): 

4989 ''' 

4990 Lookup directory given a GF store ID. 

4991 ''' 

4992 

4993 if store_id not in self._id_to_store_dir: 

4994 self._scan_stores() 

4995 

4996 if store_id not in self._id_to_store_dir: 

4997 raise NoSuchStore(store_id, self.iter_store_dirs()) 

4998 

4999 return self._id_to_store_dir[store_id] 

5000 

5001 def get_store_ids(self): 

5002 ''' 

5003 Get list of available store IDs. 

5004 ''' 

5005 

5006 self._scan_stores() 

5007 return sorted(self._id_to_store_dir.keys()) 

5008 

5009 def effective_default_store_id(self): 

5010 if self._effective_default_store_id is None: 

5011 if self.default_store_id is None: 

5012 store_ids = self.get_store_ids() 

5013 if len(store_ids) == 1: 

5014 self._effective_default_store_id = self.get_store_ids()[0] 

5015 else: 

5016 raise NoDefaultStoreSet() 

5017 else: 

5018 self._effective_default_store_id = self.default_store_id 

5019 

5020 return self._effective_default_store_id 

5021 

5022 def get_store(self, store_id=None): 

5023 ''' 

5024 Get a store from the engine. 

5025 

5026 :param store_id: identifier of the store (optional) 

5027 :returns: :py:class:`~pyrocko.gf.store.Store` object 

5028 

5029 If no ``store_id`` is provided the store 

5030 associated with the :py:gattr:`default_store_id` is returned. 

5031 Raises :py:exc:`NoDefaultStoreSet` if :py:gattr:`default_store_id` is 

5032 undefined. 

5033 ''' 

5034 

5035 if store_id is None: 

5036 store_id = self.effective_default_store_id() 

5037 

5038 if store_id not in self._open_stores: 

5039 store_dir = self.get_store_dir(store_id) 

5040 self._open_stores[store_id] = store.Store(store_dir) 

5041 

5042 return self._open_stores[store_id] 

5043 

5044 def get_store_config(self, store_id): 

5045 store = self.get_store(store_id) 

5046 return store.config 

5047 

5048 def get_store_extra(self, store_id, key): 

5049 store = self.get_store(store_id) 

5050 return store.get_extra(key) 

5051 

5052 def close_cashed_stores(self): 

5053 ''' 

5054 Close and remove ids from cashed stores. 

5055 ''' 

5056 store_ids = [] 

5057 for store_id, store_ in self._open_stores.items(): 

5058 store_.close() 

5059 store_ids.append(store_id) 

5060 

5061 for store_id in store_ids: 

5062 self._open_stores.pop(store_id) 

5063 

5064 def get_rule(self, source, target): 

5065 cprovided = self.get_store(target.store_id).get_provided_components() 

5066 

5067 if isinstance(target, StaticTarget): 

5068 quantity = target.quantity 

5069 available_rules = static_rules 

5070 elif isinstance(target, Target): 

5071 quantity = target.effective_quantity() 

5072 available_rules = channel_rules 

5073 

5074 try: 

5075 for rule in available_rules[quantity]: 

5076 cneeded = rule.required_components(target) 

5077 if all(c in cprovided for c in cneeded): 

5078 return rule 

5079 

5080 except KeyError: 

5081 pass 

5082 

5083 raise BadRequest( 

5084 'No rule to calculate "%s" with GFs from store "%s" ' 

5085 'for source model "%s".' % ( 

5086 target.effective_quantity(), 

5087 target.store_id, 

5088 source.__class__.__name__)) 

5089 

5090 def _cached_discretize_basesource(self, source, store, cache, target): 

5091 if (source, store) not in cache: 

5092 cache[source, store] = source.discretize_basesource(store, target) 

5093 

5094 return cache[source, store] 

5095 

    def base_seismograms(self, source, targets, components, dsource_cache,
                         nthreads=0):
        '''
        Compute base seismograms for one source and a batch of targets.

        All targets must share the same interpolation scheme and sample
        rate; the first target is used as the representative for the
        batch-wide settings (store, interpolation, optimization).

        :param source: source model to discretize
        :param targets: list of targets; must be homogeneous as above
        :param components: GF components to compute
        :param dsource_cache: dict memoizing source discretizations
        :param nthreads: number of threads for stacking (0 = default)
        :returns: list of base seismograms, one per target, each expanded
            to a common time span via ``store.make_same_span``
        :raises: :py:exc:`BadRequest` if the targets are inhomogeneous
        '''

        target = targets[0]

        interp = set([t.interpolation for t in targets])
        if len(interp) > 1:
            raise BadRequest('Targets have different interpolation schemes.')

        rates = set([t.sample_rate for t in targets])
        if len(rates) > 1:
            raise BadRequest('Targets have different sample rates.')

        store_ = self.get_store(target.store_id)
        receivers = [t.receiver(store_) for t in targets]

        if target.sample_rate is not None:
            deltat = 1. / target.sample_rate
            rate = target.sample_rate
        else:
            # No explicit rate requested — fall back to the store's native
            # sample rate and let deltat=None mean "no resampling".
            deltat = None
            rate = store_.config.sample_rate

        # Per-target time windows as arrays (NaN where a target has no
        # tmin/tmax set).
        tmin = num.fromiter(
            (t.tmin for t in targets), dtype=float, count=len(targets))
        tmax = num.fromiter(
            (t.tmax for t in targets), dtype=float, count=len(targets))

        # Convert the time windows to sample index ranges.
        itmin = num.floor(tmin * rate).astype(num.int64)
        itmax = num.ceil(tmax * rate).astype(num.int64)
        nsamples = itmax - itmin + 1

        # Targets without a time window: itmin 0 and nsamples -1,
        # presumably signalling "full available span" to
        # calc_seismograms — TODO confirm against the store API.
        mask = num.isnan(tmin)
        itmin[mask] = 0
        nsamples[mask] = -1

        base_source = self._cached_discretize_basesource(
            source, store_, dsource_cache, target)

        base_seismograms = store_.calc_seismograms(
            base_source, receivers, components,
            deltat=deltat,
            itmin=itmin, nsamples=nsamples,
            interpolation=target.interpolation,
            optimization=target.optimization,
            nthreads=nthreads)

        # Expand each seismogram's components to a common span in place.
        for i, base_seismogram in enumerate(base_seismograms):
            base_seismograms[i] = store.make_same_span(base_seismogram)

        return base_seismograms

5147 

5148 def base_seismogram(self, source, target, components, dsource_cache, 

5149 nthreads): 

5150 

5151 tcounters = [xtime()] 

5152 

5153 store_ = self.get_store(target.store_id) 

5154 receiver = target.receiver(store_) 

5155 

5156 if target.tmin and target.tmax is not None: 

5157 rate = store_.config.sample_rate 

5158 itmin = int(num.floor(target.tmin * rate)) 

5159 itmax = int(num.ceil(target.tmax * rate)) 

5160 nsamples = itmax - itmin + 1 

5161 else: 

5162 itmin = None 

5163 nsamples = None 

5164 

5165 tcounters.append(xtime()) 

5166 base_source = self._cached_discretize_basesource( 

5167 source, store_, dsource_cache, target) 

5168 

5169 tcounters.append(xtime()) 

5170 

5171 if target.sample_rate is not None: 

5172 deltat = 1. / target.sample_rate 

5173 else: 

5174 deltat = None 

5175 

5176 base_seismogram = store_.seismogram( 

5177 base_source, receiver, components, 

5178 deltat=deltat, 

5179 itmin=itmin, nsamples=nsamples, 

5180 interpolation=target.interpolation, 

5181 optimization=target.optimization, 

5182 nthreads=nthreads) 

5183 

5184 tcounters.append(xtime()) 

5185 

5186 base_seismogram = store.make_same_span(base_seismogram) 

5187 

5188 tcounters.append(xtime()) 

5189 

5190 return base_seismogram, tcounters 

5191 

5192 def base_statics(self, source, target, components, nthreads): 

5193 tcounters = [xtime()] 

5194 store_ = self.get_store(target.store_id) 

5195 

5196 if target.tsnapshot is not None: 

5197 rate = store_.config.sample_rate 

5198 itsnapshot = int(num.floor(target.tsnapshot * rate)) 

5199 else: 

5200 itsnapshot = None 

5201 tcounters.append(xtime()) 

5202 

5203 base_source = source.discretize_basesource(store_, target=target) 

5204 

5205 tcounters.append(xtime()) 

5206 

5207 base_statics = store_.statics( 

5208 base_source, 

5209 target, 

5210 itsnapshot, 

5211 components, 

5212 target.interpolation, 

5213 nthreads) 

5214 

5215 tcounters.append(xtime()) 

5216 

5217 return base_statics, tcounters 

5218 

    def _post_process_dynamic(self, base_seismogram, source, target):
        '''
        Turn a base seismogram into the final trace for ``target``.

        Applies the component-combination rule, the source/target scaling
        factors and the source's post-STF convolution, then hands the
        resulting trace to the target for final post-processing.
        '''
        # All components share deltat/itmin; grab them from any one.
        base_any = next(iter(base_seismogram.values()))
        deltat = base_any.deltat
        itmin = base_any.itmin

        rule = self.get_rule(source, target)
        data = rule.apply_(target, base_seismogram)

        factor = source.get_factor() * target.get_factor()
        if factor != 1.0:
            data = data * factor

        # Source-time function applied after GF stacking.
        stf = source.effective_stf_post()

        times, amplitudes = stf.discretize_t(
            deltat, 0.0)

        # repeat end point to prevent boundary effects
        padded_data = num.empty(data.size + amplitudes.size, dtype=float)
        padded_data[:data.size] = data
        padded_data[data.size:] = data[-1]
        data = num.convolve(amplitudes, padded_data)

        # Shift the trace start by the STF's discretized time offset.
        tmin = itmin * deltat + times[0]

        tr = meta.SeismosizerTrace(
            codes=target.codes,
            # Trim the convolution tail introduced by the padding above.
            data=data[:-amplitudes.size],
            deltat=deltat,
            tmin=tmin)

        return target.post_process(self, source, tr)

5251 

    def _post_process_statics(self, base_statics, source, starget):
        '''
        Scale raw static results and hand them to the target for
        post-processing.
        '''
        rule = self.get_rule(source, starget)
        data = rule.apply_(starget, base_statics)

        factor = source.get_factor()
        if factor != 1.0:
            # In-place scaling of the arrays in 'data'.
            for v in data.values():
                v *= factor

        # NOTE(review): 'base_statics', not 'data', is passed on here.
        # This presumably works because rule.apply_ returns views into
        # base_statics, so the in-place scaling above is reflected in it —
        # TODO confirm; otherwise the scaling would be lost.
        return starget.post_process(self, source, base_statics)

5262 

5263 def process(self, *args, **kwargs): 

5264 ''' 

5265 Process a request. 

5266 

5267 :: 

5268 

5269 process(**kwargs) 

5270 process(request, **kwargs) 

5271 process(sources, targets, **kwargs) 

5272 

5273 The request can be given a a :py:class:`Request` object, or such an 

5274 object is created using ``Request(**kwargs)`` for convenience. 

5275 

5276 :returns: :py:class:`Response` object 

5277 ''' 

5278 

5279 if len(args) not in (0, 1, 2): 

5280 raise BadRequest('Invalid arguments.') 

5281 

5282 if len(args) == 1: 

5283 kwargs['request'] = args[0] 

5284 

5285 elif len(args) == 2: 

5286 kwargs.update(Request.args2kwargs(args)) 

5287 

5288 request = kwargs.pop('request', None) 

5289 status_callback = kwargs.pop('status_callback', None) 

5290 calc_timeseries = kwargs.pop('calc_timeseries', True) 

5291 

5292 nprocs = kwargs.pop('nprocs', None) 

5293 nthreads = kwargs.pop('nthreads', 1) 

5294 if nprocs is not None: 

5295 nthreads = nprocs 

5296 

5297 if request is None: 

5298 request = Request(**kwargs) 

5299 

5300 if resource: 

5301 rs0 = resource.getrusage(resource.RUSAGE_SELF) 

5302 rc0 = resource.getrusage(resource.RUSAGE_CHILDREN) 

5303 tt0 = xtime() 

5304 

5305 # make sure stores are open before fork() 

5306 store_ids = set(target.store_id for target in request.targets) 

5307 for store_id in store_ids: 

5308 self.get_store(store_id) 

5309 

5310 source_index = dict((x, i) for (i, x) in 

5311 enumerate(request.sources)) 

5312 target_index = dict((x, i) for (i, x) in 

5313 enumerate(request.targets)) 

5314 

5315 m = request.subrequest_map() 

5316 

5317 skeys = sorted(m.keys(), key=cmp_to_key(cmp_none_aware)) 

5318 results_list = [] 

5319 

5320 for i in range(len(request.sources)): 

5321 results_list.append([None] * len(request.targets)) 

5322 

5323 tcounters_dyn_list = [] 

5324 tcounters_static_list = [] 

5325 nsub = len(skeys) 

5326 isub = 0 

5327 

5328 # Processing dynamic targets through 

5329 # parimap(process_subrequest_dynamic) 

5330 

5331 if calc_timeseries: 

5332 _process_dynamic = process_dynamic_timeseries 

5333 else: 

5334 _process_dynamic = process_dynamic 

5335 

5336 if request.has_dynamic: 

5337 work_dynamic = [ 

5338 (i, nsub, 

5339 [source_index[source] for source in m[k][0]], 

5340 [target_index[target] for target in m[k][1] 

5341 if not isinstance(target, StaticTarget)]) 

5342 for (i, k) in enumerate(skeys)] 

5343 

5344 for ii_results, tcounters_dyn in _process_dynamic( 

5345 work_dynamic, request.sources, request.targets, self, 

5346 nthreads): 

5347 

5348 tcounters_dyn_list.append(num.diff(tcounters_dyn)) 

5349 isource, itarget, result = ii_results 

5350 results_list[isource][itarget] = result 

5351 

5352 if status_callback: 

5353 status_callback(isub, nsub) 

5354 

5355 isub += 1 

5356 

5357 # Processing static targets through process_static 

5358 if request.has_statics: 

5359 work_static = [ 

5360 (i, nsub, 

5361 [source_index[source] for source in m[k][0]], 

5362 [target_index[target] for target in m[k][1] 

5363 if isinstance(target, StaticTarget)]) 

5364 for (i, k) in enumerate(skeys)] 

5365 

5366 for ii_results, tcounters_static in process_static( 

5367 work_static, request.sources, request.targets, self, 

5368 nthreads=nthreads): 

5369 

5370 tcounters_static_list.append(num.diff(tcounters_static)) 

5371 isource, itarget, result = ii_results 

5372 results_list[isource][itarget] = result 

5373 

5374 if status_callback: 

5375 status_callback(isub, nsub) 

5376 

5377 isub += 1 

5378 

5379 if status_callback: 

5380 status_callback(nsub, nsub) 

5381 

5382 tt1 = time.time() 

5383 if resource: 

5384 rs1 = resource.getrusage(resource.RUSAGE_SELF) 

5385 rc1 = resource.getrusage(resource.RUSAGE_CHILDREN) 

5386 

5387 s = ProcessingStats() 

5388 

5389 if request.has_dynamic: 

5390 tcumu_dyn = num.sum(num.vstack(tcounters_dyn_list), axis=0) 

5391 t_dyn = float(num.sum(tcumu_dyn)) 

5392 perc_dyn = map(float, tcumu_dyn / t_dyn * 100.) 

5393 (s.t_perc_get_store_and_receiver, 

5394 s.t_perc_discretize_source, 

5395 s.t_perc_make_base_seismogram, 

5396 s.t_perc_make_same_span, 

5397 s.t_perc_post_process) = perc_dyn 

5398 else: 

5399 t_dyn = 0. 

5400 

5401 if request.has_statics: 

5402 tcumu_static = num.sum(num.vstack(tcounters_static_list), axis=0) 

5403 t_static = num.sum(tcumu_static) 

5404 perc_static = map(float, tcumu_static / t_static * 100.) 

5405 (s.t_perc_static_get_store, 

5406 s.t_perc_static_discretize_basesource, 

5407 s.t_perc_static_sum_statics, 

5408 s.t_perc_static_post_process) = perc_static 

5409 

5410 s.t_wallclock = tt1 - tt0 

5411 if resource: 

5412 s.t_cpu = ( 

5413 (rs1.ru_utime + rs1.ru_stime + rc1.ru_utime + rc1.ru_stime) - 

5414 (rs0.ru_utime + rs0.ru_stime + rc0.ru_utime + rc0.ru_stime)) 

5415 s.n_read_blocks = ( 

5416 (rs1.ru_inblock + rc1.ru_inblock) - 

5417 (rs0.ru_inblock + rc0.ru_inblock)) 

5418 

5419 n_records_stacked = 0. 

5420 for results in results_list: 

5421 for result in results: 

5422 if not isinstance(result, meta.Result): 

5423 continue 

5424 shr = float(result.n_shared_stacking) 

5425 n_records_stacked += result.n_records_stacked / shr 

5426 s.t_perc_optimize += result.t_optimize / shr 

5427 s.t_perc_stack += result.t_stack / shr 

5428 s.n_records_stacked = int(n_records_stacked) 

5429 if t_dyn != 0.: 

5430 s.t_perc_optimize /= t_dyn * 100 

5431 s.t_perc_stack /= t_dyn * 100 

5432 

5433 return Response( 

5434 request=request, 

5435 results_list=results_list, 

5436 stats=s) 

5437 

5438 

class RemoteEngine(Engine):
    '''
    Client for remote synthetic seismogram calculator.
    '''

    site = String.T(default=ws.g_default_site, optional=True)
    url = String.T(default=ws.g_url, optional=True)

    def process(self, request=None, status_callback=None, **kwargs):
        '''
        Forward a processing request to the remote seismosizer service.

        A :py:class:`Request` is built from ``kwargs`` when ``request`` is
        not given.
        '''
        effective_request = request if request is not None \
            else Request(**kwargs)

        return ws.seismosizer(
            url=self.url, site=self.site, request=effective_request)

5453 

5454 

# Module-level singleton engine, created lazily by get_engine().
g_engine = None


def get_engine(store_superdirs=None):
    '''
    Get the shared global :py:class:`LocalEngine` instance, creating it on
    first use.

    :param store_superdirs: additional GF store superdirectories to
        register with the engine (optional)
    :returns: the shared engine instance
    '''
    global g_engine
    if g_engine is None:
        g_engine = LocalEngine(use_env=True, use_config=True)

    # Default changed from a mutable '[]' to the 'None' sentinel (a classic
    # Python pitfall); 'or []' keeps the old call semantics intact.
    for d in store_superdirs or []:
        if d not in g_engine.store_superdirs:
            g_engine.store_superdirs.append(d)

    return g_engine

5468 

5469 

class SourceGroup(Object):
    '''
    Abstract base class for collections of sources.

    Attribute access on a group gathers the attribute of the same name
    from each contained source into a numpy float array. Subclasses must
    provide iteration and length.
    '''

    def __getattr__(self, name):
        values = (getattr(source, name) for source in self)
        return num.fromiter(values, dtype=float)

    def __iter__(self):
        raise NotImplementedError(
            'This method should be implemented in subclass.')

    def __len__(self):
        raise NotImplementedError(
            'This method should be implemented in subclass.')

5483 

5484 

class SourceList(SourceGroup):
    '''
    Plain list of sources.
    '''

    # List of the contained source objects.
    sources = List.T(Source.T())

    def append(self, s):
        '''
        Add a source to the list.
        '''
        self.sources.append(s)

    def __iter__(self):
        return iter(self.sources)

    def __len__(self):
        return len(self.sources)

5496 

5497 

class SourceGrid(SourceGroup):
    '''
    Regular grid of sources, generated by varying parameters of a base
    source over given ranges.
    '''

    # Template source; grid members are clones with varied parameters.
    base = Source.T()
    # Parameter name -> range of values to scan.
    variables = Dict.T(String.T(), Range.T())
    # Optional explicit ordering of the varied parameters.
    order = List.T(String.T())

    def __len__(self):
        total = 1
        for _, coords in self.make_coords(self.base):
            total *= len(list(coords))

        return total

    def __iter__(self):
        for combination in permudef(self.make_coords(self.base)):
            source = self.base.clone(**dict(combination))
            source.regularize()
            yield source

    def ordered_params(self):
        '''
        Yield the varied parameter names, honoring ``order`` first, then
        the base source's own parameter order.
        '''
        remaining = list(self.variables.keys())
        for name in self.order + list(self.base.keys()):
            if name in remaining:
                yield name
                remaining.remove(name)

        if remaining:
            raise Exception('Invalid parameter "%s" for source type "%s".' %
                            (remaining[0], self.base.__class__.__name__))

    def make_coords(self, base):
        '''
        Build the list of ``(parameter, values)`` pairs spanning the grid.
        '''
        return [(param, self.variables[param].make(base=base[param]))
                for param in self.ordered_params()]

5530 

5531 

# All source model classes exposed by this module.
source_classes = [
    Source,
    SourceWithMagnitude,
    SourceWithDerivedMagnitude,
    ExplosionSource,
    RectangularExplosionSource,
    DCSource,
    CLVDSource,
    VLVDSource,
    MTSource,
    RectangularSource,
    PseudoDynamicRupture,
    DoubleDCSource,
    RingfaultSource,
    CombiSource,
    SFSource,
    PorePressurePointSource,
    PorePressureLineSource,
]

# All source-time-function classes exposed by this module.
stf_classes = [
    STF,
    BoxcarSTF,
    TriangularSTF,
    HalfSinusoidSTF,
    ResonatorSTF,
]

# Public API: exception/choice types, every source and STF class listed
# above, and the engine-related classes and helpers.
__all__ = '''
SeismosizerError
BadRequest
NoSuchStore
DerivedMagnitudeError
STFMode
'''.split() + [S.__name__ for S in source_classes + stf_classes] + '''
Request
ProcessingStats
Response
Engine
LocalEngine
RemoteEngine
source_classes
get_engine
Range
SourceGroup
SourceList
SourceGrid
map_anchor
'''.split()