1# http://pyrocko.org - GPLv3 

2# 

3# The Pyrocko Developers, 21st Century 

4# ---|P------/S----------~Lg---------- 

5 

6''' 

7 

8 

9 

10.. _coordinate-system-names: 

11 

12Coordinate systems 

13.................. 

14 

15Coordinate system names commonly used in source models. 

16 

17================= ============================================ 

18Name Description 

19================= ============================================ 

20``'xyz'`` northing, easting, depth in [m] 

21``'xy'`` northing, easting in [m] 

22``'latlon'`` latitude, longitude in [deg] 

23``'lonlat'`` longitude, latitude in [deg] 

24``'latlondepth'`` latitude, longitude in [deg], depth in [m] 

25================= ============================================ 

26''' 

27 

28 

29from __future__ import absolute_import, division, print_function 

30 

31from collections import defaultdict 

32from functools import cmp_to_key 

33import time 

34import math 

35import os 

36import re 

37import logging 

38try: 

39 import resource 

40except ImportError: 

41 resource = None 

42from hashlib import sha1 

43 

44import numpy as num 

45from scipy.interpolate import RegularGridInterpolator 

46 

47from pyrocko.guts import (Object, Float, String, StringChoice, List, 

48 Timestamp, Int, SObject, ArgumentError, Dict, 

49 ValidationError, Bool) 

50from pyrocko.guts_array import Array 

51 

52from pyrocko import moment_tensor as pmt 

53from pyrocko import trace, util, config, model, eikonal_ext 

54from pyrocko.orthodrome import ne_to_latlon 

55from pyrocko.model import Location 

56from pyrocko.modelling import OkadaSource, make_okada_coefficient_matrix, \ 

57 okada_ext, invert_fault_dislocations_bem 

58 

59from . import meta, store, ws 

60from .tractions import TractionField, DirectedTractions 

61from .targets import Target, StaticTarget, SatelliteTarget 

62 

63pjoin = os.path.join 

64 

65guts_prefix = 'pf' 

66 

67d2r = math.pi / 180. 

68r2d = 180. / math.pi 

69km = 1e3 

70 

71logger = logging.getLogger('pyrocko.gf.seismosizer') 

72 

73 

def cmp_none_aware(a, b):
    '''
    Three-way comparison which tolerates ``None`` values.

    Returns -1, 0 or 1. ``None`` sorts before any other value; two ``None``
    values compare equal. Tuples are compared element-wise, with the first
    differing pair deciding the result (equal prefixes compare equal).
    '''

    if isinstance(a, tuple) and isinstance(b, tuple):
        for elem_a, elem_b in zip(a, b):
            result = cmp_none_aware(elem_a, elem_b)
            if result != 0:
                return result

        return 0

    if a is None and b is None:
        return 0

    if a is None:
        return -1

    if b is None:
        return 1

    return bool(a > b) - bool(a < b)

96 

97 

def xtime():
    # Thin wrapper around time.time(); presumably kept as an indirection so
    # the time source can be substituted (e.g. in tests) — TODO confirm.
    return time.time()

100 

101 

class SeismosizerError(Exception):
    '''
    Base exception for errors raised by this module.
    '''
    pass

104 

105 

class BadRequest(SeismosizerError):
    '''
    Raised when an invalid request is made to the seismosizer.
    '''
    pass

108 

109 

class DuplicateStoreId(Exception):
    '''
    Raised when two GF stores with the same id are encountered.
    '''
    pass

112 

113 

class NoDefaultStoreSet(Exception):
    '''
    Raised when a default GF store is required but none has been configured.
    '''
    pass

116 

117 

class ConversionError(Exception):
    '''
    Raised when a conversion between source representations fails.
    '''
    pass

120 

121 

class NoSuchStore(BadRequest):
    '''
    Raised when a requested GF store cannot be found.

    :param store_id: id of the store which was looked up, or ``None``
    :param dirs: folders which were searched, or ``None``
    '''

    def __init__(self, store_id=None, dirs=None):
        BadRequest.__init__(self)
        self.store_id = store_id
        self.dirs = dirs

    def __str__(self):
        if self.store_id is None:
            message = 'GF store not found.'
        else:
            message = 'no GF store with id "%s" found.' % self.store_id

        if self.dirs is not None:
            message += ' Searched folders:\n %s' % '\n '.join(
                sorted(self.dirs))

        return message

138 

139 

def ufloat(s):
    '''
    Convert a string to ``float``, supporting metric unit suffixes.

    Recognized suffixes: ``'k'`` (1e3) and ``'M'`` (1e6), e.g.
    ``ufloat('2.5k') == 2500.0``.

    :param s: string to convert
    :returns: value as ``float``
    :raises ValueError: if the string consists only of a unit suffix, or is
        not a valid number
    '''

    units = {
        'k': 1e3,
        'M': 1e6,
    }

    s_orig = s
    factor = 1.0
    if s and s[-1] in units:
        factor = units[s[-1]]
        s = s[:-1]
        if not s:
            # Report the original input: the stripped remainder is always
            # empty here, which previously made the message meaningless.
            raise ValueError('unit without a number: \'%s\'' % s_orig)

    return float(s) * factor

154 

155 

def ufloat_or_none(s):
    '''
    Like :py:func:`ufloat`, but map empty/falsy input to ``None``.
    '''
    return ufloat(s) if s else None

161 

162 

def int_or_none(s):
    '''
    Convert to ``int``, mapping empty/falsy input to ``None``.
    '''
    return int(s) if s else None

168 

169 

def nonzero(x, eps=1e-15):
    '''
    Check whether ``x`` differs from zero by more than ``eps``.
    '''
    return x > eps or x < -eps

172 

173 

def permudef(ln, j=0):
    '''
    Generate all permutations of a list of ``(key, values)`` definitions.

    ``ln`` is a list of ``(key, iterable_of_values)`` pairs. For every
    combination, entry ``j`` (and deeper entries, recursively) is replaced
    in place by a single ``(key, value)`` pair and the mutated list itself
    is yielded. After a level is exhausted, the original ``(key, values)``
    entry is restored, so ``ln`` is unchanged after full iteration.

    NOTE: every yielded result is the *same* list object; consumers must
    copy it to keep a snapshot.
    '''
    if j < len(ln):
        k, v = ln[j]
        for y in v:
            ln[j] = k, y
            for s in permudef(ln, j + 1):
                yield s

        # Restore the original (key, values) entry before returning.
        ln[j] = k, v
        return
    else:
        yield ln

186 

187 

def arr(x):
    '''
    Coerce ``x`` into a numpy array with at least one dimension.
    '''
    a = num.asarray(x)
    if a.ndim == 0:
        a = a[num.newaxis]

    return a

190 

191 

def discretize_rect_source(deltas, deltat, time, north, east, depth,
                           strike, dip, length, width,
                           anchor, velocity=None, stf=None,
                           nucleation_x=None, nucleation_y=None,
                           decimation_factor=1, pointsonly=False,
                           plane_coords=False,
                           aggressive_oversampling=False):
    '''
    Discretize a rectangular source into a grid of point sources.

    The grid spacing is chosen from the GF store spacings ``deltas`` (and,
    if given, from ``deltat * velocity``), optionally coarsened by
    ``decimation_factor`` or refined by ``aggressive_oversampling``.

    :param velocity: rupture velocity, required unless ``pointsonly`` is set
    :param pointsonly: if set, return only the rotated, shifted point
        coordinates (no source time function handling)
    :param plane_coords: if set, return the in-plane coordinates before
        rotation and translation
    :returns: ``(points, dl, dw, nl, nw)`` in pointsonly/plane_coords mode,
        otherwise ``(points2, times2, amplitudes2, dl, dw, nl, nw)``
    :raises AttributeError: if ``velocity`` is missing in time mode
    '''

    if stf is None:
        stf = STF()

    if not velocity and not pointsonly:
        raise AttributeError('velocity is required in time mode')

    mindeltagf = float(num.min(deltas))
    if velocity:
        mindeltagf = min(mindeltagf, deltat * velocity)

    ln = length
    wd = width

    if aggressive_oversampling:
        nl = int((2. / decimation_factor) * num.ceil(ln / mindeltagf)) + 1
        nw = int((2. / decimation_factor) * num.ceil(wd / mindeltagf)) + 1
    else:
        nl = int((1. / decimation_factor) * num.ceil(ln / mindeltagf)) + 1
        nw = int((1. / decimation_factor) * num.ceil(wd / mindeltagf)) + 1

    n = int(nl * nw)

    dl = ln / nl
    dw = wd / nw

    xl = num.linspace(-0.5 * (ln - dl), 0.5 * (ln - dl), nl)
    xw = num.linspace(-0.5 * (wd - dw), 0.5 * (wd - dw), nw)

    points = num.zeros((n, 3))
    points[:, 0] = num.tile(xl, nw)
    points[:, 1] = num.repeat(xw, nl)

    if nucleation_x is not None:
        dist_x = num.abs(nucleation_x - points[:, 0])
    else:
        dist_x = num.zeros(n)

    if nucleation_y is not None:
        dist_y = num.abs(nucleation_y - points[:, 1])
    else:
        dist_y = num.zeros(n)

    dist = num.sqrt(dist_x**2 + dist_y**2)

    # Only compute rupture onset times when a velocity is available. The
    # previous unconditional division failed for pointsonly=True with
    # velocity=None, although times are never used on that path.
    times = dist / velocity if velocity else None

    anch_x, anch_y = map_anchor[anchor]

    points[:, 0] -= anch_x * 0.5 * length
    points[:, 1] -= anch_y * 0.5 * width

    if plane_coords:
        return points, dl, dw, nl, nw

    rotmat = pmt.euler_to_matrix(dip * d2r, strike * d2r, 0.0)
    points = num.dot(rotmat.T, points.T).T

    points[:, 0] += north
    points[:, 1] += east
    points[:, 2] += depth

    if pointsonly:
        return points, dl, dw, nl, nw

    # Convolve onset times with the source time function discretization:
    # one entry per (sub-source, stf-sample) combination.
    xtau, amplitudes = stf.discretize_t(deltat, time)
    nt = xtau.size

    points2 = num.repeat(points, nt, axis=0)
    times2 = (times[:, num.newaxis] + xtau[num.newaxis, :]).ravel()
    amplitudes2 = num.tile(amplitudes, n)

    return points2, times2, amplitudes2, dl, dw, nl, nw

271 

272 

def check_rect_source_discretisation(points2, nl, nw, store):
    '''
    Check whether a discretized rectangular source is sampled densely
    enough.

    Compares the spacing between neighbouring sub-sources against half of
    the minimum resolvable wavelength derived from the store's shear-wave
    velocity profile and sampling interval (``vs * 2 * deltat``).

    :param points2: ``(n, 3)`` array of sub-source coordinates
    :param nl: number of sub-sources along the first grid direction
    :param nw: number of sub-sources along the second grid direction
    :param store: GF store providing ``config.get_vs`` and ``config.deltat``
    :returns: ``True`` if the discretisation is acceptable (or too small to
        judge), ``False`` otherwise
    '''
    # We assume a non-rotated fault plane
    N_CRITICAL = 8
    points = points2.T.reshape((3, nl, nw))
    if points.size <= N_CRITICAL:
        # NOTE(review): points.size counts coordinates (3 * nl * nw), not
        # sub-sources, so the message may overcount — confirm intent.
        logger.warning('RectangularSource is defined by only %d sub-sources!'
                       % points.size)
        return True

    # Distances between corresponding points of the first two grid rows.
    distances = num.sqrt(
        (points[0, 0, :] - points[0, 1, :])**2 +
        (points[1, 0, :] - points[1, 1, :])**2 +
        (points[2, 0, :] - points[2, 1, :])**2)

    depths = points[2, 0, :]
    vs_profile = store.config.get_vs(
        lat=0., lon=0.,
        points=num.repeat(depths[:, num.newaxis], 3, axis=1),
        interpolation='multilinear')

    min_wavelength = vs_profile * (store.config.deltat * 2)
    if not num.all(min_wavelength > distances / 2):
        return False
    return True

297 

298 

def outline_rect_source(strike, dip, length, width, anchor):
    '''
    Compute the outline of a rectangular source.

    Returns a closed polygon (5 points, first == last) of the rectangle's
    corners, shifted by the anchor offset and rotated according to strike
    and dip.
    '''
    hl = 0.5 * length
    hw = 0.5 * width

    corners = num.array(
        [[-hl, -hw, 0.],
         [hl, -hw, 0.],
         [hl, hw, 0.],
         [-hl, hw, 0.],
         [-hl, -hw, 0.]])

    anch_x, anch_y = map_anchor[anchor]
    corners[:, 0] -= anch_x * hl
    corners[:, 1] -= anch_y * hw

    rotmat = pmt.euler_to_matrix(dip * d2r, strike * d2r, 0.0)

    return num.dot(rotmat.T, corners.T).T

316 

317 

def from_plane_coords(
        strike, dip, length, width, depth, x_plane_coords, y_plane_coords,
        lat=0., lon=0.,
        north_shift=0, east_shift=0,
        anchor='top', cs='xy'):
    '''
    Convert normalized fault-plane coordinates to absolute coordinates.

    ``x_plane_coords``/``y_plane_coords`` are scalars or lists of scaling
    factors applied to the fault's half-length/half-width. For each pair a
    scaled rectangle is built, shifted by the anchor offset, rotated by
    strike and dip, and translated by ``north_shift``/``east_shift`` and
    ``depth``.

    :param cs: output coordinate system, one of ``'xy'``, ``'latlon'``,
        ``'lonlat'``
    :returns: ``(x, y)`` lists of coordinates (``(y, x)`` for
        ``cs='lonlat'``)
    '''

    ln = length
    wd = width
    x_abs = []
    y_abs = []
    if not isinstance(x_plane_coords, list):
        x_plane_coords = [x_plane_coords]
        y_plane_coords = [y_plane_coords]

    for x_plane, y_plane in zip(x_plane_coords, y_plane_coords):
        # Corner points of the rectangle scaled by the plane coordinates.
        points = num.array(
            [[-0.5 * ln * x_plane, -0.5 * wd * y_plane, 0.],
             [0.5 * ln * x_plane, -0.5 * wd * y_plane, 0.],
             [0.5 * ln * x_plane, 0.5 * wd * y_plane, 0.],
             [-0.5 * ln * x_plane, 0.5 * wd * y_plane, 0.],
             [-0.5 * ln * x_plane, -0.5 * wd * y_plane, 0.]])

        anch_x, anch_y = map_anchor[anchor]
        points[:, 0] -= anch_x * 0.5 * length
        points[:, 1] -= anch_y * 0.5 * width

        rotmat = pmt.euler_to_matrix(dip * d2r, strike * d2r, 0.0)

        points = num.dot(rotmat.T, points.T).T
        points[:, 0] += north_shift
        points[:, 1] += east_shift
        points[:, 2] += depth
        if cs in ('latlon', 'lonlat'):
            latlon = ne_to_latlon(lat, lon,
                                  points[:, 0], points[:, 1])
            latlon = num.array(latlon).T
            # NOTE(review): only the second (lon) and third (lat) corner
            # rows are extracted here — confirm this single-point selection
            # is intended.
            x_abs.append(latlon[1:2, 1])
            y_abs.append(latlon[2:3, 0])
        if cs == 'xy':
            x_abs.append(points[1:2, 1])
            y_abs.append(points[2:3, 0])

    if cs == 'lonlat':
        return y_abs, x_abs
    else:
        return x_abs, y_abs

364 

365 

def points_on_rect_source(
        strike, dip, length, width, anchor,
        discretized_basesource=None, points_x=None, points_y=None):
    '''
    Compute point coordinates on a rectangular source plane.

    Either the patch-corner grid of ``discretized_basesource`` is used, or
    explicit normalized coordinates ``points_x``/``points_y`` (scaling
    factors of the half-length/half-width).

    :returns: ``(n, 3)`` array of anchor-shifted, rotated coordinates
    '''

    ln = length
    wd = width

    # Accept scalars and lists for the explicit point coordinates.
    if isinstance(points_x, list) or isinstance(points_x, float):
        points_x = num.array([points_x])
    if isinstance(points_y, list) or isinstance(points_y, float):
        points_y = num.array([points_y])

    if discretized_basesource:
        ds = discretized_basesource

        # Corner grid: one more node than patches in each direction.
        nl_patches = ds.nl + 1
        nw_patches = ds.nw + 1

        npoints = nl_patches * nw_patches
        points = num.zeros((npoints, 3))
        ln_patches = num.array([il for il in range(nl_patches)])
        wd_patches = num.array([iw for iw in range(nw_patches)])

        # Normalize node indices to the interval [-1, 1].
        points_ln =\
            2 * ((ln_patches - num.min(ln_patches)) / num.ptp(ln_patches)) - 1
        points_wd =\
            2 * ((wd_patches - num.min(wd_patches)) / num.ptp(wd_patches)) - 1

        for il in range(nl_patches):
            for iw in range(nw_patches):
                points[il * nw_patches + iw, :] = num.array([
                    points_ln[il] * ln * 0.5,
                    points_wd[iw] * wd * 0.5, 0.0])

    elif points_x.shape[0] > 0 and points_y.shape[0] > 0:
        points = num.zeros(shape=((len(points_x), 3)))
        for i, (x, y) in enumerate(zip(points_x, points_y)):
            points[i, :] = num.array(
                [x * 0.5 * ln, y * 0.5 * wd, 0.0])

    # NOTE(review): if neither a basesource nor non-empty point coordinates
    # are given, ``points`` is unbound below — confirm callers always
    # provide one of the two.

    anch_x, anch_y = map_anchor[anchor]

    points[:, 0] -= anch_x * 0.5 * ln
    points[:, 1] -= anch_y * 0.5 * wd

    rotmat = pmt.euler_to_matrix(dip * d2r, strike * d2r, 0.0)

    return num.dot(rotmat.T, points.T).T

414 

415 

class InvalidGridDef(Exception):
    '''
    Raised when a grid definition string cannot be parsed or applied.
    '''
    pass

418 

419 

class Range(SObject):
    '''
    Convenient range specification.

    Equivalent ways to specify the range [ 0., 1000., ... 10000. ]::

      Range('0 .. 10k : 1k')
      Range(start=0., stop=10e3, step=1e3)
      Range(0, 10e3, 1e3)
      Range('0 .. 10k @ 11')
      Range(start=0., stop=10*km, n=11)

      Range(0, 10e3, n=11)
      Range(values=[x*1e3 for x in range(11)])

    Depending on the use context, it can be possible to omit any part of the
    specification. E.g. in the context of extracting a subset of an already
    existing range, the existing range's specification values would be filled
    in where missing.

    The values are distributed with equal spacing, unless the ``spacing``
    argument is modified. The values can be created offset or relative to an
    external base value with the ``relative`` argument if the use context
    supports this.

    The range specification can be expressed with a short string
    representation::

        'start .. stop @ num | spacing, relative'
        'start .. stop : step | spacing, relative'

    most parts of the expression can be omitted if not needed. Whitespace is
    allowed for readability but can also be omitted.
    '''

    start = Float.T(optional=True)
    stop = Float.T(optional=True)
    step = Float.T(optional=True)
    n = Int.T(optional=True)
    values = Array.T(optional=True, dtype=float, shape=(None,))

    spacing = StringChoice.T(
        choices=['lin', 'log', 'symlog'],
        default='lin',
        optional=True)

    relative = StringChoice.T(
        choices=['', 'add', 'mult'],
        default='',
        optional=True)

    pattern = re.compile(r'^((?P<start>.*)\.\.(?P<stop>[^@|:]*))?'
                         r'(@(?P<n>[^|]+)|:(?P<step>[^|]+))?'
                         r'(\|(?P<stuff>.+))?$')

    def __init__(self, *args, **kwargs):
        d = {}
        if len(args) == 1:
            d = self.parse(args[0])
        elif len(args) in (2, 3):
            d['start'], d['stop'] = [float(x) for x in args[:2]]
            if len(args) == 3:
                d['step'] = float(args[2])

        for k, v in kwargs.items():
            if k in d:
                raise ArgumentError('%s specified more than once' % k)

            d[k] = v

        SObject.__init__(self, **d)

    def __str__(self):
        def sfloat(x):
            if x is not None:
                return '%g' % x
            else:
                return ''

        # Explicit None/length check: truth-testing a multi-element numpy
        # array raises ValueError.
        if self.values is not None and len(self.values) != 0:
            return ','.join('%g' % x for x in self.values)

        if self.start is None and self.stop is None:
            s0 = ''
        else:
            s0 = '%s .. %s' % (sfloat(self.start), sfloat(self.stop))

        s1 = ''
        if self.step is not None:
            s1 = [' : %g', ':%g'][s0 == ''] % self.step
        elif self.n is not None:
            s1 = [' @ %i', '@%i'][s0 == ''] % self.n

        if self.spacing == 'lin' and self.relative == '':
            s2 = ''
        else:
            x = []
            if self.spacing != 'lin':
                x.append(self.spacing)

            if self.relative != '':
                x.append(self.relative)

            s2 = ' | %s' % ','.join(x)

        return s0 + s1 + s2

    @classmethod
    def parse(cls, s):
        '''
        Parse a short string range specification into a keyword dict.

        :raises InvalidGridDef: if the string cannot be interpreted
        '''
        s = re.sub(r'\s+', '', s)
        m = cls.pattern.match(s)
        if not m:
            # Fallback: try to interpret as a comma-separated value list.
            try:
                vals = [ufloat(x) for x in s.split(',')]
            except Exception:
                raise InvalidGridDef(
                    '"%s" is not a valid range specification' % s)

            return dict(values=num.array(vals, dtype=float))

        d = m.groupdict()
        try:
            start = ufloat_or_none(d['start'])
            stop = ufloat_or_none(d['stop'])
            step = ufloat_or_none(d['step'])
            n = int_or_none(d['n'])
        except Exception:
            raise InvalidGridDef(
                '"%s" is not a valid range specification' % s)

        spacing = 'lin'
        relative = ''

        if d['stuff'] is not None:
            t = d['stuff'].split(',')
            for x in t:
                if x in cls.spacing.choices:
                    spacing = x
                elif x and x in cls.relative.choices:
                    relative = x
                else:
                    raise InvalidGridDef(
                        '"%s" is not a valid range specification' % s)

        return dict(start=start, stop=stop, step=step, n=n, spacing=spacing,
                    relative=relative)

    def make(self, mi=None, ma=None, inc=None, base=None, eps=1e-5):
        '''
        Create the list of values from the specification.

        Missing ``start``/``stop``/``step`` values are filled in from
        ``mi``, ``ma`` and ``inc``. ``base`` is required when ``relative``
        is set.

        :raises InvalidGridDef: if the specification cannot be completed in
            this context
        '''
        # Explicit None/length check: truth-testing a multi-element numpy
        # array raises ValueError.
        if self.values is not None and len(self.values) != 0:
            return self.values

        start = self.start
        stop = self.stop
        step = self.step
        n = self.n

        swap = step is not None and step < 0.
        if start is None:
            start = [mi, ma][swap]
        if stop is None:
            stop = [ma, mi][swap]
        if step is None and inc is not None:
            step = [inc, -inc][ma < mi]

        if start is None or stop is None:
            raise InvalidGridDef(
                'Cannot use range specification "%s" without start '
                'and stop in this context' % self)

        if step is None and n is None:
            step = stop - start

        if n is None:
            if (step < 0) != (stop - start < 0):
                raise InvalidGridDef(
                    'Range specification "%s" has inconsistent ordering '
                    '(step < 0 => stop > start)' % self)

            n = int(round((stop - start) / step)) + 1
            stop2 = start + (n - 1) * step
            if abs(stop - stop2) > eps:
                # step does not evenly divide the interval: shorten the
                # range so the step is honored exactly.
                n = int(math.floor((stop - start) / step)) + 1
                stop = start + (n - 1) * step
            else:
                stop = stop2

        if start == stop:
            n = 1

        if self.spacing == 'lin':
            vals = num.linspace(start, stop, n)

        elif self.spacing in ('log', 'symlog'):
            if start > 0. and stop > 0.:
                vals = num.exp(num.linspace(num.log(start),
                                            num.log(stop), n))
            elif start < 0. and stop < 0.:
                vals = -num.exp(num.linspace(num.log(-start),
                                             num.log(-stop), n))
            else:
                raise InvalidGridDef(
                    'Log ranges should not include or cross zero '
                    '(in range specification "%s").' % self)

            if self.spacing == 'symlog':
                # Mirror the log-spaced values around zero.
                nvals = - vals
                vals = num.concatenate((nvals[::-1], vals))

        if self.relative in ('add', 'mult') and base is None:
            raise InvalidGridDef(
                'Cannot use relative range specification in this context.')

        vals = self.make_relative(base, vals)

        return list(map(float, vals))

    def make_relative(self, base, vals):
        # Apply the optional 'add'/'mult' offset relative to ``base``.
        if self.relative == 'add':
            vals += base

        if self.relative == 'mult':
            vals *= base

        return vals

644 

645 

class GridDefElement(Object):
    '''
    Single parameter-range element of a grid definition.

    May be constructed from a shorthand string of the form
    ``'param = rangespec'``.
    '''

    param = meta.StringID.T()
    rs = Range.T()

    def __init__(self, shorthand=None, **kwargs):
        if shorthand is not None:
            parts = shorthand.split('=')
            if len(parts) != 2:
                raise InvalidGridDef(
                    'Invalid grid specification element: %s' % shorthand)

            kwargs['param'] = parts[0].strip()
            kwargs['rs'] = Range(parts[1].strip())

        Object.__init__(self, **kwargs)

    def shorthand(self):
        return self.param + ' = ' + str(self.rs)

667 

668 

class GridDef(Object):
    '''
    Grid definition: a collection of parameter-range elements.

    May be constructed from a shorthand string with elements separated by
    ``';'`` or newlines, each of the form ``'param = rangespec'``.
    '''

    elements = List.T(GridDefElement.T())

    def __init__(self, shorthand=None, **kwargs):
        if shorthand is not None:
            t = shorthand.splitlines()
            tt = []
            for x in t:
                x = x.strip()
                if x:
                    tt.extend(x.split(';'))

            elements = []
            for se in tt:
                # Fixed: was ``GridDef(se)``, which recursed endlessly on any
                # non-empty shorthand instead of building the elements.
                elements.append(GridDefElement(se))

            kwargs['elements'] = elements

        Object.__init__(self, **kwargs)

    def shorthand(self):
        # Use the elements' shorthand form, consistent with
        # GridDefElement.shorthand (str() would yield a YAML dump).
        return '; '.join(x.shorthand() for x in self.elements)

692 

693 

class Cloneable(object):
    '''
    Mixin providing dict-like access to an object's declared properties
    (``self.T.propnames``) and a :py:meth:`clone` method.
    '''

    def __iter__(self):
        return iter(self.T.propnames)

    def __getitem__(self, k):
        if k not in self.keys():
            raise KeyError(k)

        return getattr(self, k)

    def __setitem__(self, k, v):
        if k not in self.keys():
            raise KeyError(k)

        return setattr(self, k, v)

    def clone(self, **kwargs):
        '''
        Make a copy of the object.

        A new object of the same class is created and initialized with the
        parameters of the object on which this method is called on. If
        ``kwargs`` are given, these are used to override any of the
        initialization parameters.
        '''

        params = dict(self)
        for name in params:
            value = params[name]
            if isinstance(value, Cloneable):
                # Deep-copy nested cloneable objects.
                params[name] = value.clone()

        params.update(kwargs)
        return self.__class__(**params)

    @classmethod
    def keys(cls):
        '''
        Get list of the source model's parameter names.
        '''

        return cls.T.propnames

737 

738 

class STF(Object, Cloneable):

    '''
    Base class for source time functions.

    The base class itself represents a unit pulse at the reference time.
    '''

    def __init__(self, effective_duration=None, **kwargs):
        # Allow construction from an "effective" duration by converting it
        # to the subclass' native duration parameter.
        if effective_duration is not None:
            kwargs['duration'] = effective_duration / \
                self.factor_duration_to_effective()

        Object.__init__(self, **kwargs)

    @classmethod
    def factor_duration_to_effective(cls):
        # Ratio effective_duration / duration; trivial for the base class,
        # overridden by subclasses.
        return 1.0

    def centroid_time(self, tref):
        # The base class is an impulse located exactly at tref.
        return tref

    @property
    def effective_duration(self):
        # NOTE(review): relies on a ``duration`` attribute provided by
        # subclasses; accessing this on a bare STF raises AttributeError.
        return self.duration * self.factor_duration_to_effective()

    def discretize_t(self, deltat, tref):
        '''
        Discretize the STF on the sampling raster ``deltat``.

        :returns: ``(times, amplitudes)``; for the base class, the unit
            impulse at ``tref`` is split between the two neighbouring
            samples by linear interpolation.
        '''
        tl = math.floor(tref / deltat) * deltat
        th = math.ceil(tref / deltat) * deltat
        if tl == th:
            return num.array([tl], dtype=float), num.ones(1)
        else:
            return (
                num.array([tl, th], dtype=float),
                num.array([th - tref, tref - tl], dtype=float) / deltat)

    def base_key(self):
        # Identification key for equivalent STF configurations; subclasses
        # extend this tuple with their parameters.
        return (type(self).__name__,)

775 

776 

777g_unit_pulse = STF() 

778 

779 

def sshift(times, amplitudes, tshift, deltat):
    '''
    Shift a sampled signal by a (possibly sub-sample) time offset.

    A shift that is an integer multiple of ``deltat`` returns the input
    unchanged; otherwise the amplitudes are linearly redistributed onto a
    raster extended by one sample.
    '''

    lo = math.floor(tshift / deltat) * deltat
    hi = math.ceil(tshift / deltat) * deltat
    if lo == hi:
        # Integer-sample shift: nothing to interpolate.
        return times, amplitudes

    w_lo = (hi - tshift) / deltat
    w_hi = (tshift - lo) / deltat

    shifted = num.zeros(amplitudes.size + 1, dtype=float)
    shifted[:-1] += w_lo * amplitudes
    shifted[1:] += w_hi * amplitudes

    new_times = times[0] + lo + \
        num.arange(times.size + 1, dtype=float) * deltat

    return new_times, shifted

796 

797 

class BoxcarSTF(STF):

    '''
    Boxcar type source time function.

    .. figure :: /static/stf-BoxcarSTF.svg
        :width: 40%
        :align: center
        :alt: boxcar source time function
    '''

    duration = Float.T(
        default=0.0,
        help='duration of the boxcar')

    anchor = Float.T(
        default=0.0,
        help='anchor point with respect to source.time: ('
             '-1.0: left -> source duration [0, T] ~ hypocenter time, '
             ' 0.0: center -> source duration [-T/2, T/2] ~ centroid time, '
             '+1.0: right -> source duration [-T, 0] ~ rupture end time)')

    @classmethod
    def factor_duration_to_effective(cls):
        # For a boxcar, effective duration equals the nominal duration.
        return 1.0

    def centroid_time(self, tref):
        # Centroid sits at the middle of the boxcar, offset by the anchor.
        return tref - 0.5 * self.duration * self.anchor

    def discretize_t(self, deltat, tref):
        '''
        Discretize the boxcar on the sampling raster ``deltat``.

        :returns: ``(times, amplitudes)`` with amplitudes integrating the
            boxcar over each sample cell, normalized to unit sum, and
            corrected to preserve the exact centroid time.
        '''
        tmin_stf = tref - self.duration * (self.anchor + 1.) * 0.5
        tmax_stf = tref + self.duration * (1. - self.anchor) * 0.5
        tmin = round(tmin_stf / deltat) * deltat
        tmax = round(tmax_stf / deltat) * deltat
        nt = int(round((tmax - tmin) / deltat)) + 1
        times = num.linspace(tmin, tmax, nt)
        amplitudes = num.ones_like(times)
        if times.size > 1:
            # Integrate the piecewise-linear representation of the boxcar
            # over each sample cell.
            t_edges = num.linspace(
                tmin - 0.5 * deltat, tmax + 0.5 * deltat, nt + 1)
            t = tmin_stf + self.duration * num.array(
                [0.0, 0.0, 1.0, 1.0], dtype=float)
            f = num.array([0., 1., 1., 0.], dtype=float)
            amplitudes = util.plf_integrate_piecewise(t_edges, t, f)
            amplitudes /= num.sum(amplitudes)

        # Sub-sample correction so the discretized centroid matches the
        # exact centroid time.
        tshift = (num.sum(amplitudes * times) - self.centroid_time(tref))

        return sshift(times, amplitudes, -tshift, deltat)

    def base_key(self):
        return (type(self).__name__, self.duration, self.anchor)

850 

851 

class TriangularSTF(STF):

    '''
    Triangular type source time function.

    .. figure :: /static/stf-TriangularSTF.svg
        :width: 40%
        :align: center
        :alt: triangular source time function
    '''

    duration = Float.T(
        default=0.0,
        help='baseline of the triangle')

    peak_ratio = Float.T(
        default=0.5,
        help='fraction of time compared to duration, '
             'when the maximum amplitude is reached')

    anchor = Float.T(
        default=0.0,
        help='anchor point with respect to source.time: ('
             '-1.0: left -> source duration [0, T] ~ hypocenter time, '
             ' 0.0: center -> source duration [-T/2, T/2] ~ centroid time, '
             '+1.0: right -> source duration [-T, 0] ~ rupture end time)')

    @classmethod
    def factor_duration_to_effective(cls, peak_ratio=None):
        # Ratio of the effective duration to the triangle baseline; depends
        # on where the peak sits.
        if peak_ratio is None:
            peak_ratio = cls.peak_ratio.default()

        return math.sqrt((peak_ratio**2 - peak_ratio + 1.0) * 2.0 / 3.0)

    def __init__(self, effective_duration=None, **kwargs):
        # Overridden because the conversion factor depends on peak_ratio.
        if effective_duration is not None:
            kwargs['duration'] = effective_duration / \
                self.factor_duration_to_effective(
                    kwargs.get('peak_ratio', None))

        STF.__init__(self, **kwargs)

    @property
    def centroid_ratio(self):
        # Position of the triangle's centroid as a fraction of the baseline.
        ra = self.peak_ratio
        rb = 1.0 - ra
        return self.peak_ratio + (rb**2 / 3. - ra**2 / 3.) / (ra + rb)

    def centroid_time(self, tref):
        ca = self.centroid_ratio
        cb = 1.0 - ca
        if self.anchor <= 0.:
            return tref - ca * self.duration * self.anchor
        else:
            return tref - cb * self.duration * self.anchor

    @property
    def effective_duration(self):
        return self.duration * self.factor_duration_to_effective(
            self.peak_ratio)

    def tminmax_stf(self, tref):
        # Start and end time of the continuous triangle for a given anchor.
        ca = self.centroid_ratio
        cb = 1.0 - ca
        if self.anchor <= 0.:
            tmin_stf = tref - ca * self.duration * (self.anchor + 1.)
            tmax_stf = tmin_stf + self.duration
        else:
            tmax_stf = tref + cb * self.duration * (1. - self.anchor)
            tmin_stf = tmax_stf - self.duration

        return tmin_stf, tmax_stf

    def discretize_t(self, deltat, tref):
        '''
        Discretize the triangle on the sampling raster ``deltat``.

        :returns: ``(times, amplitudes)`` with amplitudes integrating the
            triangle over each sample cell, normalized to unit sum.
        '''
        tmin_stf, tmax_stf = self.tminmax_stf(tref)

        tmin = round(tmin_stf / deltat) * deltat
        tmax = round(tmax_stf / deltat) * deltat
        nt = int(round((tmax - tmin) / deltat)) + 1
        if nt > 1:
            t_edges = num.linspace(
                tmin - 0.5 * deltat, tmax + 0.5 * deltat, nt + 1)
            t = tmin_stf + self.duration * num.array(
                [0.0, self.peak_ratio, 1.0], dtype=float)
            f = num.array([0., 1., 0.], dtype=float)
            amplitudes = util.plf_integrate_piecewise(t_edges, t, f)
            amplitudes /= num.sum(amplitudes)
        else:
            amplitudes = num.ones(1)

        times = num.linspace(tmin, tmax, nt)
        return times, amplitudes

    def base_key(self):
        return (
            type(self).__name__, self.duration, self.peak_ratio, self.anchor)

948 

949 

class HalfSinusoidSTF(STF):

    '''
    Half sinusoid type source time function.

    .. figure :: /static/stf-HalfSinusoidSTF.svg
        :width: 40%
        :align: center
        :alt: half-sinusoid source time function
    '''

    duration = Float.T(
        default=0.0,
        help='duration of the half-sinusoid (baseline)')

    anchor = Float.T(
        default=0.0,
        help='anchor point with respect to source.time: ('
             '-1.0: left -> source duration [0, T] ~ hypocenter time, '
             ' 0.0: center -> source duration [-T/2, T/2] ~ centroid time, '
             '+1.0: right -> source duration [-T, 0] ~ rupture end time)')

    exponent = Int.T(
        default=1,
        help='set to 2 to use square of the half-period sinusoidal function.')

    def __init__(self, effective_duration=None, **kwargs):
        # Overridden because the conversion factor depends on the exponent.
        if effective_duration is not None:
            kwargs['duration'] = effective_duration / \
                self.factor_duration_to_effective(
                    kwargs.get('exponent', 1))

        STF.__init__(self, **kwargs)

    @classmethod
    def factor_duration_to_effective(cls, exponent):
        # Ratio of effective duration to baseline, per exponent.
        if exponent == 1:
            return math.sqrt(3.0 * math.pi**2 - 24.0) / math.pi
        elif exponent == 2:
            return math.sqrt(math.pi**2 - 6) / math.pi
        else:
            raise ValueError('Exponent for HalfSinusoidSTF must be 1 or 2.')

    @property
    def effective_duration(self):
        return self.duration * self.factor_duration_to_effective(self.exponent)

    def centroid_time(self, tref):
        return tref - 0.5 * self.duration * self.anchor

    def discretize_t(self, deltat, tref):
        '''
        Discretize the half-sinusoid on the sampling raster ``deltat``.

        :returns: ``(times, amplitudes)`` with amplitudes integrating the
            function over each sample cell, normalized to unit sum.
        '''
        tmin_stf = tref - self.duration * (self.anchor + 1.) * 0.5
        tmax_stf = tref + self.duration * (1. - self.anchor) * 0.5
        tmin = round(tmin_stf / deltat) * deltat
        tmax = round(tmax_stf / deltat) * deltat
        nt = int(round((tmax - tmin) / deltat)) + 1
        if nt > 1:
            # Clip cell edges to the STF support, then difference the
            # antiderivative to integrate over each cell.
            t_edges = num.maximum(tmin_stf, num.minimum(tmax_stf, num.linspace(
                tmin - 0.5 * deltat, tmax + 0.5 * deltat, nt + 1)))

            if self.exponent == 1:
                # Antiderivative of sin(pi t / T), up to scale.
                fint = -num.cos(
                    (t_edges - tmin_stf) * (math.pi / self.duration))

            elif self.exponent == 2:
                # Antiderivative of sin^2(pi t / T), up to scale.
                fint = (t_edges - tmin_stf) / self.duration \
                    - 1.0 / (2.0 * math.pi) * num.sin(
                        (t_edges - tmin_stf) * (2.0 * math.pi / self.duration))
            else:
                raise ValueError(
                    'Exponent for HalfSinusoidSTF must be 1 or 2.')

            amplitudes = fint[1:] - fint[:-1]
            amplitudes /= num.sum(amplitudes)
        else:
            amplitudes = num.ones(1)

        times = num.linspace(tmin, tmax, nt)
        return times, amplitudes

    def base_key(self):
        # NOTE(review): ``exponent`` is not part of the key although it
        # changes the discretized shape — confirm whether this is intended.
        return (type(self).__name__, self.duration, self.anchor)

1032 

1033 

class SmoothRampSTF(STF):
    '''
    Smooth-ramp type source time function for near-field displacement.
    Based on moment function of double-couple point source proposed by Bruestle
    and Mueller (PEPI, 1983).

    .. [1] W. Bruestle, G. Mueller (1983), Moment and duration of shallow
        earthquakes from Love-wave modelling for regional distances, PEPI 32,
        312-324.

    .. figure :: /static/stf-SmoothRampSTF.svg
        :width: 40%
        :alt: smooth ramp source time function
    '''
    duration = Float.T(
        default=0.0,
        help='duration of the ramp (baseline)')

    rise_ratio = Float.T(
        default=0.5,
        help='fraction of time compared to duration, '
             'when the maximum amplitude is reached')

    anchor = Float.T(
        default=0.0,
        help='anchor point with respect to source.time: ('
             '-1.0: left -> source duration ``[0, T]`` ~ hypocenter time, '
             '0.0: center -> source duration ``[-T/2, T/2]`` ~ centroid time, '
             '+1.0: right -> source duration ``[-T, 0]`` ~ rupture end time)')

    def discretize_t(self, deltat, tref):
        '''
        Discretize the smooth ramp on the sampling raster ``deltat``.

        :returns: ``(times, amplitudes)``; samples within the rise time
            follow the cosine-based ramp of Bruestle & Mueller (1983),
            samples after it are constant; normalized to unit sum.
        '''
        tmin_stf = tref - self.duration * (self.anchor + 1.) * 0.5
        tmax_stf = tref + self.duration * (1. - self.anchor) * 0.5
        tmin = round(tmin_stf / deltat) * deltat
        tmax = round(tmax_stf / deltat) * deltat
        D = round((tmax - tmin) / deltat) * deltat
        nt = int(round(D / deltat)) + 1
        times = num.linspace(tmin, tmax, nt)
        if nt > 1:
            rise_time = self.rise_ratio * self.duration
            amplitudes = num.ones_like(times)
            tp = tmin + rise_time
            # Samples within the rise interval get the ramp shape.
            ii = num.where(times <= tp)
            t_inc = times[ii]
            a = num.cos(num.pi * (t_inc - tmin_stf) / rise_time)
            b = num.cos(3 * num.pi * (t_inc - tmin_stf) / rise_time) - 1.0
            amplitudes[ii] = (9. / 16.) * (1 - a + (1. / 9.) * b)

            amplitudes /= num.sum(amplitudes)
        else:
            amplitudes = num.ones(1)

        return times, amplitudes

    def base_key(self):
        return (type(self).__name__,
                self.duration, self.rise_ratio, self.anchor)

1091 

1092 

class ResonatorSTF(STF):
    '''
    Simple resonator like source time function.

    .. math ::

        f(t) = 0 for t < 0
        f(t) = e^{-t/tau} * sin(2 * pi * f * t)


    .. figure :: /static/stf-SmoothRampSTF.svg
        :width: 40%
        :alt: smooth ramp source time function

    '''

    duration = Float.T(
        default=0.0,
        help='decay time')

    frequency = Float.T(
        default=1.0,
        help='resonance frequency')

    def discretize_t(self, deltat, tref):
        '''
        Discretize the damped oscillation on the sampling raster ``deltat``.

        The signal is truncated after three decay times. Note that, unlike
        the other STFs here, the amplitudes are not normalized to unit sum.
        '''
        tmin_stf = tref
        tmax_stf = tref + self.duration * 3
        tmin = math.floor(tmin_stf / deltat) * deltat
        tmax = math.ceil(tmax_stf / deltat) * deltat
        times = util.arange2(tmin, tmax, deltat)
        amplitudes = num.exp(-(times - tref) / self.duration) \
            * num.sin(2.0 * num.pi * self.frequency * (times - tref))

        return times, amplitudes

    def base_key(self):
        return (type(self).__name__,
                self.duration, self.frequency)

1131 

1132 

class STFMode(StringChoice):
    # Where the source time function is applied: 'pre' during source
    # discretization, 'post' during post-processing of the stacked
    # seismograms (see Source.effective_stf_pre / effective_stf_post).
    choices = ['pre', 'post']

1135 

1136 

class Source(Location, Cloneable):
    '''
    Base class for all source models.
    '''

    # Optional human-readable identifier, carried over to pyrocko events.
    name = String.T(optional=True, default='')

    time = Timestamp.T(
        default=Timestamp.D('1970-01-01 00:00:00'),
        help='source origin time.')

    stf = STF.T(
        optional=True,
        help='source time function.')

    stf_mode = STFMode.T(
        default='post',
        help='whether to apply source time function in pre or '
             'post-processing.')

    def __init__(self, **kwargs):
        Location.__init__(self, **kwargs)

    def update(self, **kwargs):
        '''
        Change some of the source models parameters.

        Example::

            >>> from pyrocko import gf
            >>> s = gf.DCSource()
            >>> s.update(strike=66., dip=33.)
            >>> print(s)
            --- !pf.DCSource
            depth: 0.0
            time: 1970-01-01 00:00:00
            magnitude: 6.0
            strike: 66.0
            dip: 33.0
            rake: 0.0

        '''

        for (k, v) in kwargs.items():
            self[k] = v

    def grid(self, **variables):
        '''
        Create grid of source model variations.

        :returns: :py:class:`SourceGrid` instance.

        Example::

            >>> from pyrocko import gf
            >>> base = DCSource()
            >>> R = gf.Range
            >>> for s in base.grid(R(...)):  # one Range spec per parameter
            ...     pass

        NOTE(review): the original example was truncated here; see
        ``Range`` and :py:class:`SourceGrid` for the grid specification
        syntax.
        '''
        return SourceGrid(base=self, variables=variables)

    def base_key(self):
        '''
        Get key to decide about source discretization / GF stack sharing.

        When two source models differ only in amplitude and origin time, the
        discretization and the GF stacking can be done only once for a unit
        amplitude and a zero origin time and the amplitude and origin times of
        the seismograms can be applied during post-processing of the synthetic
        seismogram.

        For any derived parameterized source model, this method is called to
        decide if discretization and stacking of the source should be shared.
        When two source models return an equal vector of values discretization
        is shared.
        '''
        return (self.depth, self.lat, self.north_shift,
                self.lon, self.east_shift, self.time, type(self).__name__) + \
            self.effective_stf_pre().base_key()

    def get_factor(self):
        '''
        Get the scaling factor to be applied during post-processing.

        Discretization of the base seismogram is usually done for a unit
        amplitude, because a common factor can be efficiently multiplied to
        final seismograms. This eliminates to do repeat the stacking when
        creating seismograms for a series of source models only differing in
        amplitude.

        This method should return the scaling factor to apply in the
        post-processing (often this is simply the scalar moment of the source).
        '''

        return 1.0

    def effective_stf_pre(self):
        '''
        Return the STF applied before stacking of the Green's functions.

        This STF is used during discretization of the parameterized source
        models, i.e. to produce a temporal distribution of point sources.

        Handling of the STF before stacking of the GFs is less efficient but
        allows to use different source time functions for different parts of
        the source.
        '''

        if self.stf is not None and self.stf_mode == 'pre':
            return self.stf
        else:
            # Unit pulse: effectively no pre-stack STF.
            return g_unit_pulse

    def effective_stf_post(self):
        '''
        Return the STF applied after stacking of the Green's fuctions.

        This STF is used in the post-processing of the synthetic seismograms.

        Handling of the STF after stacking of the GFs is usually more efficient
        but is only possible when a common STF is used for all subsources.
        '''

        if self.stf is not None and self.stf_mode == 'post':
            return self.stf
        else:
            return g_unit_pulse

    def _dparams_base(self):
        # Common keyword arguments for the discretized source constructors:
        # a single point source at the source's own position and time.
        return dict(times=arr(self.time),
                    lat=self.lat, lon=self.lon,
                    north_shifts=arr(self.north_shift),
                    east_shifts=arr(self.east_shift),
                    depths=arr(self.depth))

    def _hash(self):
        # Hex digest over base_key(), identifying equivalent
        # discretizations.
        sha = sha1()
        for k in self.base_key():
            sha.update(str(k).encode())
        return sha.hexdigest()

    def _dparams_base_repeated(self, times):
        # Like _dparams_base, but with the position arrays repeated to
        # match the number of STF time samples. ``times is None`` falls
        # back to the single-sample variant.
        if times is None:
            return self._dparams_base()

        nt = times.size
        north_shifts = num.repeat(self.north_shift, nt)
        east_shifts = num.repeat(self.east_shift, nt)
        depths = num.repeat(self.depth, nt)
        return dict(times=times,
                    lat=self.lat, lon=self.lon,
                    north_shifts=north_shifts,
                    east_shifts=east_shifts,
                    depths=depths)

    def pyrocko_event(self, store=None, target=None, **kwargs):
        '''
        Convert the source model to a :py:class:`pyrocko.model.Event`.

        If an STF is set, its effective duration is carried over to the
        event; extra keyword arguments are passed through to the event
        constructor.
        '''
        duration = None
        if self.stf:
            duration = self.stf.effective_duration

        return model.Event(
            lat=self.lat,
            lon=self.lon,
            north_shift=self.north_shift,
            east_shift=self.east_shift,
            time=self.time,
            name=self.name,
            depth=self.depth,
            duration=duration,
            **kwargs)

    def outline(self, cs='xyz'):
        '''
        Get the point source's position in the requested coordinate system.

        :param cs: coordinate system name (see
            :ref:`coordinate-system-names`): ``'xyz'``, ``'xy'``,
            ``'latlon'`` or ``'lonlat'``
        :returns: array of shape ``(1, 3)`` or ``(1, 2)``
        '''
        points = num.atleast_2d(num.zeros([1, 3]))

        points[:, 0] += self.north_shift
        points[:, 1] += self.east_shift
        points[:, 2] += self.depth
        if cs == 'xyz':
            return points
        elif cs == 'xy':
            return points[:, :2]
        elif cs in ('latlon', 'lonlat'):
            latlon = ne_to_latlon(
                self.lat, self.lon, points[:, 0], points[:, 1])

            latlon = num.array(latlon).T
            if cs == 'latlon':
                return latlon
            else:
                # 'lonlat': swap the two columns.
                return latlon[:, ::-1]

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        '''
        Build a source object from a :py:class:`pyrocko.model.Event`.

        The event's duration (if set) becomes a half-sinusoid STF; extra
        keyword arguments override the event-derived parameters.

        :raises ConversionError: if the event has no depth information.
        '''
        if ev.depth is None:
            raise ConversionError(
                'Cannot convert event object to source object: '
                'no depth information available')

        stf = None
        if ev.duration is not None:
            stf = HalfSinusoidSTF(effective_duration=ev.duration)

        d = dict(
            name=ev.name,
            time=ev.time,
            lat=ev.lat,
            lon=ev.lon,
            north_shift=ev.north_shift,
            east_shift=ev.east_shift,
            depth=ev.depth,
            stf=stf)
        d.update(kwargs)
        return cls(**d)

    def get_magnitude(self):
        # Subclasses carrying a magnitude override this.
        raise NotImplementedError(
            '%s does not implement get_magnitude()'
            % self.__class__.__name__)

1356 

1357 

class SourceWithMagnitude(Source):
    '''
    Base class for sources containing a moment magnitude.
    '''

    magnitude = Float.T(
        default=6.0,
        help='Moment magnitude Mw as in [Hanks and Kanamori, 1979]')

    def __init__(self, **kwargs):
        # Accept 'moment' as an alternative to 'magnitude'; an explicitly
        # given magnitude takes precedence.
        if 'moment' in kwargs:
            mom = kwargs.pop('moment')
            if 'magnitude' not in kwargs:
                kwargs['magnitude'] = float(pmt.moment_to_magnitude(mom))

        Source.__init__(self, **kwargs)

    @property
    def moment(self):
        # Scalar moment [Nm], derived from the magnitude on the fly.
        return float(pmt.magnitude_to_moment(self.magnitude))

    @moment.setter
    def moment(self, value):
        self.magnitude = float(pmt.moment_to_magnitude(value))

    def pyrocko_event(self, store=None, target=None, **kwargs):
        return Source.pyrocko_event(
            self, store, target,
            magnitude=self.magnitude,
            **kwargs)

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        d = {}
        # Test against None instead of truthiness so a valid magnitude of
        # exactly 0.0 is not silently dropped.
        if ev.magnitude is not None:
            d.update(magnitude=ev.magnitude)

        d.update(kwargs)
        return super(SourceWithMagnitude, cls).from_pyrocko_event(ev, **d)

    def get_magnitude(self):
        return self.magnitude

1400 

1401 

class DerivedMagnitudeError(ValidationError):
    '''
    Raised when a derived magnitude/moment cannot be computed, e.g. because
    of conflicting parameters or a missing earth model.
    '''
    pass

1404 

1405 

class SourceWithDerivedMagnitude(Source):

    class __T(Source.T):

        # Extend guts validation with the subclass' conflict check.
        def validate_extra(self, val):
            Source.T.validate_extra(self, val)
            val.check_conflicts()

    def check_conflicts(self):
        '''
        Check for parameter conflicts.

        To be overloaded in subclasses. Raises :py:exc:`DerivedMagnitudeError`
        on conflicts.
        '''
        pass

    def get_magnitude(self, store=None, target=None):
        raise DerivedMagnitudeError('No magnitude set.')

    def get_moment(self, store=None, target=None):
        # Scalar moment [Nm] derived from the (possibly store-derived)
        # magnitude.
        return float(pmt.magnitude_to_moment(
            self.get_magnitude(store, target)))

    def pyrocko_moment_tensor(self, store=None, target=None):
        raise NotImplementedError(
            '%s does not implement pyrocko_moment_tensor()'
            % self.__class__.__name__)

    def pyrocko_event(self, store=None, target=None, **kwargs):
        try:
            mt = self.pyrocko_moment_tensor(store, target)
            # Pass store/target along so magnitudes which can only be
            # derived with the help of an earth model (e.g. from volume
            # change or slip) are obtained consistently with the moment
            # tensor above. Previously get_magnitude() was called without
            # arguments, failing in exactly those cases.
            magnitude = self.get_magnitude(store, target)
        except (DerivedMagnitudeError, NotImplementedError):
            mt = None
            magnitude = None

        return Source.pyrocko_event(
            self, store, target,
            moment_tensor=mt,
            magnitude=magnitude,
            **kwargs)

1448 

1449 

class ExplosionSource(SourceWithDerivedMagnitude):
    '''
    An isotropic explosion point source.

    The source strength may be given either as ``magnitude`` or as
    ``volume_change`` (mutually exclusive, see :py:meth:`check_conflicts`).
    '''

    magnitude = Float.T(
        optional=True,
        help='moment magnitude Mw as in [Hanks and Kanamori, 1979]')

    volume_change = Float.T(
        optional=True,
        help='volume change of the explosion/implosion or '
             'the contracting/extending magmatic source. [m^3]')

    discretized_source_class = meta.DiscretizedExplosionSource

    def __init__(self, **kwargs):
        # Accept 'moment' as an alternative to 'magnitude'; an explicitly
        # given magnitude takes precedence.
        if 'moment' in kwargs:
            mom = kwargs.pop('moment')
            if 'magnitude' not in kwargs:
                kwargs['magnitude'] = float(pmt.moment_to_magnitude(mom))

        SourceWithDerivedMagnitude.__init__(self, **kwargs)

    def base_key(self):
        return SourceWithDerivedMagnitude.base_key(self) + \
            (self.volume_change,)

    def check_conflicts(self):
        # magnitude and volume_change both parameterize the source
        # strength -- only one may be given.
        if self.magnitude is not None and self.volume_change is not None:
            raise DerivedMagnitudeError(
                'Magnitude and volume_change are both defined.')

    def get_magnitude(self, store=None, target=None):
        '''
        Get the moment magnitude, derived from ``volume_change`` if needed.

        Falls back to the magnitude of a unit moment (1 Nm) when neither
        ``magnitude`` nor ``volume_change`` is set.
        '''
        self.check_conflicts()

        if self.magnitude is not None:
            return self.magnitude

        elif self.volume_change is not None:
            moment = self.volume_change * \
                self.get_moment_to_volume_change_ratio(store, target)

            # abs(): an implosion (negative volume change) still has a
            # positive magnitude; the sign is restored in
            # discretize_basesource.
            return float(pmt.moment_to_magnitude(abs(moment)))
        else:
            return float(pmt.moment_to_magnitude(1.0))

    def get_volume_change(self, store=None, target=None):
        '''
        Get the volume change [m^3], derived from ``magnitude`` if needed.
        '''
        self.check_conflicts()

        if self.volume_change is not None:
            return self.volume_change

        elif self.magnitude is not None:
            moment = float(pmt.magnitude_to_moment(self.magnitude))
            return moment / self.get_moment_to_volume_change_ratio(
                store, target)

        else:
            # Neither set: volume change of a unit moment source.
            return 1.0 / self.get_moment_to_volume_change_ratio(store)

    def get_moment_to_volume_change_ratio(self, store, target=None):
        '''
        Get the conversion factor between scalar moment and volume change,
        from the shear and bulk moduli of the earth model at the source
        position.

        :raises DerivedMagnitudeError: if no store is given or the source
            position is outside of the earth model's bounds.
        '''
        if store is None:
            raise DerivedMagnitudeError(
                'Need earth model to convert between volume change and '
                'magnitude.')

        points = num.array(
            [[self.north_shift, self.east_shift, self.depth]], dtype=float)

        interpolation = target.interpolation if target else 'multilinear'
        try:
            shear_moduli = store.config.get_shear_moduli(
                self.lat, self.lon,
                points=points,
                interpolation=interpolation)[0]

            bulk_moduli = store.config.get_bulk_moduli(
                self.lat, self.lon,
                points=points,
                interpolation=interpolation)[0]
        except meta.OutOfBounds:
            raise DerivedMagnitudeError(
                'Could not get shear modulus at source position.')

        return float(2. * shear_moduli + bulk_moduli)

    def get_factor(self):
        # The moment is baked into the discretized amplitudes below, so no
        # post-processing factor is needed.
        return 1.0

    def discretize_basesource(self, store, target=None):
        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)

        amplitudes *= self.get_moment(store, target) * math.sqrt(2. / 3.)

        # Restore the sign for implosive (negative volume change) sources,
        # lost through abs() in get_magnitude.
        if self.volume_change is not None:
            if self.volume_change < 0.:
                amplitudes *= -1

        return meta.DiscretizedExplosionSource(
            m0s=amplitudes,
            **self._dparams_base_repeated(times))

    def pyrocko_moment_tensor(self, store=None, target=None):
        a = self.get_moment(store, target) * math.sqrt(2. / 3.)
        return pmt.MomentTensor(m=pmt.symmat6(a, a, a, 0., 0., 0.))

1557 

1558 

class RectangularExplosionSource(ExplosionSource):
    '''
    Rectangular or line explosion source.
    '''

    discretized_source_class = meta.DiscretizedExplosionSource

    strike = Float.T(
        default=0.0,
        help='strike direction in [deg], measured clockwise from north')

    dip = Float.T(
        default=90.0,
        help='dip angle in [deg], measured downward from horizontal')

    length = Float.T(
        default=0.,
        help='length of rectangular source area [m]')

    width = Float.T(
        default=0.,
        help='width of rectangular source area [m]')

    # NOTE(review): the help text mentions 'center_left' and 'center right'
    # but those values are not in 'choices' -- confirm the intended set
    # (map_anchor elsewhere in this module does define them).
    anchor = StringChoice.T(
        choices=['top', 'top_left', 'top_right', 'center', 'bottom',
                 'bottom_left', 'bottom_right'],
        default='center',
        optional=True,
        help='Anchor point for positioning the plane, can be: top, center or'
             'bottom and also top_left, top_right,bottom_left,'
             'bottom_right, center_left and center right')

    nucleation_x = Float.T(
        optional=True,
        help='horizontal position of rupture nucleation in normalized fault '
             'plane coordinates (-1 = left edge, +1 = right edge)')

    nucleation_y = Float.T(
        optional=True,
        help='down-dip position of rupture nucleation in normalized fault '
             'plane coordinates (-1 = upper edge, +1 = lower edge)')

    velocity = Float.T(
        default=3500.,
        help='speed of explosion front [m/s]')

    aggressive_oversampling = Bool.T(
        default=False,
        help='Aggressive oversampling for basesource discretization. '
             'When using \'multilinear\' interpolation oversampling has'
             ' practically no effect.')

    def base_key(self):
        # Geometry and kinematics enter the discretization, so they are
        # part of the GF sharing key.
        return Source.base_key(self) + (self.strike, self.dip, self.length,
                                        self.width, self.nucleation_x,
                                        self.nucleation_y, self.velocity,
                                        self.anchor)

    def discretize_basesource(self, store, target=None):
        '''
        Discretize the rectangle into a grid of explosion point sources.
        '''

        # Map normalized nucleation coordinates [-1, 1] to offsets in [m]
        # from the plane's center.
        if self.nucleation_x is not None:
            nucx = self.nucleation_x * 0.5 * self.length
        else:
            nucx = None

        if self.nucleation_y is not None:
            nucy = self.nucleation_y * 0.5 * self.width
        else:
            nucy = None

        stf = self.effective_stf_pre()

        points, times, amplitudes, dl, dw, nl, nw = discretize_rect_source(
            store.config.deltas, store.config.deltat,
            self.time, self.north_shift, self.east_shift, self.depth,
            self.strike, self.dip, self.length, self.width, self.anchor,
            self.velocity, stf=stf, nucleation_x=nucx, nucleation_y=nucy)

        # Normalize, then distribute the total moment over all
        # sub-sources.
        amplitudes /= num.sum(amplitudes)
        amplitudes *= self.get_moment(store, target)

        return meta.DiscretizedExplosionSource(
            lat=self.lat,
            lon=self.lon,
            times=times,
            north_shifts=points[:, 0],
            east_shifts=points[:, 1],
            depths=points[:, 2],
            m0s=amplitudes)

    def outline(self, cs='xyz'):
        '''
        Get the corner points of the source plane in the requested
        coordinate system (see :ref:`coordinate-system-names`).
        '''
        points = outline_rect_source(self.strike, self.dip, self.length,
                                     self.width, self.anchor)

        points[:, 0] += self.north_shift
        points[:, 1] += self.east_shift
        points[:, 2] += self.depth
        if cs == 'xyz':
            return points
        elif cs == 'xy':
            return points[:, :2]
        elif cs in ('latlon', 'lonlat'):
            latlon = ne_to_latlon(
                self.lat, self.lon, points[:, 0], points[:, 1])

            latlon = num.array(latlon).T
            if cs == 'latlon':
                return latlon
            else:
                # 'lonlat': swap the two columns.
                return latlon[:, ::-1]

    def get_nucleation_abs_coord(self, cs='xy'):
        # Absolute coordinates of the nucleation point, or (None, None)
        # when no nucleation position is set.

        if self.nucleation_x is None:
            return None, None

        coords = from_plane_coords(self.strike, self.dip, self.length,
                                   self.width, self.depth, self.nucleation_x,
                                   self.nucleation_y, lat=self.lat,
                                   lon=self.lon, north_shift=self.north_shift,
                                   east_shift=self.east_shift, cs=cs)
        return coords

1681 

1682 

class DCSource(SourceWithMagnitude):
    '''
    A double-couple point source.
    '''

    strike = Float.T(
        default=0.0,
        help='strike direction in [deg], measured clockwise from north')

    dip = Float.T(
        default=90.0,
        help='dip angle in [deg], measured downward from horizontal')

    rake = Float.T(
        default=0.0,
        help='rake angle in [deg], '
             'measured counter-clockwise from right-horizontal '
             'in on-plane view')

    discretized_source_class = meta.DiscretizedMTSource

    def base_key(self):
        # The orientation angles enter the discretization and thus the GF
        # sharing key.
        return Source.base_key(self) + (self.strike, self.dip, self.rake)

    def get_factor(self):
        # Scalar moment; the base seismogram is computed for unit moment
        # and scaled during post-processing.
        return float(pmt.magnitude_to_moment(self.magnitude))

    def discretize_basesource(self, store, target=None):
        # Unit double-couple tensor, spread in time by the pre-stack STF.
        m6_unit = pmt.MomentTensor(
            strike=self.strike, dip=self.dip, rake=self.rake).m6()

        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)

        return meta.DiscretizedMTSource(
            m6s=m6_unit[num.newaxis, :] * amplitudes[:, num.newaxis],
            **self._dparams_base_repeated(times))

    def pyrocko_moment_tensor(self, store=None, target=None):
        return pmt.MomentTensor(
            strike=self.strike,
            dip=self.dip,
            rake=self.rake,
            scalar_moment=self.moment)

    def pyrocko_event(self, store=None, target=None, **kwargs):
        mt = self.pyrocko_moment_tensor(store, target)
        return SourceWithMagnitude.pyrocko_event(
            self, store, target, moment_tensor=mt, **kwargs)

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        params = {}
        mt = ev.moment_tensor
        if mt:
            # Take the first of the two conjugate fault planes.
            (strike, dip, rake), _ = mt.both_strike_dip_rake()
            params['strike'] = float(strike)
            params['dip'] = float(dip)
            params['rake'] = float(rake)
            params['magnitude'] = float(mt.moment_magnitude())

        # Explicit keyword arguments win over event-derived values.
        params.update(kwargs)
        return super(DCSource, cls).from_pyrocko_event(ev, **params)

1747 

1748 

class CLVDSource(SourceWithMagnitude):
    '''
    A pure CLVD point source.
    '''

    discretized_source_class = meta.DiscretizedMTSource

    azimuth = Float.T(
        default=0.0,
        help='azimuth direction of largest dipole, clockwise from north [deg]')

    dip = Float.T(
        default=90.,
        help='dip direction of largest dipole, downward from horizontal [deg]')

    def base_key(self):
        return Source.base_key(self) + (self.azimuth, self.dip)

    def get_factor(self):
        # Scalar moment applied during post-processing; discretization is
        # normalized to unit amplitude (see discretize_basesource).
        return float(pmt.magnitude_to_moment(self.magnitude))

    @property
    def m6(self):
        # CLVD moment tensor with its largest dipole rotated into the
        # (azimuth, dip) direction.
        a = math.sqrt(4. / 3.) * self.get_factor()
        m = pmt.symmat6(-0.5 * a, -0.5 * a, a, 0., 0., 0.)
        rotmat1 = pmt.euler_to_matrix(
            d2r * (self.dip - 90.),
            d2r * (self.azimuth - 90.),
            0.)
        m = num.dot(rotmat1.T, num.dot(m, rotmat1))
        return pmt.to6(m)

    @property
    def m6_astuple(self):
        return tuple(self.m6.tolist())

    def discretize_basesource(self, store, target=None):
        factor = self.get_factor()
        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)
        return meta.DiscretizedMTSource(
            m6s=self.m6[num.newaxis, :] * amplitudes[:, num.newaxis] / factor,
            **self._dparams_base_repeated(times))

    def pyrocko_moment_tensor(self, store=None, target=None):
        return pmt.MomentTensor(m=pmt.symmat6(*self.m6_astuple))

    def pyrocko_event(self, store=None, target=None, **kwargs):
        # Compute the moment tensor once and reuse it for both the event's
        # moment tensor and its magnitude (previously computed twice).
        mt = self.pyrocko_moment_tensor(store, target)
        return Source.pyrocko_event(
            self, store, target,
            moment_tensor=mt,
            magnitude=float(mt.moment_magnitude()),
            **kwargs)

1803 

1804 

class VLVDSource(SourceWithDerivedMagnitude):
    '''
    Volumetric linear vector dipole source.

    This source is a parameterization for a restricted moment tensor point
    source, useful to represent dyke or sill like inflation or deflation
    sources. The restriction is such that the moment tensor is rotational
    symmetric. It can be represented by a superposition of a linear vector
    dipole (here we use a CLVD for convenience) and an isotropic component.
    The restricted moment tensor has 4 degrees of freedom: 2 independent
    eigenvalues and 2 rotation angles orienting the symmetry axis.

    In this parameterization, the isotropic component is controlled by
    ``volume_change``. To define the moment tensor, it must be converted to
    the scalar moment of the MT's isotropic component. For the conversion,
    the shear modulus at the source's position must be known. This value is
    extracted from the earth model defined in the GF store in use.

    The CLVD part is controlled by its scalar moment :math:`M_0`:
    ``clvd_moment``. The sign of ``clvd_moment`` is used to switch between a
    positive or negative CLVD (the sign of the largest eigenvalue).
    '''

    discretized_source_class = meta.DiscretizedMTSource

    azimuth = Float.T(
        default=0.0,
        help='azimuth direction of symmetry axis, clockwise from north [deg].')

    dip = Float.T(
        default=90.,
        help='dip direction of symmetry axis, downward from horizontal [deg].')

    volume_change = Float.T(
        default=0.,
        help='volume change of the inflation/deflation [m^3].')

    clvd_moment = Float.T(
        default=0.,
        help='scalar moment :math:`M_0` of the CLVD component [Nm]. The sign '
             'controls the sign of the CLVD (the sign of its largest '
             'eigenvalue).')

    def get_moment_to_volume_change_ratio(self, store, target):
        '''
        Get the conversion factor between scalar moment and volume change,
        from the elastic moduli of the earth model at the source position.

        :raises DerivedMagnitudeError: if store or target is missing, or
            the source position is outside of the earth model's bounds.
        '''
        if store is None or target is None:
            raise DerivedMagnitudeError(
                'Need earth model to convert between volume change and '
                'magnitude.')

        points = num.array(
            [[self.north_shift, self.east_shift, self.depth]], dtype=float)

        try:
            shear_moduli = store.config.get_shear_moduli(
                self.lat, self.lon,
                points=points,
                interpolation=target.interpolation)[0]

            bulk_moduli = store.config.get_bulk_moduli(
                self.lat, self.lon,
                points=points,
                interpolation=target.interpolation)[0]
        except meta.OutOfBounds:
            raise DerivedMagnitudeError(
                'Could not get shear modulus at source position.')

        return float(2. * shear_moduli + bulk_moduli)

    def base_key(self):
        return Source.base_key(self) + \
            (self.azimuth, self.dip, self.volume_change, self.clvd_moment)

    def get_magnitude(self, store=None, target=None):
        mt = self.pyrocko_moment_tensor(store, target)
        return float(pmt.moment_to_magnitude(mt.moment))

    def get_m6(self, store, target):
        '''
        Assemble the six-component moment tensor: rotated CLVD part plus
        isotropic part derived from the volume change.
        '''
        a = math.sqrt(4. / 3.) * self.clvd_moment
        m_clvd = pmt.symmat6(-0.5 * a, -0.5 * a, a, 0., 0., 0.)

        rotmat1 = pmt.euler_to_matrix(
            d2r * (self.dip - 90.),
            d2r * (self.azimuth - 90.),
            0.)
        m_clvd = num.dot(rotmat1.T, num.dot(m_clvd, rotmat1))

        m_iso = self.volume_change * \
            self.get_moment_to_volume_change_ratio(store, target)

        m_iso = pmt.symmat6(m_iso, m_iso, m_iso, 0.,
                            0., 0.,) * math.sqrt(2. / 3)

        m = pmt.to6(m_clvd) + pmt.to6(m_iso)
        return m

    def get_moment(self, store=None, target=None):
        return float(pmt.magnitude_to_moment(
            self.get_magnitude(store, target)))

    def get_m6_astuple(self, store, target):
        m6 = self.get_m6(store, target)
        return tuple(m6.tolist())

    def discretize_basesource(self, store, target=None):
        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)

        m6 = self.get_m6(store, target)

        # Broadcast the 6-component tensor over all STF samples (same
        # pattern as MTSource). The previous in-place
        # 'm6 *= amplitudes / factor' only worked for a single-sample STF:
        # with a multi-sample pre-STF it raised a shape error and would
        # have produced a single m6s row while the position arrays are
        # repeated per sample.
        m6s = m6[num.newaxis, :] * amplitudes[:, num.newaxis] \
            / self.get_factor()

        return meta.DiscretizedMTSource(
            m6s=m6s,
            **self._dparams_base_repeated(times))

    def pyrocko_moment_tensor(self, store=None, target=None):
        m6_astuple = self.get_m6_astuple(store, target)
        return pmt.MomentTensor(m=pmt.symmat6(*m6_astuple))

1922 

1923 

class MTSource(Source):
    '''
    A moment tensor point source.
    '''

    discretized_source_class = meta.DiscretizedMTSource

    mnn = Float.T(
        default=1.,
        help='north-north component of moment tensor in [Nm]')

    mee = Float.T(
        default=1.,
        help='east-east component of moment tensor in [Nm]')

    mdd = Float.T(
        default=1.,
        help='down-down component of moment tensor in [Nm]')

    mne = Float.T(
        default=0.,
        help='north-east component of moment tensor in [Nm]')

    mnd = Float.T(
        default=0.,
        help='north-down component of moment tensor in [Nm]')

    med = Float.T(
        default=0.,
        help='east-down component of moment tensor in [Nm]')

    def __init__(self, **kwargs):
        # Accept a 6-sequence 'm6' as shorthand for the six components.
        if 'm6' in kwargs:
            for (k, v) in zip('mnn mee mdd mne mnd med'.split(),
                              kwargs.pop('m6')):
                kwargs[k] = float(v)

        Source.__init__(self, **kwargs)

    @property
    def m6(self):
        # The six components as an array (NN, EE, DD, NE, ND, ED).
        return num.array(self.m6_astuple)

    @property
    def m6_astuple(self):
        return (self.mnn, self.mee, self.mdd, self.mne, self.mnd, self.med)

    @m6.setter
    def m6(self, value):
        self.mnn, self.mee, self.mdd, self.mne, self.mnd, self.med = value

    def base_key(self):
        return Source.base_key(self) + self.m6_astuple

    def discretize_basesource(self, store, target=None):
        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)
        return meta.DiscretizedMTSource(
            m6s=self.m6[num.newaxis, :] * amplitudes[:, num.newaxis],
            **self._dparams_base_repeated(times))

    def get_magnitude(self, store=None, target=None):
        # Magnitude from the scalar moment M0 = |M| / sqrt(2), where |M|
        # is the Frobenius norm of the symmetric tensor.
        m6 = self.m6
        return pmt.moment_to_magnitude(
            math.sqrt(num.sum(m6[0:3]**2) + 2.0 * num.sum(m6[3:6]**2)) /
            math.sqrt(2.))

    def pyrocko_moment_tensor(self, store=None, target=None):
        return pmt.MomentTensor(m=pmt.symmat6(*self.m6_astuple))

    def pyrocko_event(self, store=None, target=None, **kwargs):
        # Compute the moment tensor once and reuse it for both the event's
        # moment tensor and its magnitude (previously computed twice).
        mt = self.pyrocko_moment_tensor(store, target)
        return Source.pyrocko_event(
            self, store, target,
            moment_tensor=mt,
            magnitude=float(mt.moment_magnitude()),
            **kwargs)

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        d = {}
        mt = ev.moment_tensor
        if mt:
            d.update(m6=tuple(map(float, mt.m6())))
        else:
            # No tensor available: fall back to an isotropic tensor
            # matching the event magnitude.
            if ev.magnitude is not None:
                mom = pmt.magnitude_to_moment(ev.magnitude)
                v = math.sqrt(2. / 3.) * mom
                d.update(m6=(v, v, v, 0., 0., 0.))

        d.update(kwargs)
        return super(MTSource, cls).from_pyrocko_event(ev, **d)

2016 

2017 

# Anchor name -> normalized in-plane coordinates (x, y): x along strike
# (-1 = left edge, +1 = right edge), y down-dip (-1 = top edge,
# +1 = bottom edge); matches the nucleation_x/nucleation_y convention.
map_anchor = {
    'center': (0.0, 0.0),
    'center_left': (-1.0, 0.0),
    'center_right': (1.0, 0.0),
    'top': (0.0, -1.0),
    'top_left': (-1.0, -1.0),
    'top_right': (1.0, -1.0),
    'bottom': (0.0, 1.0),
    'bottom_left': (-1.0, 1.0),
    'bottom_right': (1.0, 1.0)}

2028 

2029 

2030class RectangularSource(SourceWithDerivedMagnitude): 

2031 ''' 

2032 Classical Haskell source model modified for bilateral rupture. 

2033 ''' 

2034 

2035 discretized_source_class = meta.DiscretizedMTSource 

2036 

2037 magnitude = Float.T( 

2038 optional=True, 

2039 help='moment magnitude Mw as in [Hanks and Kanamori, 1979]') 

2040 

2041 strike = Float.T( 

2042 default=0.0, 

2043 help='strike direction in [deg], measured clockwise from north') 

2044 

2045 dip = Float.T( 

2046 default=90.0, 

2047 help='dip angle in [deg], measured downward from horizontal') 

2048 

2049 rake = Float.T( 

2050 default=0.0, 

2051 help='rake angle in [deg], ' 

2052 'measured counter-clockwise from right-horizontal ' 

2053 'in on-plane view') 

2054 

2055 length = Float.T( 

2056 default=0., 

2057 help='length of rectangular source area [m]') 

2058 

2059 width = Float.T( 

2060 default=0., 

2061 help='width of rectangular source area [m]') 

2062 

2063 anchor = StringChoice.T( 

2064 choices=['top', 'top_left', 'top_right', 'center', 'bottom', 

2065 'bottom_left', 'bottom_right'], 

2066 default='center', 

2067 optional=True, 

2068 help='Anchor point for positioning the plane, can be: ``top, center ' 

2069 'bottom, top_left, top_right,bottom_left,' 

2070 'bottom_right, center_left, center right``.') 

2071 

2072 nucleation_x = Float.T( 

2073 optional=True, 

2074 help='horizontal position of rupture nucleation in normalized fault ' 

2075 'plane coordinates (``-1.`` = left edge, ``+1.`` = right edge)') 

2076 

2077 nucleation_y = Float.T( 

2078 optional=True, 

2079 help='down-dip position of rupture nucleation in normalized fault ' 

2080 'plane coordinates (``-1.`` = upper edge, ``+1.`` = lower edge)') 

2081 

2082 velocity = Float.T( 

2083 default=3500., 

2084 help='speed of rupture front [m/s]') 

2085 

2086 slip = Float.T( 

2087 optional=True, 

2088 help='Slip on the rectangular source area [m]') 

2089 

2090 opening_fraction = Float.T( 

2091 default=0., 

2092 help='Determines fraction of slip related to opening. ' 

2093 '(``-1``: pure tensile closing, ' 

2094 '``0``: pure shear, ' 

2095 '``1``: pure tensile opening)') 

2096 

2097 decimation_factor = Int.T( 

2098 optional=True, 

2099 default=1, 

2100 help='Sub-source decimation factor, a larger decimation will' 

2101 ' make the result inaccurate but shorten the necessary' 

2102 ' computation time (use for testing puposes only).') 

2103 

2104 aggressive_oversampling = Bool.T( 

2105 default=False, 

2106 help='Aggressive oversampling for basesource discretization. ' 

2107 'When using \'multilinear\' interpolation oversampling has' 

2108 ' practically no effect.') 

2109 

2110 def __init__(self, **kwargs): 

2111 if 'moment' in kwargs: 

2112 mom = kwargs.pop('moment') 

2113 if 'magnitude' not in kwargs: 

2114 kwargs['magnitude'] = float(pmt.moment_to_magnitude(mom)) 

2115 

2116 SourceWithDerivedMagnitude.__init__(self, **kwargs) 

2117 

2118 def base_key(self): 

2119 return SourceWithDerivedMagnitude.base_key(self) + ( 

2120 self.magnitude, 

2121 self.slip, 

2122 self.strike, 

2123 self.dip, 

2124 self.rake, 

2125 self.length, 

2126 self.width, 

2127 self.nucleation_x, 

2128 self.nucleation_y, 

2129 self.velocity, 

2130 self.decimation_factor, 

2131 self.anchor) 

2132 

2133 def check_conflicts(self): 

2134 if self.magnitude is not None and self.slip is not None: 

2135 raise DerivedMagnitudeError( 

2136 'Magnitude and slip are both defined.') 

2137 

    def get_magnitude(self, store=None, target=None):
        '''
        Get the moment magnitude of the source.

        If :py:attr:`magnitude` is set, it is returned directly. If
        :py:attr:`slip` is set instead, the magnitude is derived from the
        discretized source amplitudes, which requires both ``store`` and
        ``target`` to be given. With neither attribute set, the magnitude
        of a unit moment (1 Nm) is returned.

        :raises DerivedMagnitudeError: if ``slip`` is set but ``store`` or
            ``target`` is missing.
        '''
        self.check_conflicts()
        if self.magnitude is not None:
            return self.magnitude

        elif self.slip is not None:
            if None in (store, target):
                raise DerivedMagnitudeError(
                    'Magnitude for a rectangular source with slip defined '
                    'can only be derived when earth model and target '
                    'interpolation method are available.')

            amplitudes = self._discretize(store, target)[2]
            if amplitudes.ndim == 2:
                # CLVD component has no net moment, leave out
                return float(pmt.moment_to_magnitude(
                    num.sum(num.abs(amplitudes[0:2, :]).sum())))
            else:
                return float(pmt.moment_to_magnitude(num.sum(amplitudes)))

        else:
            # neither magnitude nor slip given: magnitude of a unit moment
            return float(pmt.moment_to_magnitude(1.0))

2160 

2161 def get_factor(self): 

2162 return 1.0 

2163 

2164 def get_slip_tensile(self): 

2165 return self.slip * self.opening_fraction 

2166 

2167 def get_slip_shear(self): 

2168 return self.slip - abs(self.get_slip_tensile) 

2169 

    def _discretize(self, store, target):
        '''
        Discretize the rectangular source into sub-source points.

        Returns the sub-source positions, rupture-front arrival times,
        per-point amplitudes (moment contributions), the sub-patch
        dimensions ``dl``/``dw`` and the patch counts ``nl``/``nw``.

        When :py:attr:`slip` is given, amplitudes are computed from elastic
        moduli extracted from the earth model; otherwise they are
        normalized so that the total equals the source moment.
        '''
        # convert normalized (-1..1) nucleation coordinates to [m]
        if self.nucleation_x is not None:
            nucx = self.nucleation_x * 0.5 * self.length
        else:
            nucx = None

        if self.nucleation_y is not None:
            nucy = self.nucleation_y * 0.5 * self.width
        else:
            nucy = None

        stf = self.effective_stf_pre()

        points, times, amplitudes, dl, dw, nl, nw = discretize_rect_source(
            store.config.deltas, store.config.deltat,
            self.time, self.north_shift, self.east_shift, self.depth,
            self.strike, self.dip, self.length, self.width, self.anchor,
            self.velocity, stf=stf, nucleation_x=nucx, nucleation_y=nucy,
            decimation_factor=self.decimation_factor,
            aggressive_oversampling=self.aggressive_oversampling)

        if self.slip is not None:
            if target is not None:
                interpolation = target.interpolation
            else:
                interpolation = 'nearest_neighbor'
                logger.warning(
                    'no target information available, will use '
                    '"nearest_neighbor" interpolation when extracting shear '
                    'modulus from earth model')

            shear_moduli = store.config.get_shear_moduli(
                self.lat, self.lon,
                points=points,
                interpolation=interpolation)

            tensile_slip = self.get_slip_tensile()
            shear_slip = self.slip - abs(tensile_slip)

            # first row: double-couple (shear) moment contribution
            amplitudes_total = [shear_moduli * shear_slip]
            if tensile_slip != 0:
                bulk_moduli = store.config.get_bulk_moduli(
                    self.lat, self.lon,
                    points=points,
                    interpolation=interpolation)

                # isotropic and CLVD contributions of the tensile opening
                tensile_iso = bulk_moduli * tensile_slip
                tensile_clvd = (2. / 3.) * shear_moduli * tensile_slip

                amplitudes_total.extend([tensile_iso, tensile_clvd])

            # scale by the per-point amplitude weights and patch area
            amplitudes_total = num.vstack(amplitudes_total).squeeze() * \
                amplitudes * dl * dw

        else:
            # normalization to retain total moment
            amplitudes_norm = amplitudes / num.sum(amplitudes)
            moment = self.get_moment(store, target)

            amplitudes_total = [
                amplitudes_norm * moment * (1 - abs(self.opening_fraction))]
            if self.opening_fraction != 0.:
                amplitudes_total.append(
                    amplitudes_norm * self.opening_fraction * moment)

            amplitudes_total = num.vstack(amplitudes_total).squeeze()

        return points, times, num.atleast_1d(amplitudes_total), dl, dw, nl, nw

2238 

2239 def discretize_basesource(self, store, target=None): 

2240 

2241 points, times, amplitudes, dl, dw, nl, nw = self._discretize( 

2242 store, target) 

2243 

2244 mot = pmt.MomentTensor( 

2245 strike=self.strike, dip=self.dip, rake=self.rake) 

2246 m6s = num.repeat(mot.m6()[num.newaxis, :], times.size, axis=0) 

2247 

2248 if amplitudes.ndim == 1: 

2249 m6s[:, :] *= amplitudes[:, num.newaxis] 

2250 elif amplitudes.ndim == 2: 

2251 # shear MT components 

2252 rotmat1 = pmt.euler_to_matrix( 

2253 d2r * self.dip, d2r * self.strike, d2r * -self.rake) 

2254 m6s[:, :] *= amplitudes[0, :][:, num.newaxis] 

2255 

2256 if amplitudes.shape[0] == 2: 

2257 # tensile MT components - moment/magnitude input 

2258 tensile = pmt.symmat6(1., 1., 3., 0., 0., 0.) 

2259 rot_tensile = pmt.to6( 

2260 num.dot(rotmat1.T, num.dot(tensile, rotmat1))) 

2261 

2262 m6s_tensile = rot_tensile[ 

2263 num.newaxis, :] * amplitudes[1, :][:, num.newaxis] 

2264 m6s += m6s_tensile 

2265 

2266 elif amplitudes.shape[0] == 3: 

2267 # tensile MT components - slip input 

2268 iso = pmt.symmat6(1., 1., 1., 0., 0., 0.) 

2269 clvd = pmt.symmat6(-1., -1., 2., 0., 0., 0.) 

2270 

2271 rot_iso = pmt.to6( 

2272 num.dot(rotmat1.T, num.dot(iso, rotmat1))) 

2273 rot_clvd = pmt.to6( 

2274 num.dot(rotmat1.T, num.dot(clvd, rotmat1))) 

2275 

2276 m6s_iso = rot_iso[ 

2277 num.newaxis, :] * amplitudes[1, :][:, num.newaxis] 

2278 m6s_clvd = rot_clvd[ 

2279 num.newaxis, :] * amplitudes[2, :][:, num.newaxis] 

2280 m6s += m6s_iso + m6s_clvd 

2281 else: 

2282 raise ValueError('Unknwown amplitudes shape!') 

2283 else: 

2284 raise ValueError( 

2285 'Unexpected dimension of {}'.format(amplitudes.ndim)) 

2286 

2287 ds = meta.DiscretizedMTSource( 

2288 lat=self.lat, 

2289 lon=self.lon, 

2290 times=times, 

2291 north_shifts=points[:, 0], 

2292 east_shifts=points[:, 1], 

2293 depths=points[:, 2], 

2294 m6s=m6s, 

2295 dl=dl, 

2296 dw=dw, 

2297 nl=nl, 

2298 nw=nw) 

2299 

2300 return ds 

2301 

2302 def outline(self, cs='xyz'): 

2303 points = outline_rect_source(self.strike, self.dip, self.length, 

2304 self.width, self.anchor) 

2305 

2306 points[:, 0] += self.north_shift 

2307 points[:, 1] += self.east_shift 

2308 points[:, 2] += self.depth 

2309 if cs == 'xyz': 

2310 return points 

2311 elif cs == 'xy': 

2312 return points[:, :2] 

2313 elif cs in ('latlon', 'lonlat', 'latlondepth'): 

2314 latlon = ne_to_latlon( 

2315 self.lat, self.lon, points[:, 0], points[:, 1]) 

2316 

2317 latlon = num.array(latlon).T 

2318 if cs == 'latlon': 

2319 return latlon 

2320 elif cs == 'lonlat': 

2321 return latlon[:, ::-1] 

2322 else: 

2323 return num.concatenate( 

2324 (latlon, points[:, 2].reshape((len(points), 1))), 

2325 axis=1) 

2326 

2327 def points_on_source(self, cs='xyz', **kwargs): 

2328 

2329 points = points_on_rect_source( 

2330 self.strike, self.dip, self.length, self.width, 

2331 self.anchor, **kwargs) 

2332 

2333 points[:, 0] += self.north_shift 

2334 points[:, 1] += self.east_shift 

2335 points[:, 2] += self.depth 

2336 if cs == 'xyz': 

2337 return points 

2338 elif cs == 'xy': 

2339 return points[:, :2] 

2340 elif cs in ('latlon', 'lonlat', 'latlondepth'): 

2341 latlon = ne_to_latlon( 

2342 self.lat, self.lon, points[:, 0], points[:, 1]) 

2343 

2344 latlon = num.array(latlon).T 

2345 if cs == 'latlon': 

2346 return latlon 

2347 elif cs == 'lonlat': 

2348 return latlon[:, ::-1] 

2349 else: 

2350 return num.concatenate( 

2351 (latlon, points[:, 2].reshape((len(points), 1))), 

2352 axis=1) 

2353 

2354 def get_nucleation_abs_coord(self, cs='xy'): 

2355 

2356 if self.nucleation_x is None: 

2357 return None, None 

2358 

2359 coords = from_plane_coords(self.strike, self.dip, self.length, 

2360 self.width, self.depth, self.nucleation_x, 

2361 self.nucleation_y, lat=self.lat, 

2362 lon=self.lon, north_shift=self.north_shift, 

2363 east_shift=self.east_shift, cs=cs) 

2364 return coords 

2365 

2366 def pyrocko_moment_tensor(self, store=None, target=None): 

2367 return pmt.MomentTensor( 

2368 strike=self.strike, 

2369 dip=self.dip, 

2370 rake=self.rake, 

2371 scalar_moment=self.get_moment(store, target)) 

2372 

    def pyrocko_event(self, store=None, target=None, **kwargs):
        # Plain delegation to the parent class; kept as an explicit hook
        # for subclass-specific event attributes.
        return SourceWithDerivedMagnitude.pyrocko_event(
            self, store, target,
            **kwargs)

2377 

2378 @classmethod 

2379 def from_pyrocko_event(cls, ev, **kwargs): 

2380 d = {} 

2381 mt = ev.moment_tensor 

2382 if mt: 

2383 (strike, dip, rake), _ = mt.both_strike_dip_rake() 

2384 d.update( 

2385 strike=float(strike), 

2386 dip=float(dip), 

2387 rake=float(rake), 

2388 magnitude=float(mt.moment_magnitude())) 

2389 

2390 d.update(kwargs) 

2391 return super(RectangularSource, cls).from_pyrocko_event(ev, **d) 

2392 

2393 

2394class PseudoDynamicRupture(SourceWithDerivedMagnitude): 

2395 ''' 

2396 Combined Eikonal and Okada quasi-dynamic rupture model. 

2397 

2398 Details are described in :doc:`/topics/pseudo-dynamic-rupture`. 

2399 Note: attribute `stf` is not used so far, but kept for future applications. 

2400 ''' 

2401 

2402 discretized_source_class = meta.DiscretizedMTSource 

2403 

2404 strike = Float.T( 

2405 default=0.0, 

2406 help='Strike direction in [deg], measured clockwise from north.') 

2407 

2408 dip = Float.T( 

2409 default=0.0, 

2410 help='Dip angle in [deg], measured downward from horizontal.') 

2411 

2412 length = Float.T( 

2413 default=10. * km, 

2414 help='Length of rectangular source area in [m].') 

2415 

2416 width = Float.T( 

2417 default=5. * km, 

2418 help='Width of rectangular source area in [m].') 

2419 

2420 anchor = StringChoice.T( 

2421 choices=['top', 'top_left', 'top_right', 'center', 'bottom', 

2422 'bottom_left', 'bottom_right'], 

2423 default='center', 

2424 optional=True, 

2425 help='Anchor point for positioning the plane, can be: ``top, center, ' 

2426 'bottom, top_left, top_right, bottom_left, ' 

2427 'bottom_right, center_left, center_right``.') 

2428 

2429 nucleation_x__ = Array.T( 

2430 default=num.array([0.]), 

2431 dtype=num.float64, 

2432 serialize_as='list', 

2433 help='Horizontal position of rupture nucleation in normalized fault ' 

2434 'plane coordinates (``-1.`` = left edge, ``+1.`` = right edge).') 

2435 

2436 nucleation_y__ = Array.T( 

2437 default=num.array([0.]), 

2438 dtype=num.float64, 

2439 serialize_as='list', 

2440 help='Down-dip position of rupture nucleation in normalized fault ' 

2441 'plane coordinates (``-1.`` = upper edge, ``+1.`` = lower edge).') 

2442 

2443 nucleation_time__ = Array.T( 

2444 optional=True, 

2445 help='Time in [s] after origin, when nucleation points defined by ' 

2446 '``nucleation_x`` and ``nucleation_y`` rupture.', 

2447 dtype=num.float64, 

2448 serialize_as='list') 

2449 

2450 gamma = Float.T( 

2451 default=0.8, 

2452 help='Scaling factor between rupture velocity and S-wave velocity: ' 

2453 r':math:`v_r = \gamma * v_s`.') 

2454 

2455 nx = Int.T( 

2456 default=2, 

2457 help='Number of discrete source patches in x direction (along ' 

2458 'strike).') 

2459 

2460 ny = Int.T( 

2461 default=2, 

2462 help='Number of discrete source patches in y direction (down dip).') 

2463 

2464 slip = Float.T( 

2465 optional=True, 

2466 help='Maximum slip of the rectangular source [m]. ' 

2467 'Setting the slip the tractions/stress field ' 

2468 'will be normalized to accomodate the desired maximum slip.') 

2469 

2470 rake = Float.T( 

2471 optional=True, 

2472 help='Rake angle in [deg], ' 

2473 'measured counter-clockwise from right-horizontal ' 

2474 'in on-plane view. Rake is translated into homogenous tractions ' 

2475 'in strike and up-dip direction. ``rake`` is mutually exclusive ' 

2476 'with tractions parameter.') 

2477 

2478 patches = List.T( 

2479 OkadaSource.T(), 

2480 optional=True, 

2481 help='List of all boundary elements/sub faults/fault patches.') 

2482 

2483 patch_mask__ = Array.T( 

2484 dtype=bool, 

2485 serialize_as='list', 

2486 shape=(None,), 

2487 optional=True, 

2488 help='Mask for all boundary elements/sub faults/fault patches. True ' 

2489 'leaves the patch in the calculation, False excludes the patch.') 

2490 

2491 tractions = TractionField.T( 

2492 optional=True, 

2493 help='Traction field the rupture plane is exposed to. See the' 

2494 ':py:mod:`pyrocko.gf.tractions` module for more details. ' 

2495 'If ``tractions=None`` and ``rake`` is given' 

2496 ' :py:class:`~pyrocko.gf.tractions.DirectedTractions` will' 

2497 ' be used.') 

2498 

2499 coef_mat = Array.T( 

2500 optional=True, 

2501 help='Coefficient matrix linking traction and dislocation field.', 

2502 dtype=num.float64, 

2503 shape=(None, None)) 

2504 

2505 eikonal_decimation = Int.T( 

2506 optional=True, 

2507 default=1, 

2508 help='Sub-source eikonal factor, a smaller eikonal factor will' 

2509 ' increase the accuracy of rupture front calculation but' 

2510 ' increases also the computation time.') 

2511 

2512 decimation_factor = Int.T( 

2513 optional=True, 

2514 default=1, 

2515 help='Sub-source decimation factor, a larger decimation will' 

2516 ' make the result inaccurate but shorten the necessary' 

2517 ' computation time (use for testing puposes only).') 

2518 

2519 nthreads = Int.T( 

2520 optional=True, 

2521 default=1, 

2522 help='Number of threads for Okada forward modelling, ' 

2523 'matrix inversion and calculation of point subsources. ' 

2524 'Note: for small/medium matrices 1 thread is most efficient.') 

2525 

2526 pure_shear = Bool.T( 

2527 optional=True, 

2528 default=False, 

2529 help='Calculate only shear tractions and omit tensile tractions.') 

2530 

2531 smooth_rupture = Bool.T( 

2532 default=True, 

2533 help='Smooth the tractions by weighting partially ruptured' 

2534 ' fault patches.') 

2535 

2536 aggressive_oversampling = Bool.T( 

2537 default=False, 

2538 help='Aggressive oversampling for basesource discretization. ' 

2539 'When using \'multilinear\' interpolation oversampling has' 

2540 ' practically no effect.') 

2541 

2542 def __init__(self, **kwargs): 

2543 SourceWithDerivedMagnitude.__init__(self, **kwargs) 

2544 self._interpolators = {} 

2545 self.check_conflicts() 

2546 

2547 @property 

2548 def nucleation_x(self): 

2549 return self.nucleation_x__ 

2550 

2551 @nucleation_x.setter 

2552 def nucleation_x(self, nucleation_x): 

2553 if isinstance(nucleation_x, list): 

2554 nucleation_x = num.array(nucleation_x) 

2555 

2556 elif not isinstance( 

2557 nucleation_x, num.ndarray) and nucleation_x is not None: 

2558 

2559 nucleation_x = num.array([nucleation_x]) 

2560 self.nucleation_x__ = nucleation_x 

2561 

2562 @property 

2563 def nucleation_y(self): 

2564 return self.nucleation_y__ 

2565 

2566 @nucleation_y.setter 

2567 def nucleation_y(self, nucleation_y): 

2568 if isinstance(nucleation_y, list): 

2569 nucleation_y = num.array(nucleation_y) 

2570 

2571 elif not isinstance(nucleation_y, num.ndarray) \ 

2572 and nucleation_y is not None: 

2573 nucleation_y = num.array([nucleation_y]) 

2574 

2575 self.nucleation_y__ = nucleation_y 

2576 

2577 @property 

2578 def nucleation(self): 

2579 nucl_x, nucl_y = self.nucleation_x, self.nucleation_y 

2580 

2581 if (nucl_x is None) or (nucl_y is None): 

2582 return None 

2583 

2584 assert nucl_x.shape[0] == nucl_y.shape[0] 

2585 

2586 return num.concatenate( 

2587 (nucl_x[:, num.newaxis], nucl_y[:, num.newaxis]), axis=1) 

2588 

2589 @nucleation.setter 

2590 def nucleation(self, nucleation): 

2591 if isinstance(nucleation, list): 

2592 nucleation = num.array(nucleation) 

2593 

2594 assert nucleation.shape[1] == 2 

2595 

2596 self.nucleation_x = nucleation[:, 0] 

2597 self.nucleation_y = nucleation[:, 1] 

2598 

2599 @property 

2600 def nucleation_time(self): 

2601 return self.nucleation_time__ 

2602 

2603 @nucleation_time.setter 

2604 def nucleation_time(self, nucleation_time): 

2605 if not isinstance(nucleation_time, num.ndarray) \ 

2606 and nucleation_time is not None: 

2607 nucleation_time = num.array([nucleation_time]) 

2608 

2609 self.nucleation_time__ = nucleation_time 

2610 

2611 @property 

2612 def patch_mask(self): 

2613 if (self.patch_mask__ is not None and 

2614 self.patch_mask__.shape == (self.nx * self.ny,)): 

2615 

2616 return self.patch_mask__ 

2617 else: 

2618 return num.ones(self.nx * self.ny, dtype=bool) 

2619 

2620 @patch_mask.setter 

2621 def patch_mask(self, patch_mask): 

2622 if isinstance(patch_mask, list): 

2623 patch_mask = num.array(patch_mask) 

2624 

2625 self.patch_mask__ = patch_mask 

2626 

2627 def get_tractions(self): 

2628 ''' 

2629 Get source traction vectors. 

2630 

2631 If :py:attr:`rake` is given, unit length directed traction vectors 

2632 (:py:class:`~pyrocko.gf.tractions.DirectedTractions`) are returned, 

2633 else the given :py:attr:`tractions` are used. 

2634 

2635 :returns: 

2636 Traction vectors per patch. 

2637 :rtype: 

2638 :py:class:`~numpy.ndarray`: ``(n_patches, 3)``. 

2639 ''' 

2640 

2641 if self.rake is not None: 

2642 if num.isnan(self.rake): 

2643 raise ValueError('Rake must be a real number, not NaN.') 

2644 

2645 logger.warning( 

2646 'Tractions are derived based on the given source rake.') 

2647 tractions = DirectedTractions(rake=self.rake) 

2648 else: 

2649 tractions = self.tractions 

2650 return tractions.get_tractions(self.nx, self.ny, self.patches) 

2651 

2652 def base_key(self): 

2653 return SourceWithDerivedMagnitude.base_key(self) + ( 

2654 self.slip, 

2655 self.strike, 

2656 self.dip, 

2657 self.rake, 

2658 self.length, 

2659 self.width, 

2660 float(self.nucleation_x.mean()), 

2661 float(self.nucleation_y.mean()), 

2662 self.decimation_factor, 

2663 self.anchor, 

2664 self.pure_shear, 

2665 self.gamma, 

2666 tuple(self.patch_mask)) 

2667 

2668 def check_conflicts(self): 

2669 if self.tractions and self.rake: 

2670 raise AttributeError( 

2671 'Tractions and rake are mutually exclusive.') 

2672 if self.tractions is None and self.rake is None: 

2673 self.rake = 0. 

2674 

    def get_magnitude(self, store=None, target=None):
        '''
        Get the moment magnitude of the rupture.

        With :py:attr:`slip` or :py:attr:`tractions` set, the magnitude is
        derived by integrating the moment rate of the discretized source
        over time; this requires a GF ``store``. Otherwise the magnitude
        of a unit moment (1 Nm) is returned.

        :raises DerivedMagnitudeError: if derivation is requested without
            a ``store``.
        '''
        self.check_conflicts()
        if self.slip is not None or self.tractions is not None:
            if store is None:
                raise DerivedMagnitudeError(
                    'Magnitude for a rectangular source with slip or '
                    'tractions defined can only be derived when earth model '
                    'is set.')

            moment_rate, calc_times = self.discretize_basesource(
                store, target=target).get_moment_rate(store.config.deltat)

            # time intervals for the integration; the first interval is
            # duplicated so deltat has one entry per sample
            deltat = num.concatenate((
                (num.diff(calc_times)[0],),
                num.diff(calc_times)))

            return float(pmt.moment_to_magnitude(
                num.sum(moment_rate * deltat)))

        else:
            # neither slip nor tractions given: magnitude of a unit moment
            return float(pmt.moment_to_magnitude(1.0))

2696 

2697 def get_factor(self): 

2698 return 1.0 

2699 

2700 def outline(self, cs='xyz'): 

2701 ''' 

2702 Get source outline corner coordinates. 

2703 

2704 :param cs: 

2705 :ref:`Output coordinate system <coordinate-system-names>`. 

2706 :type cs: 

2707 optional, str 

2708 

2709 :returns: 

2710 Corner points in desired coordinate system. 

2711 :rtype: 

2712 :py:class:`~numpy.ndarray`: ``(5, [2, 3])``. 

2713 ''' 

2714 points = outline_rect_source(self.strike, self.dip, self.length, 

2715 self.width, self.anchor) 

2716 

2717 points[:, 0] += self.north_shift 

2718 points[:, 1] += self.east_shift 

2719 points[:, 2] += self.depth 

2720 if cs == 'xyz': 

2721 return points 

2722 elif cs == 'xy': 

2723 return points[:, :2] 

2724 elif cs in ('latlon', 'lonlat', 'latlondepth'): 

2725 latlon = ne_to_latlon( 

2726 self.lat, self.lon, points[:, 0], points[:, 1]) 

2727 

2728 latlon = num.array(latlon).T 

2729 if cs == 'latlon': 

2730 return latlon 

2731 elif cs == 'lonlat': 

2732 return latlon[:, ::-1] 

2733 else: 

2734 return num.concatenate( 

2735 (latlon, points[:, 2].reshape((len(points), 1))), 

2736 axis=1) 

2737 

2738 def points_on_source(self, cs='xyz', **kwargs): 

2739 ''' 

2740 Convert relative plane coordinates to geographical coordinates. 

2741 

2742 Given x and y coordinates (relative source coordinates between -1. 

2743 and 1.) are converted to desired geographical coordinates. Coordinates 

2744 need to be given as :py:class:`~numpy.ndarray` arguments ``points_x`` 

2745 and ``points_y``. 

2746 

2747 :param cs: 

2748 :ref:`Output coordinate system <coordinate-system-names>`. 

2749 :type cs: 

2750 optional, str 

2751 

2752 :returns: 

2753 Point coordinates in desired coordinate system. 

2754 :rtype: 

2755 :py:class:`~numpy.ndarray`: ``(n_points, [2, 3])``. 

2756 ''' 

2757 points = points_on_rect_source( 

2758 self.strike, self.dip, self.length, self.width, 

2759 self.anchor, **kwargs) 

2760 

2761 points[:, 0] += self.north_shift 

2762 points[:, 1] += self.east_shift 

2763 points[:, 2] += self.depth 

2764 if cs == 'xyz': 

2765 return points 

2766 elif cs == 'xy': 

2767 return points[:, :2] 

2768 elif cs in ('latlon', 'lonlat', 'latlondepth'): 

2769 latlon = ne_to_latlon( 

2770 self.lat, self.lon, points[:, 0], points[:, 1]) 

2771 

2772 latlon = num.array(latlon).T 

2773 if cs == 'latlon': 

2774 return latlon 

2775 elif cs == 'lonlat': 

2776 return latlon[:, ::-1] 

2777 else: 

2778 return num.concatenate( 

2779 (latlon, points[:, 2].reshape((len(points), 1))), 

2780 axis=1) 

2781 

2782 def pyrocko_moment_tensor(self, store=None, target=None): 

2783 if store is not None: 

2784 if not self.patches: 

2785 self.discretize_patches(store) 

2786 

2787 data = self.get_slip() 

2788 else: 

2789 data = self.get_tractions() 

2790 

2791 weights = num.linalg.norm(data, axis=1) 

2792 weights /= weights.sum() 

2793 

2794 rakes = num.arctan2(data[:, 1], data[:, 0]) * r2d 

2795 rake = num.average(rakes, weights=weights) 

2796 

2797 return pmt.MomentTensor( 

2798 strike=self.strike, 

2799 dip=self.dip, 

2800 rake=rake, 

2801 scalar_moment=self.get_moment(store, target)) 

2802 

    def pyrocko_event(self, store=None, target=None, **kwargs):
        # Plain delegation to the parent class; kept as an explicit hook
        # for subclass-specific event attributes.
        return SourceWithDerivedMagnitude.pyrocko_event(
            self, store, target,
            **kwargs)

2807 

2808 @classmethod 

2809 def from_pyrocko_event(cls, ev, **kwargs): 

2810 d = {} 

2811 mt = ev.moment_tensor 

2812 if mt: 

2813 (strike, dip, rake), _ = mt.both_strike_dip_rake() 

2814 d.update( 

2815 strike=float(strike), 

2816 dip=float(dip), 

2817 rake=float(rake)) 

2818 

2819 d.update(kwargs) 

2820 return super(PseudoDynamicRupture, cls).from_pyrocko_event(ev, **d) 

2821 

    def _discretize_points(self, store, *args, **kwargs):
        '''
        Discretize source plane with equal vertical and horizontal spacing.

        Additional ``*args`` and ``**kwargs`` are passed to
        :py:meth:`points_on_source`.

        :param store:
            Green's function database (needs to cover whole region of the
            source).
        :type store:
            :py:class:`~pyrocko.gf.store.Store`

        :returns:
            Number of points in strike and dip direction, distance
            between adjacent points, coordinates (latlondepth) and
            coordinates (xy on fault) for discrete points.
        :rtype:
            (int, int, float, :py:class:`~numpy.ndarray`,
            :py:class:`~numpy.ndarray`).
        '''
        anch_x, anch_y = map_anchor[self.anchor]

        # sample a down-dip profile (~1 point per km) to find the minimum
        # shear-wave velocity along the fault's depth extent
        npoints = int(self.width // km) + 1
        points = num.zeros((npoints, 3))
        points[:, 1] = num.linspace(-1., 1., npoints)
        points[:, 1] = (points[:, 1] - anch_y) * self.width/2

        rotmat = pmt.euler_to_matrix(self.dip*d2r, self.strike*d2r, 0.0)
        points = num.dot(rotmat.T, points.T).T
        points[:, 2] += self.depth

        vs_min = store.config.get_vs(
            self.lat, self.lon, points,
            interpolation='nearest_neighbor')
        # floor of 0.5 km/s guards against unrealistically slow layers
        vr_min = max(vs_min.min(), .5*km) * self.gamma

        oversampling = 10.
        delta_l = self.length / (self.nx * oversampling)
        delta_w = self.width / (self.ny * oversampling)

        # grid spacing: finest of GF sampling, rupture front travel per
        # time step and the oversampled patch sizes
        delta = self.eikonal_decimation * num.min([
            store.config.deltat * vr_min / oversampling,
            delta_l, delta_w] + [
            deltas for deltas in store.config.deltas])

        # snap spacing so the width divides into an integer number of cells
        delta = delta_w / num.ceil(delta_w / delta)

        nx = int(num.ceil(self.length / delta)) + 1
        ny = int(num.ceil(self.width / delta)) + 1

        # widen the normalized x-range slightly so nx cells cover the
        # full fault length exactly
        rem_l = (nx-1)*delta - self.length
        lim_x = rem_l / self.length

        points_xy = num.zeros((nx * ny, 2))
        points_xy[:, 0] = num.repeat(
            num.linspace(-1.-lim_x, 1.+lim_x, nx), ny)
        points_xy[:, 1] = num.tile(
            num.linspace(-1., 1., ny), nx)

        points = self.points_on_source(
            points_x=points_xy[:, 0],
            points_y=points_xy[:, 1],
            **kwargs)

        return nx, ny, delta, points, points_xy

2888 

2889 def _discretize_rupture_v(self, store, interpolation='nearest_neighbor', 

2890 points=None): 

2891 ''' 

2892 Get rupture velocity for discrete points on source plane. 

2893 

2894 :param store: 

2895 Green's function database (needs to cover the whole region of the 

2896 source) 

2897 :type store: 

2898 optional, :py:class:`~pyrocko.gf.store.Store` 

2899 

2900 :param interpolation: 

2901 Interpolation method to use (choose between ``'nearest_neighbor'`` 

2902 and ``'multilinear'``). 

2903 :type interpolation: 

2904 optional, str 

2905 

2906 :param points: 

2907 Coordinates on fault (-1.:1.) of discrete points. 

2908 :type points: 

2909 optional, :py:class:`~numpy.ndarray`: ``(n_points, 2)`` 

2910 

2911 :returns: 

2912 Rupture velocity assumed as :math:`v_s * \\gamma` for discrete 

2913 points. 

2914 :rtype: 

2915 :py:class:`~numpy.ndarray`: ``(n_points, )``. 

2916 ''' 

2917 

2918 if points is None: 

2919 _, _, _, points, _ = self._discretize_points(store, cs='xyz') 

2920 

2921 return store.config.get_vs( 

2922 self.lat, self.lon, 

2923 points=points, 

2924 interpolation=interpolation) * self.gamma 

2925 

2926 def discretize_time( 

2927 self, store, interpolation='nearest_neighbor', 

2928 vr=None, times=None, *args, **kwargs): 

2929 ''' 

2930 Get rupture start time for discrete points on source plane. 

2931 

2932 :param store: 

2933 Green's function database (needs to cover whole region of the 

2934 source) 

2935 :type store: 

2936 :py:class:`~pyrocko.gf.store.Store` 

2937 

2938 :param interpolation: 

2939 Interpolation method to use (choose between ``'nearest_neighbor'`` 

2940 and ``'multilinear'``). 

2941 :type interpolation: 

2942 optional, str 

2943 

2944 :param vr: 

2945 Array, containing rupture user defined rupture velocity values. 

2946 :type vr: 

2947 optional, :py:class:`~numpy.ndarray` 

2948 

2949 :param times: 

2950 Array, containing zeros, where rupture is starting, real positive 

2951 numbers at later secondary nucleation points and -1, where time 

2952 will be calculated. If not given, rupture starts at nucleation_x, 

2953 nucleation_y. Times are given for discrete points with equal 

2954 horizontal and vertical spacing. 

2955 :type times: 

2956 optional, :py:class:`~numpy.ndarray` 

2957 

2958 :returns: 

2959 Coordinates (latlondepth), coordinates (xy), rupture velocity, 

2960 rupture propagation time of discrete points. 

2961 :rtype: 

2962 :py:class:`~numpy.ndarray`: ``(n_points, 3)``, 

2963 :py:class:`~numpy.ndarray`: ``(n_points, 2)``, 

2964 :py:class:`~numpy.ndarray`: ``(n_points_dip, n_points_strike)``, 

2965 :py:class:`~numpy.ndarray`: ``(n_points_dip, n_points_strike)``. 

2966 ''' 

2967 nx, ny, delta, points, points_xy = self._discretize_points( 

2968 store, cs='xyz') 

2969 

2970 if vr is None or vr.shape != tuple((nx, ny)): 

2971 if vr: 

2972 logger.warning( 

2973 'Given rupture velocities are not in right shape: ' 

2974 '(%i, %i), but needed is (%i, %i).', *vr.shape + (nx, ny)) 

2975 vr = self._discretize_rupture_v(store, interpolation, points)\ 

2976 .reshape(nx, ny) 

2977 

2978 if vr.shape != tuple((nx, ny)): 

2979 logger.warning( 

2980 'Given rupture velocities are not in right shape. Therefore' 

2981 ' standard rupture velocity array is used.') 

2982 

2983 def initialize_times(): 

2984 nucl_x, nucl_y = self.nucleation_x, self.nucleation_y 

2985 

2986 if nucl_x.shape != nucl_y.shape: 

2987 raise ValueError( 

2988 'Nucleation coordinates have different shape.') 

2989 

2990 dist_points = num.array([ 

2991 num.linalg.norm(points_xy - num.array([x, y]).ravel(), axis=1) 

2992 for x, y in zip(nucl_x, nucl_y)]) 

2993 nucl_indices = num.argmin(dist_points, axis=1) 

2994 

2995 if self.nucleation_time is None: 

2996 nucl_times = num.zeros_like(nucl_indices) 

2997 else: 

2998 if self.nucleation_time.shape == nucl_x.shape: 

2999 nucl_times = self.nucleation_time 

3000 else: 

3001 raise ValueError( 

3002 'Nucleation coordinates and times have different ' 

3003 'shapes') 

3004 

3005 t = num.full(nx * ny, -1.) 

3006 t[nucl_indices] = nucl_times 

3007 return t.reshape(nx, ny) 

3008 

3009 if times is None: 

3010 times = initialize_times() 

3011 elif times.shape != tuple((nx, ny)): 

3012 times = initialize_times() 

3013 logger.warning( 

3014 'Given times are not in right shape. Therefore standard time' 

3015 ' array is used.') 

3016 

3017 eikonal_ext.eikonal_solver_fmm_cartesian( 

3018 speeds=vr, times=times, delta=delta) 

3019 

3020 return points, points_xy, vr, times 

3021 

    def get_vr_time_interpolators(
            self, store, interpolation='nearest_neighbor', force=False,
            **kwargs):
        '''
        Get interpolators for rupture velocity and rupture time.

        Additional ``**kwargs`` are passed to :py:meth:`discretize_time`.

        :param store:
            Green's function database (needs to cover whole region of the
            source).
        :type store:
            :py:class:`~pyrocko.gf.store.Store`

        :param interpolation:
            Interpolation method to use (choose between
            ``'nearest_neighbor'`` and ``'multilinear'``).
        :type interpolation:
            optional, str

        :param force:
            Force recalculation of the interpolators (e.g. after change of
            nucleation point locations/times). Default is ``False``.
        :type force:
            optional, bool

        :returns:
            Cached tuple ``(nx, ny, times, vr, time_interpolator,
            vr_interpolator)`` for the requested interpolation method.
        '''
        interp_map = {'multilinear': 'linear', 'nearest_neighbor': 'nearest'}
        if interpolation not in interp_map:
            raise TypeError(
                'Interpolation method %s not available' % interpolation)

        # compute and cache the interpolators once per interpolation method
        if not self._interpolators.get(interpolation, False) or force:
            _, points_xy, vr, times = self.discretize_time(
                store, **kwargs)

            if self.length <= 0.:
                raise ValueError(
                    'length must be larger then 0. not %g' % self.length)

            if self.width <= 0.:
                raise ValueError(
                    'width must be larger then 0. not %g' % self.width)

            nx, ny = times.shape
            anch_x, anch_y = map_anchor[self.anchor]

            # convert normalized (-1..1) plane coordinates to [m],
            # measured from the anchor point
            points_xy[:, 0] = (points_xy[:, 0] - anch_x) * self.length / 2.
            points_xy[:, 1] = (points_xy[:, 1] - anch_y) * self.width / 2.

            ascont = num.ascontiguousarray

            # grid axes are the unique x values (every ny-th row) and the
            # first ny y values of the flattened point list
            self._interpolators[interpolation] = (
                nx, ny, times, vr,
                RegularGridInterpolator(
                    (ascont(points_xy[::ny, 0]), ascont(points_xy[:ny, 1])),
                    times,
                    method=interp_map[interpolation]),
                RegularGridInterpolator(
                    (ascont(points_xy[::ny, 0]), ascont(points_xy[:ny, 1])),
                    vr,
                    method=interp_map[interpolation]))

        return self._interpolators[interpolation]

3085 

    def discretize_patches(
            self, store, interpolation='nearest_neighbor', force=False,
            grid_shape=(),
            **kwargs):
        '''
        Get rupture start time and OkadaSource elements for points on rupture.

        All source elements and their corresponding center points are
        calculated and stored in the :py:attr:`patches` attribute.

        Additional ``**kwargs`` are passed to :py:meth:`discretize_time`.

        :param store:
            Green's function database (needs to cover whole region of the
            source).
        :type store:
            :py:class:`~pyrocko.gf.store.Store`

        :param interpolation:
            Interpolation method to use (choose between ``'nearest_neighbor'``
            and ``'multilinear'``).
        :type interpolation:
            optional, str

        :param force:
            Force recalculation of the vr and time interpolators ( e.g. after
            change of nucleation point locations/times). Default is ``False``.
        :type force:
            optional, bool

        :param grid_shape:
            Desired sub fault patch grid size (nlength, nwidth). Either factor
            or grid_shape should be set.
        :type grid_shape:
            optional, tuple of int
        '''
        nx, ny, times, vr, time_interpolator, vr_interpolator = \
            self.get_vr_time_interpolators(
                store,
                interpolation=interpolation, force=force, **kwargs)
        anch_x, anch_y = map_anchor[self.anchor]

        # Fault half extensions relative to the anchor point: al along
        # strike, aw down dip (length maps to al, width to aw below).
        al = self.length / 2.
        aw = self.width / 2.
        al1 = -(al + anch_x * al)
        al2 = al - anch_x * al
        aw1 = -aw + anch_y * aw
        aw2 = aw + anch_y * aw
        assert num.abs([al1, al2]).sum() == self.length
        assert num.abs([aw1, aw2]).sum() == self.width

        def get_lame(*a, **kw):
            # Shear modulus and Poisson ratio from the earth model:
            # lambda = vp**2 * rho - 2 mu, nu = lambda / (2 (lambda + mu)).
            shear_mod = store.config.get_shear_moduli(*a, **kw)
            lamb = store.config.get_vp(*a, **kw)**2 \
                * store.config.get_rho(*a, **kw) - 2. * shear_mod
            return shear_mod, lamb / (2. * (lamb + shear_mod))

        # Elastic parameters at the source reference point.
        shear_mod, poisson = get_lame(
            self.lat, self.lon,
            num.array([[self.north_shift, self.east_shift, self.depth]]),
            interpolation=interpolation)

        okada_src = OkadaSource(
            lat=self.lat, lon=self.lon,
            strike=self.strike, dip=self.dip,
            north_shift=self.north_shift, east_shift=self.east_shift,
            depth=self.depth,
            al1=al1, al2=al2, aw1=aw1, aw2=aw2,
            poisson=poisson.mean(),
            shearmod=shear_mod.mean(),
            opening=kwargs.get('opening', 0.))

        # Patch grid size: an explicit grid_shape wins, otherwise fall back
        # to the eikonal grid size.
        if not (self.nx and self.ny):
            if grid_shape:
                self.nx, self.ny = grid_shape
            else:
                self.nx = nx
                self.ny = ny

        source_disc, source_points = okada_src.discretize(self.nx, self.ny)

        # NOTE(review): these per-patch values are recomputed here but appear
        # unused below -- confirm whether they should be assigned to the
        # individual patches.
        shear_mod, poisson = get_lame(
            self.lat, self.lon,
            num.array([src.source_patch()[:3] for src in source_disc]),
            interpolation=interpolation)

        # Rupture times/velocities: reuse the eikonal solution directly when
        # the patch grid matches it, otherwise interpolate at patch centers.
        if (self.nx, self.ny) != (nx, ny):
            times_interp = time_interpolator(
                num.ascontiguousarray(source_points[:, :2]))
            vr_interp = vr_interpolator(
                num.ascontiguousarray(source_points[:, :2]))
        else:
            times_interp = times.T.ravel()
            vr_interp = vr.T.ravel()

        for isrc, src in enumerate(source_disc):
            src.vr = vr_interp[isrc]
            # Patch times are absolute (relative rupture time + origin time).
            src.time = times_interp[isrc] + self.time

        self.patches = source_disc

3186 

3187 def discretize_basesource(self, store, target=None): 

3188 ''' 

3189 Prepare source for synthetic waveform calculation. 

3190 

3191 :param store: 

3192 Green's function database (needs to cover whole region of the 

3193 source). 

3194 :type store: 

3195 :py:class:`~pyrocko.gf.store.Store` 

3196 

3197 :param target: 

3198 Target information. 

3199 :type target: 

3200 optional, :py:class:`~pyrocko.gf.targets.Target` 

3201 

3202 :returns: 

3203 Source discretized by a set of moment tensors and times. 

3204 :rtype: 

3205 :py:class:`~pyrocko.gf.meta.DiscretizedMTSource` 

3206 ''' 

3207 if not target: 

3208 interpolation = 'nearest_neighbor' 

3209 else: 

3210 interpolation = target.interpolation 

3211 

3212 if not self.patches: 

3213 self.discretize_patches(store, interpolation) 

3214 

3215 if self.coef_mat is None: 

3216 self.calc_coef_mat() 

3217 

3218 delta_slip, slip_times = self.get_delta_slip(store) 

3219 npatches = self.nx * self.ny 

3220 ntimes = slip_times.size 

3221 

3222 anch_x, anch_y = map_anchor[self.anchor] 

3223 

3224 pln = self.length / self.nx 

3225 pwd = self.width / self.ny 

3226 

3227 patch_coords = num.array([ 

3228 (p.ix, p.iy) 

3229 for p in self.patches]).reshape(self.nx, self.ny, 2) 

3230 

3231 # boundary condition is zero-slip 

3232 # is not valid to avoid unwished interpolation effects 

3233 slip_grid = num.zeros((self.nx + 2, self.ny + 2, ntimes, 3)) 

3234 slip_grid[1:-1, 1:-1, :, :] = \ 

3235 delta_slip.reshape(self.nx, self.ny, ntimes, 3) 

3236 

3237 slip_grid[0, 0, :, :] = slip_grid[1, 1, :, :] 

3238 slip_grid[0, -1, :, :] = slip_grid[1, -2, :, :] 

3239 slip_grid[-1, 0, :, :] = slip_grid[-2, 1, :, :] 

3240 slip_grid[-1, -1, :, :] = slip_grid[-2, -2, :, :] 

3241 

3242 slip_grid[1:-1, 0, :, :] = slip_grid[1:-1, 1, :, :] 

3243 slip_grid[1:-1, -1, :, :] = slip_grid[1:-1, -2, :, :] 

3244 slip_grid[0, 1:-1, :, :] = slip_grid[1, 1:-1, :, :] 

3245 slip_grid[-1, 1:-1, :, :] = slip_grid[-2, 1:-1, :, :] 

3246 

3247 def make_grid(patch_parameter): 

3248 grid = num.zeros((self.nx + 2, self.ny + 2)) 

3249 grid[1:-1, 1:-1] = patch_parameter.reshape(self.nx, self.ny) 

3250 

3251 grid[0, 0] = grid[1, 1] 

3252 grid[0, -1] = grid[1, -2] 

3253 grid[-1, 0] = grid[-2, 1] 

3254 grid[-1, -1] = grid[-2, -2] 

3255 

3256 grid[1:-1, 0] = grid[1:-1, 1] 

3257 grid[1:-1, -1] = grid[1:-1, -2] 

3258 grid[0, 1:-1] = grid[1, 1:-1] 

3259 grid[-1, 1:-1] = grid[-2, 1:-1] 

3260 

3261 return grid 

3262 

3263 lamb = self.get_patch_attribute('lamb') 

3264 mu = self.get_patch_attribute('shearmod') 

3265 

3266 lamb_grid = make_grid(lamb) 

3267 mu_grid = make_grid(mu) 

3268 

3269 coords_x = num.zeros(self.nx + 2) 

3270 coords_x[1:-1] = patch_coords[:, 0, 0] 

3271 coords_x[0] = coords_x[1] - pln / 2 

3272 coords_x[-1] = coords_x[-2] + pln / 2 

3273 

3274 coords_y = num.zeros(self.ny + 2) 

3275 coords_y[1:-1] = patch_coords[0, :, 1] 

3276 coords_y[0] = coords_y[1] - pwd / 2 

3277 coords_y[-1] = coords_y[-2] + pwd / 2 

3278 

3279 slip_interp = RegularGridInterpolator( 

3280 (coords_x, coords_y, slip_times), 

3281 slip_grid, method='nearest') 

3282 

3283 lamb_interp = RegularGridInterpolator( 

3284 (coords_x, coords_y), 

3285 lamb_grid, method='nearest') 

3286 

3287 mu_interp = RegularGridInterpolator( 

3288 (coords_x, coords_y), 

3289 mu_grid, method='nearest') 

3290 

3291 # discretize basesources 

3292 mindeltagf = min(tuple( 

3293 (self.length / self.nx, self.width / self.ny) + 

3294 tuple(store.config.deltas))) 

3295 

3296 nl = int((1. / self.decimation_factor) * 

3297 num.ceil(pln / mindeltagf)) + 1 

3298 nw = int((1. / self.decimation_factor) * 

3299 num.ceil(pwd / mindeltagf)) + 1 

3300 nsrc_patch = int(nl * nw) 

3301 dl = pln / nl 

3302 dw = pwd / nw 

3303 

3304 patch_area = dl * dw 

3305 

3306 xl = num.linspace(-0.5 * (pln - dl), 0.5 * (pln - dl), nl) 

3307 xw = num.linspace(-0.5 * (pwd - dw), 0.5 * (pwd - dw), nw) 

3308 

3309 base_coords = num.zeros((nsrc_patch, 3)) 

3310 base_coords[:, 0] = num.tile(xl, nw) 

3311 base_coords[:, 1] = num.repeat(xw, nl) 

3312 base_coords = num.tile(base_coords, (npatches, 1)) 

3313 

3314 center_coords = num.zeros((npatches, 3)) 

3315 center_coords[:, 0] = num.repeat( 

3316 num.arange(self.nx) * pln + pln / 2, self.ny) - self.length / 2 

3317 center_coords[:, 1] = num.tile( 

3318 num.arange(self.ny) * pwd + pwd / 2, self.nx) - self.width / 2 

3319 

3320 base_coords -= center_coords.repeat(nsrc_patch, axis=0) 

3321 nbaselocs = base_coords.shape[0] 

3322 

3323 base_interp = base_coords.repeat(ntimes, axis=0) 

3324 

3325 base_times = num.tile(slip_times, nbaselocs) 

3326 base_interp[:, 0] -= anch_x * self.length / 2 

3327 base_interp[:, 1] -= anch_y * self.width / 2 

3328 base_interp[:, 2] = base_times 

3329 

3330 _, _, _, _, time_interpolator, _ = self.get_vr_time_interpolators( 

3331 store, interpolation=interpolation) 

3332 

3333 time_eikonal_max = time_interpolator.values.max() 

3334 

3335 nbasesrcs = base_interp.shape[0] 

3336 delta_slip = slip_interp(base_interp).reshape(nbaselocs, ntimes, 3) 

3337 lamb = lamb_interp(base_interp[:, :2]).ravel() 

3338 mu = mu_interp(base_interp[:, :2]).ravel() 

3339 

3340 if False: 

3341 try: 

3342 import matplotlib.pyplot as plt 

3343 coords = base_coords.copy() 

3344 norm = num.sum(num.linalg.norm(delta_slip, axis=2), axis=1) 

3345 plt.scatter(coords[:, 0], coords[:, 1], c=norm) 

3346 plt.show() 

3347 except AttributeError: 

3348 pass 

3349 

3350 base_interp[:, 2] = 0. 

3351 rotmat = pmt.euler_to_matrix(self.dip * d2r, self.strike * d2r, 0.0) 

3352 base_interp = num.dot(rotmat.T, base_interp.T).T 

3353 base_interp[:, 0] += self.north_shift 

3354 base_interp[:, 1] += self.east_shift 

3355 base_interp[:, 2] += self.depth 

3356 

3357 slip_strike = delta_slip[:, :, 0].ravel() 

3358 slip_dip = delta_slip[:, :, 1].ravel() 

3359 slip_norm = delta_slip[:, :, 2].ravel() 

3360 

3361 slip_shear = num.linalg.norm([slip_strike, slip_dip], axis=0) 

3362 slip_rake = r2d * num.arctan2(slip_dip, slip_strike) 

3363 

3364 m6s = okada_ext.patch2m6( 

3365 strikes=num.full(nbasesrcs, self.strike, dtype=float), 

3366 dips=num.full(nbasesrcs, self.dip, dtype=float), 

3367 rakes=slip_rake, 

3368 disl_shear=slip_shear, 

3369 disl_norm=slip_norm, 

3370 lamb=lamb, 

3371 mu=mu, 

3372 nthreads=self.nthreads) 

3373 

3374 m6s *= patch_area 

3375 

3376 dl = -self.patches[0].al1 + self.patches[0].al2 

3377 dw = -self.patches[0].aw1 + self.patches[0].aw2 

3378 

3379 base_times[base_times > time_eikonal_max] = time_eikonal_max 

3380 

3381 ds = meta.DiscretizedMTSource( 

3382 lat=self.lat, 

3383 lon=self.lon, 

3384 times=base_times + self.time, 

3385 north_shifts=base_interp[:, 0], 

3386 east_shifts=base_interp[:, 1], 

3387 depths=base_interp[:, 2], 

3388 m6s=m6s, 

3389 dl=dl, 

3390 dw=dw, 

3391 nl=self.nx, 

3392 nw=self.ny) 

3393 

3394 return ds 

3395 

3396 def calc_coef_mat(self): 

3397 ''' 

3398 Calculate coefficients connecting tractions and dislocations. 

3399 ''' 

3400 if not self.patches: 

3401 raise ValueError( 

3402 'Patches are needed. Please calculate them first.') 

3403 

3404 self.coef_mat = make_okada_coefficient_matrix( 

3405 self.patches, nthreads=self.nthreads, pure_shear=self.pure_shear) 

3406 

3407 def get_patch_attribute(self, attr): 

3408 ''' 

3409 Get patch attributes. 

3410 

3411 :param attr: 

3412 Name of selected attribute (see 

3413 :py:class`pyrocko.modelling.okada.OkadaSource`). 

3414 :type attr: 

3415 str 

3416 

3417 :returns: 

3418 Array with attribute value for each fault patch. 

3419 :rtype: 

3420 :py:class:`~numpy.ndarray` 

3421 

3422 ''' 

3423 if not self.patches: 

3424 raise ValueError( 

3425 'Patches are needed. Please calculate them first.') 

3426 return num.array([getattr(p, attr) for p in self.patches]) 

3427 

3428 def get_slip( 

3429 self, 

3430 time=None, 

3431 scale_slip=True, 

3432 interpolation='nearest_neighbor', 

3433 **kwargs): 

3434 ''' 

3435 Get slip per subfault patch for given time after rupture start. 

3436 

3437 :param time: 

3438 Time after origin [s], for which slip is computed. If not 

3439 given, final static slip is returned. 

3440 :type time: 

3441 optional, float > 0. 

3442 

3443 :param scale_slip: 

3444 If ``True`` and :py:attr:`slip` given, all slip values are scaled 

3445 to fit the given maximum slip. 

3446 :type scale_slip: 

3447 optional, bool 

3448 

3449 :param interpolation: 

3450 Interpolation method to use (choose between ``'nearest_neighbor'`` 

3451 and ``'multilinear'``). 

3452 :type interpolation: 

3453 optional, str 

3454 

3455 :returns: 

3456 Inverted dislocations (:math:`u_{strike}, u_{dip}, u_{tensile}`) 

3457 for each source patch. 

3458 :rtype: 

3459 :py:class:`~numpy.ndarray`: ``(n_sources, 3)`` 

3460 ''' 

3461 

3462 if self.patches is None: 

3463 raise ValueError( 

3464 'Please discretize the source first (discretize_patches())') 

3465 npatches = len(self.patches) 

3466 tractions = self.get_tractions() 

3467 time_patch_max = self.get_patch_attribute('time').max() - self.time 

3468 

3469 time_patch = time 

3470 if time is None: 

3471 time_patch = time_patch_max 

3472 

3473 if self.coef_mat is None: 

3474 self.calc_coef_mat() 

3475 

3476 if tractions.shape != (npatches, 3): 

3477 raise AttributeError( 

3478 'The traction vector is of invalid shape.' 

3479 ' Required shape is (npatches, 3)') 

3480 

3481 patch_mask = num.ones(npatches, dtype=bool) 

3482 if self.patch_mask is not None: 

3483 patch_mask = self.patch_mask 

3484 

3485 times = self.get_patch_attribute('time') - self.time 

3486 times[~patch_mask] = time_patch + 1. # exlcude unmasked patches 

3487 relevant_sources = num.nonzero(times <= time_patch)[0] 

3488 disloc_est = num.zeros_like(tractions) 

3489 

3490 if self.smooth_rupture: 

3491 patch_activation = num.zeros(npatches) 

3492 

3493 nx, ny, times, vr, time_interpolator, vr_interpolator = \ 

3494 self.get_vr_time_interpolators( 

3495 store, interpolation=interpolation) 

3496 

3497 # Getting the native Eikonal grid, bit hackish 

3498 points_x = num.round(time_interpolator.grid[0], decimals=2) 

3499 points_y = num.round(time_interpolator.grid[1], decimals=2) 

3500 times_eikonal = time_interpolator.values 

3501 

3502 time_max = time 

3503 if time is None: 

3504 time_max = times_eikonal.max() 

3505 

3506 for ip, p in enumerate(self.patches): 

3507 ul = num.round((p.ix + p.al1, p.iy + p.aw1), decimals=2) 

3508 lr = num.round((p.ix + p.al2, p.iy + p.aw2), decimals=2) 

3509 

3510 idx_length = num.logical_and( 

3511 points_x >= ul[0], points_x <= lr[0]) 

3512 idx_width = num.logical_and( 

3513 points_y >= ul[1], points_y <= lr[1]) 

3514 

3515 times_patch = times_eikonal[num.ix_(idx_length, idx_width)] 

3516 if times_patch.size == 0: 

3517 raise AttributeError('could not use smooth_rupture') 

3518 

3519 patch_activation[ip] = \ 

3520 (times_patch <= time_max).sum() / times_patch.size 

3521 

3522 if time_patch == 0 and time_patch != time_patch_max: 

3523 patch_activation[ip] = 0. 

3524 

3525 patch_activation[~patch_mask] = 0. # exlcude unmasked patches 

3526 

3527 relevant_sources = num.nonzero(patch_activation > 0.)[0] 

3528 

3529 if relevant_sources.size == 0: 

3530 return disloc_est 

3531 

3532 indices_disl = num.repeat(relevant_sources * 3, 3) 

3533 indices_disl[1::3] += 1 

3534 indices_disl[2::3] += 2 

3535 

3536 disloc_est[relevant_sources] = invert_fault_dislocations_bem( 

3537 stress_field=tractions[relevant_sources, :].ravel(), 

3538 coef_mat=self.coef_mat[indices_disl, :][:, indices_disl], 

3539 pure_shear=self.pure_shear, nthreads=self.nthreads, 

3540 epsilon=None, 

3541 **kwargs) 

3542 

3543 if self.smooth_rupture: 

3544 disloc_est *= patch_activation[:, num.newaxis] 

3545 

3546 if scale_slip and self.slip is not None: 

3547 disloc_tmax = num.zeros(npatches) 

3548 

3549 indices_disl = num.repeat(num.nonzero(patch_mask)[0] * 3, 3) 

3550 indices_disl[1::3] += 1 

3551 indices_disl[2::3] += 2 

3552 

3553 disloc_tmax[patch_mask] = num.linalg.norm( 

3554 invert_fault_dislocations_bem( 

3555 stress_field=tractions[patch_mask, :].ravel(), 

3556 coef_mat=self.coef_mat[indices_disl, :][:, indices_disl], 

3557 pure_shear=self.pure_shear, nthreads=self.nthreads, 

3558 epsilon=None, 

3559 **kwargs), axis=1) 

3560 

3561 disloc_tmax_max = disloc_tmax.max() 

3562 if disloc_tmax_max == 0.: 

3563 logger.warning( 

3564 'slip scaling not performed. Maximum slip is 0.') 

3565 

3566 disloc_est *= self.slip / disloc_tmax_max 

3567 

3568 return disloc_est 

3569 

3570 def get_delta_slip( 

3571 self, 

3572 store=None, 

3573 deltat=None, 

3574 delta=True, 

3575 interpolation='nearest_neighbor', 

3576 **kwargs): 

3577 ''' 

3578 Get slip change snapshots. 

3579 

3580 The time interval, within which the slip changes are computed is 

3581 determined by the sampling rate of the Green's function database or 

3582 ``deltat``. Additional ``**kwargs`` are passed to :py:meth:`get_slip`. 

3583 

3584 :param store: 

3585 Green's function database (needs to cover whole region of of the 

3586 source). Its sampling interval is used as time increment for slip 

3587 difference calculation. Either ``deltat`` or ``store`` should be 

3588 given. 

3589 :type store: 

3590 optional, :py:class:`~pyrocko.gf.store.Store` 

3591 

3592 :param deltat: 

3593 Time interval for slip difference calculation [s]. Either 

3594 ``deltat`` or ``store`` should be given. 

3595 :type deltat: 

3596 optional, float 

3597 

3598 :param delta: 

3599 If ``True``, slip differences between two time steps are given. If 

3600 ``False``, cumulative slip for all time steps. 

3601 :type delta: 

3602 optional, bool 

3603 

3604 :param interpolation: 

3605 Interpolation method to use (choose between ``'nearest_neighbor'`` 

3606 and ``'multilinear'``). 

3607 :type interpolation: 

3608 optional, str 

3609 

3610 :returns: 

3611 Displacement changes(:math:`\\Delta u_{strike}, 

3612 \\Delta u_{dip} , \\Delta u_{tensile}`) for each source patch and 

3613 time; corner times, for which delta slip is computed. The order of 

3614 displacement changes array is: 

3615 

3616 .. math:: 

3617 

3618 &[[\\\\ 

3619 &[\\Delta u_{strike, patch1, t1}, 

3620 \\Delta u_{dip, patch1, t1}, 

3621 \\Delta u_{tensile, patch1, t1}],\\\\ 

3622 &[\\Delta u_{strike, patch1, t2}, 

3623 \\Delta u_{dip, patch1, t2}, 

3624 \\Delta u_{tensile, patch1, t2}]\\\\ 

3625 &], [\\\\ 

3626 &[\\Delta u_{strike, patch2, t1}, ...],\\\\ 

3627 &[\\Delta u_{strike, patch2, t2}, ...]]]\\\\ 

3628 

3629 :rtype: :py:class:`~numpy.ndarray`: ``(n_sources, n_times, 3)``, 

3630 :py:class:`~numpy.ndarray`: ``(n_times, )`` 

3631 ''' 

3632 if store and deltat: 

3633 raise AttributeError( 

3634 'Argument collision. ' 

3635 'Please define only the store or the deltat argument.') 

3636 

3637 if store: 

3638 deltat = store.config.deltat 

3639 

3640 if not deltat: 

3641 raise AttributeError('Please give a GF store or set deltat.') 

3642 

3643 npatches = len(self.patches) 

3644 

3645 _, _, _, _, time_interpolator, _ = self.get_vr_time_interpolators( 

3646 store, interpolation=interpolation) 

3647 tmax = time_interpolator.values.max() 

3648 

3649 calc_times = num.arange(0., tmax + deltat, deltat) 

3650 calc_times[calc_times > tmax] = tmax 

3651 

3652 disloc_est = num.zeros((npatches, calc_times.size, 3)) 

3653 

3654 for itime, t in enumerate(calc_times): 

3655 disloc_est[:, itime, :] = self.get_slip( 

3656 time=t, scale_slip=False, **kwargs) 

3657 

3658 if self.slip: 

3659 disloc_tmax = num.linalg.norm( 

3660 self.get_slip(scale_slip=False, time=tmax), 

3661 axis=1) 

3662 

3663 disloc_tmax_max = disloc_tmax.max() 

3664 if disloc_tmax_max == 0.: 

3665 logger.warning( 

3666 'Slip scaling not performed. Maximum slip is 0.') 

3667 else: 

3668 disloc_est *= self.slip / disloc_tmax_max 

3669 

3670 if not delta: 

3671 return disloc_est, calc_times 

3672 

3673 # if we have only one timestep there is no gradient 

3674 if calc_times.size > 1: 

3675 disloc_init = disloc_est[:, 0, :] 

3676 disloc_est = num.diff(disloc_est, axis=1) 

3677 disloc_est = num.concatenate(( 

3678 disloc_init[:, num.newaxis, :], disloc_est), axis=1) 

3679 

3680 calc_times = calc_times 

3681 

3682 return disloc_est, calc_times 

3683 

3684 def get_slip_rate(self, *args, **kwargs): 

3685 ''' 

3686 Get slip rate inverted from patches. 

3687 

3688 The time interval, within which the slip rates are computed is 

3689 determined by the sampling rate of the Green's function database or 

3690 ``deltat``. Additional ``*args`` and ``**kwargs`` are passed to 

3691 :py:meth:`get_delta_slip`. 

3692 

3693 :returns: 

3694 Slip rates (:math:`\\Delta u_{strike}/\\Delta t`, 

3695 :math:`\\Delta u_{dip}/\\Delta t, \\Delta u_{tensile}/\\Delta t`) 

3696 for each source patch and time; corner times, for which slip rate 

3697 is computed. The order of sliprate array is: 

3698 

3699 .. math:: 

3700 

3701 &[[\\\\ 

3702 &[\\Delta u_{strike, patch1, t1}/\\Delta t, 

3703 \\Delta u_{dip, patch1, t1}/\\Delta t, 

3704 \\Delta u_{tensile, patch1, t1}/\\Delta t],\\\\ 

3705 &[\\Delta u_{strike, patch1, t2}/\\Delta t, 

3706 \\Delta u_{dip, patch1, t2}/\\Delta t, 

3707 \\Delta u_{tensile, patch1, t2}/\\Delta t]], [\\\\ 

3708 &[\\Delta u_{strike, patch2, t1}/\\Delta t, ...],\\\\ 

3709 &[\\Delta u_{strike, patch2, t2}/\\Delta t, ...]]]\\\\ 

3710 

3711 :rtype: :py:class:`~numpy.ndarray`: ``(n_sources, n_times, 3)``, 

3712 :py:class:`~numpy.ndarray`: ``(n_times, )`` 

3713 ''' 

3714 ddisloc_est, calc_times = self.get_delta_slip( 

3715 *args, delta=True, **kwargs) 

3716 

3717 dt = num.concatenate( 

3718 [(num.diff(calc_times)[0], ), num.diff(calc_times)]) 

3719 slip_rate = num.linalg.norm(ddisloc_est, axis=2) / dt 

3720 

3721 return slip_rate, calc_times 

3722 

3723 def get_moment_rate_patches(self, *args, **kwargs): 

3724 ''' 

3725 Get scalar seismic moment rate for each patch individually. 

3726 

3727 Additional ``*args`` and ``**kwargs`` are passed to 

3728 :py:meth:`get_slip_rate`. 

3729 

3730 :returns: 

3731 Seismic moment rate for each source patch and time; corner times, 

3732 for which patch moment rate is computed based on slip rate. The 

3733 order of the moment rate array is: 

3734 

3735 .. math:: 

3736 

3737 &[\\\\ 

3738 &[(\\Delta M / \\Delta t)_{patch1, t1}, 

3739 (\\Delta M / \\Delta t)_{patch1, t2}, ...],\\\\ 

3740 &[(\\Delta M / \\Delta t)_{patch2, t1}, 

3741 (\\Delta M / \\Delta t)_{patch, t2}, ...],\\\\ 

3742 &[...]]\\\\ 

3743 

3744 :rtype: :py:class:`~numpy.ndarray`: ``(n_sources, n_times)``, 

3745 :py:class:`~numpy.ndarray`: ``(n_times, )`` 

3746 ''' 

3747 slip_rate, calc_times = self.get_slip_rate(*args, **kwargs) 

3748 

3749 shear_mod = self.get_patch_attribute('shearmod') 

3750 p_length = self.get_patch_attribute('length') 

3751 p_width = self.get_patch_attribute('width') 

3752 

3753 dA = p_length * p_width 

3754 

3755 mom_rate = shear_mod[:, num.newaxis] * slip_rate * dA[:, num.newaxis] 

3756 

3757 return mom_rate, calc_times 

3758 

3759 def get_moment_rate(self, store, target=None, deltat=None): 

3760 ''' 

3761 Get seismic source moment rate for the total source (STF). 

3762 

3763 :param store: 

3764 Green's function database (needs to cover whole region of of the 

3765 source). Its ``deltat`` [s] is used as time increment for slip 

3766 difference calculation. Either ``deltat`` or ``store`` should be 

3767 given. 

3768 :type store: 

3769 :py:class:`~pyrocko.gf.store.Store` 

3770 

3771 :param target: 

3772 Target information, needed for interpolation method. 

3773 :type target: 

3774 optional, :py:class:`~pyrocko.gf.targets.Target` 

3775 

3776 :param deltat: 

3777 Time increment for slip difference calculation [s]. If not given 

3778 ``store.deltat`` is used. 

3779 :type deltat: 

3780 optional, float 

3781 

3782 :return: 

3783 Seismic moment rate [Nm/s] for each time; corner times, for which 

3784 moment rate is computed. The order of the moment rate array is: 

3785 

3786 .. math:: 

3787 

3788 &[\\\\ 

3789 &(\\Delta M / \\Delta t)_{t1},\\\\ 

3790 &(\\Delta M / \\Delta t)_{t2},\\\\ 

3791 &...]\\\\ 

3792 

3793 :rtype: 

3794 :py:class:`~numpy.ndarray`: ``(n_times, )``, 

3795 :py:class:`~numpy.ndarray`: ``(n_times, )`` 

3796 ''' 

3797 if not deltat: 

3798 deltat = store.config.deltat 

3799 return self.discretize_basesource( 

3800 store, target=target).get_moment_rate(deltat) 

3801 

3802 def get_moment(self, *args, **kwargs): 

3803 ''' 

3804 Get seismic cumulative moment. 

3805 

3806 Additional ``*args`` and ``**kwargs`` are passed to 

3807 :py:meth:`get_magnitude`. 

3808 

3809 :returns: 

3810 Cumulative seismic moment in [Nm]. 

3811 :rtype: 

3812 float 

3813 ''' 

3814 return float(pmt.magnitude_to_moment(self.get_magnitude( 

3815 *args, **kwargs))) 

3816 

3817 def rescale_slip(self, magnitude=None, moment=None, **kwargs): 

3818 ''' 

3819 Rescale source slip based on given target magnitude or seismic moment. 

3820 

3821 Rescale the maximum source slip to fit the source moment magnitude or 

3822 seismic moment to the given target values. Either ``magnitude`` or 

3823 ``moment`` need to be given. Additional ``**kwargs`` are passed to 

3824 :py:meth:`get_moment`. 

3825 

3826 :param magnitude: 

3827 Target moment magnitude :math:`M_\\mathrm{w}` as in 

3828 [Hanks and Kanamori, 1979] 

3829 :type magnitude: 

3830 optional, float 

3831 

3832 :param moment: 

3833 Target seismic moment :math:`M_0` [Nm]. 

3834 :type moment: 

3835 optional, float 

3836 ''' 

3837 if self.slip is None: 

3838 self.slip = 1. 

3839 logger.warning('No slip found for rescaling. ' 

3840 'An initial slip of 1 m is assumed.') 

3841 

3842 if magnitude is None and moment is None: 

3843 raise ValueError( 

3844 'Either target magnitude or moment need to be given.') 

3845 

3846 moment_init = self.get_moment(**kwargs) 

3847 

3848 if magnitude is not None: 

3849 moment = pmt.magnitude_to_moment(magnitude) 

3850 

3851 self.slip *= moment / moment_init 

3852 

3853 def get_centroid(self, store, *args, **kwargs): 

3854 ''' 

3855 Centroid of the pseudo dynamic rupture model. 

3856 

3857 The centroid location and time are derived from the locations and times 

3858 of the individual patches weighted with their moment contribution. 

3859 Additional ``**kwargs`` are passed to :py:meth:`pyrocko_moment_tensor`. 

3860 

3861 :param store: 

3862 Green's function database (needs to cover whole region of of the 

3863 source). Its ``deltat`` [s] is used as time increment for slip 

3864 difference calculation. Either ``deltat`` or ``store`` should be 

3865 given. 

3866 :type store: 

3867 :py:class:`~pyrocko.gf.store.Store` 

3868 

3869 :returns: 

3870 The centroid location and associated moment tensor. 

3871 :rtype: 

3872 :py:class:`pyrocko.model.Event` 

3873 ''' 

3874 _, _, _, _, time, _ = self.get_vr_time_interpolators(store) 

3875 t_max = time.values.max() 

3876 

3877 moment_rate, times = self.get_moment_rate_patches(deltat=t_max) 

3878 

3879 moment = num.sum(moment_rate * times, axis=1) 

3880 weights = moment / moment.sum() 

3881 

3882 norths = self.get_patch_attribute('north_shift') 

3883 easts = self.get_patch_attribute('east_shift') 

3884 depths = self.get_patch_attribute('depth') 

3885 

3886 centroid_n = num.sum(weights * norths) 

3887 centroid_e = num.sum(weights * easts) 

3888 centroid_d = num.sum(weights * depths) 

3889 

3890 centroid_lat, centroid_lon = ne_to_latlon( 

3891 self.lat, self.lon, centroid_n, centroid_e) 

3892 

3893 moment_rate_, times = self.get_moment_rate(store) 

3894 delta_times = num.concatenate(( 

3895 [times[1] - times[0]], 

3896 num.diff(times))) 

3897 moment_src = delta_times * moment_rate 

3898 

3899 centroid_t = num.sum( 

3900 moment_src / num.sum(moment_src) * times) + self.time 

3901 

3902 mt = self.pyrocko_moment_tensor(store, *args, **kwargs) 

3903 

3904 return model.Event( 

3905 lat=centroid_lat, 

3906 lon=centroid_lon, 

3907 depth=centroid_d, 

3908 time=centroid_t, 

3909 moment_tensor=mt, 

3910 magnitude=mt.magnitude, 

3911 duration=t_max) 

3912 

3913 

3914class DoubleDCSource(SourceWithMagnitude): 

3915 ''' 

3916 Two double-couple point sources separated in space and time. 

3917 Moment share between the sub-sources is controlled by the 

3918 parameter mix. 

3919 The position of the subsources is dependent on the moment 

3920 distribution between the two sources. Depth, east and north 

3921 shift are given for the centroid between the two double-couples. 

3922 The subsources will positioned according to their moment shares 

3923 around this centroid position. 

3924 This is done according to their delta parameters, which are 

3925 therefore in relation to that centroid. 

3926 Note that depth of the subsources therefore can be 

3927 depth+/-delta_depth. For shallow earthquakes therefore 

3928 the depth has to be chosen deeper to avoid sampling 

3929 above surface. 

3930 ''' 

3931 

3932 strike1 = Float.T( 

3933 default=0.0, 

3934 help='strike direction in [deg], measured clockwise from north') 

3935 

3936 dip1 = Float.T( 

3937 default=90.0, 

3938 help='dip angle in [deg], measured downward from horizontal') 

3939 

3940 azimuth = Float.T( 

3941 default=0.0, 

3942 help='azimuth to second double-couple [deg], ' 

3943 'measured at first, clockwise from north') 

3944 

3945 rake1 = Float.T( 

3946 default=0.0, 

3947 help='rake angle in [deg], ' 

3948 'measured counter-clockwise from right-horizontal ' 

3949 'in on-plane view') 

3950 

3951 strike2 = Float.T( 

3952 default=0.0, 

3953 help='strike direction in [deg], measured clockwise from north') 

3954 

3955 dip2 = Float.T( 

3956 default=90.0, 

3957 help='dip angle in [deg], measured downward from horizontal') 

3958 

3959 rake2 = Float.T( 

3960 default=0.0, 

3961 help='rake angle in [deg], ' 

3962 'measured counter-clockwise from right-horizontal ' 

3963 'in on-plane view') 

3964 

3965 delta_time = Float.T( 

3966 default=0.0, 

3967 help='separation of double-couples in time (t2-t1) [s]') 

3968 

3969 delta_depth = Float.T( 

3970 default=0.0, 

3971 help='difference in depth (z2-z1) [m]') 

3972 

3973 distance = Float.T( 

3974 default=0.0, 

3975 help='distance between the two double-couples [m]') 

3976 

3977 mix = Float.T( 

3978 default=0.5, 

3979 help='how to distribute the moment to the two doublecouples ' 

3980 'mix=0 -> m1=1 and m2=0; mix=1 -> m1=0, m2=1') 

3981 

3982 stf1 = STF.T( 

3983 optional=True, 

3984 help='Source time function of subsource 1 ' 

3985 '(if given, overrides STF from attribute :py:gattr:`Source.stf`)') 

3986 

3987 stf2 = STF.T( 

3988 optional=True, 

3989 help='Source time function of subsource 2 ' 

3990 '(if given, overrides STF from attribute :py:gattr:`Source.stf`)') 

3991 

3992 discretized_source_class = meta.DiscretizedMTSource 

3993 

3994 def base_key(self): 

3995 return ( 

3996 self.time, self.depth, self.lat, self.north_shift, 

3997 self.lon, self.east_shift, type(self).__name__) + \ 

3998 self.effective_stf1_pre().base_key() + \ 

3999 self.effective_stf2_pre().base_key() + ( 

4000 self.strike1, self.dip1, self.rake1, 

4001 self.strike2, self.dip2, self.rake2, 

4002 self.delta_time, self.delta_depth, 

4003 self.azimuth, self.distance, self.mix) 

4004 

    def get_factor(self):
        '''
        Get the scaling factor applied to the discretized source.

        For this source type the factor is the total scalar moment; the
        moment is shared between the two sub-sources via :py:gattr:`mix`.
        '''
        return self.moment

4007 

4008 def effective_stf1_pre(self): 

4009 return self.stf1 or self.stf or g_unit_pulse 

4010 

4011 def effective_stf2_pre(self): 

4012 return self.stf2 or self.stf or g_unit_pulse 

4013 

    def effective_stf_post(self):
        # The sub-source STFs are applied in the pre-stage (see
        # effective_stf1_pre / effective_stf2_pre), so no additional
        # post-discretization STF is applied -- a unit pulse is returned.
        return g_unit_pulse

4016 

4017 def split(self): 

4018 a1 = 1.0 - self.mix 

4019 a2 = self.mix 

4020 delta_north = math.cos(self.azimuth * d2r) * self.distance 

4021 delta_east = math.sin(self.azimuth * d2r) * self.distance 

4022 

4023 dc1 = DCSource( 

4024 lat=self.lat, 

4025 lon=self.lon, 

4026 time=self.time - self.delta_time * a2, 

4027 north_shift=self.north_shift - delta_north * a2, 

4028 east_shift=self.east_shift - delta_east * a2, 

4029 depth=self.depth - self.delta_depth * a2, 

4030 moment=self.moment * a1, 

4031 strike=self.strike1, 

4032 dip=self.dip1, 

4033 rake=self.rake1, 

4034 stf=self.stf1 or self.stf) 

4035 

4036 dc2 = DCSource( 

4037 lat=self.lat, 

4038 lon=self.lon, 

4039 time=self.time + self.delta_time * a1, 

4040 north_shift=self.north_shift + delta_north * a1, 

4041 east_shift=self.east_shift + delta_east * a1, 

4042 depth=self.depth + self.delta_depth * a1, 

4043 moment=self.moment * a2, 

4044 strike=self.strike2, 

4045 dip=self.dip2, 

4046 rake=self.rake2, 

4047 stf=self.stf2 or self.stf) 

4048 

4049 return [dc1, dc2] 

4050 

    def discretize_basesource(self, store, target=None):
        # Build a discretized MT source containing both sub-double-
        # couples, each with its own STF discretization and its own
        # spatial/temporal offset from the centroid.
        a1 = 1.0 - self.mix
        a2 = self.mix
        # Unit-moment tensors weighted by the mix; the total moment is
        # applied later via get_factor().
        mot1 = pmt.MomentTensor(strike=self.strike1, dip=self.dip1,
                                rake=self.rake1, scalar_moment=a1)
        mot2 = pmt.MomentTensor(strike=self.strike2, dip=self.dip2,
                                rake=self.rake2, scalar_moment=a2)

        # Horizontal separation vector between the two sub-sources.
        delta_north = math.cos(self.azimuth * d2r) * self.distance
        delta_east = math.sin(self.azimuth * d2r) * self.distance

        times1, amplitudes1 = self.effective_stf1_pre().discretize_t(
            store.config.deltat, self.time - self.delta_time * a2)

        times2, amplitudes2 = self.effective_stf2_pre().discretize_t(
            store.config.deltat, self.time + self.delta_time * a1)

        nt1 = times1.size
        nt2 = times2.size

        # First nt1 samples belong to sub-source 1, the rest to
        # sub-source 2; positions are repeated per STF sample.
        ds = meta.DiscretizedMTSource(
            lat=self.lat,
            lon=self.lon,
            times=num.concatenate((times1, times2)),
            north_shifts=num.concatenate((
                num.repeat(self.north_shift - delta_north * a2, nt1),
                num.repeat(self.north_shift + delta_north * a1, nt2))),
            east_shifts=num.concatenate((
                num.repeat(self.east_shift - delta_east * a2, nt1),
                num.repeat(self.east_shift + delta_east * a1, nt2))),
            depths=num.concatenate((
                num.repeat(self.depth - self.delta_depth * a2, nt1),
                num.repeat(self.depth + self.delta_depth * a1, nt2))),
            m6s=num.vstack((
                mot1.m6()[num.newaxis, :] * amplitudes1[:, num.newaxis],
                mot2.m6()[num.newaxis, :] * amplitudes2[:, num.newaxis])))

        return ds

4089 

    def pyrocko_moment_tensor(self, store=None, target=None):
        # Combined mechanism: moment-weighted sum of the two sub-double-
        # couple tensors.
        a1 = 1.0 - self.mix
        a2 = self.mix
        mot1 = pmt.MomentTensor(strike=self.strike1, dip=self.dip1,
                                rake=self.rake1,
                                scalar_moment=a1 * self.moment)
        mot2 = pmt.MomentTensor(strike=self.strike2, dip=self.dip2,
                                rake=self.rake2,
                                scalar_moment=a2 * self.moment)
        return pmt.MomentTensor(m=mot1.m() + mot2.m())

4100 

    def pyrocko_event(self, store=None, target=None, **kwargs):
        # Delegate to the base implementation, attaching the combined
        # moment tensor of both sub-sources.
        return SourceWithMagnitude.pyrocko_event(
            self, store, target,
            moment_tensor=self.pyrocko_moment_tensor(store, target),
            **kwargs)

4106 

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        '''
        Create a source from a pyrocko event.

        If the event carries a moment tensor, both sub-sources get the
        same (strike, dip, rake) and ``mix=0.0``, putting all moment on
        sub-source 1.
        '''
        d = {}
        mt = ev.moment_tensor
        if mt:
            (strike, dip, rake), _ = mt.both_strike_dip_rake()
            d.update(
                strike1=float(strike),
                dip1=float(dip),
                rake1=float(rake),
                strike2=float(strike),
                dip2=float(dip),
                rake2=float(rake),
                mix=0.0,
                magnitude=float(mt.moment_magnitude()))

        # Explicit keyword arguments take precedence over values derived
        # from the event.
        d.update(kwargs)
        source = super(DoubleDCSource, cls).from_pyrocko_event(ev, **d)
        source.stf1 = source.stf
        # Sub-source 2 carries no moment (mix=0.0); give it a zero-length
        # placeholder STF.
        source.stf2 = HalfSinusoidSTF(effective_duration=0.)
        source.stf = None
        return source

4129 

4130 

class RingfaultSource(SourceWithMagnitude):
    '''
    A ring fault with vertical doublecouples.

    The ring is discretized into ``npointsources`` point sources placed
    on a circle of the given diameter, optionally tilted by ``strike``
    and ``dip``.
    '''

    diameter = Float.T(
        default=1.0,
        help='diameter of the ring in [m]')

    sign = Float.T(
        default=1.0,
        help='inside of the ring moves up (+1) or down (-1)')

    strike = Float.T(
        default=0.0,
        help='strike direction of the ring plane, clockwise from north,'
             ' in [deg]')

    dip = Float.T(
        default=0.0,
        help='dip angle of the ring plane from horizontal in [deg]')

    npointsources = Int.T(
        default=360,
        help='number of point sources to use')

    discretized_source_class = meta.DiscretizedMTSource

    def base_key(self):
        # ``sign`` is excluded: it only flips the overall factor (see
        # get_factor) and does not change the GF requirements.
        return Source.base_key(self) + (
            self.strike, self.dip, self.diameter, self.npointsources)

    def get_factor(self):
        return self.sign * self.moment

    def discretize_basesource(self, store=None, target=None):
        # n point sources equally spaced in angle on the ring.
        n = self.npointsources
        phi = num.linspace(0, 2.0 * num.pi, n, endpoint=False)

        points = num.zeros((n, 3))
        points[:, 0] = num.cos(phi) * 0.5 * self.diameter
        points[:, 1] = num.sin(phi) * 0.5 * self.diameter

        # Tilt the ring plane according to dip and strike.
        rotmat = pmt.euler_to_matrix(self.dip * d2r, self.strike * d2r, 0.0)
        # NOTE(review): transpose convention here was flagged with
        # "!!! ?" in the original source -- unverified.
        points = num.dot(rotmat.T, points.T).T

        points[:, 0] += self.north_shift
        points[:, 1] += self.east_shift
        points[:, 2] += self.depth

        # Unit mechanism scaled by 1/n so the n contributions sum to
        # unit moment.
        m = num.array(pmt.MomentTensor(strike=90., dip=90., rake=-90.,
                                       scalar_moment=1.0 / n).m())

        # Per-point rotation about the vertical axis so that each
        # double-couple follows the ring azimuth.
        rotmats = num.transpose(
            [[num.cos(phi), num.sin(phi), num.zeros(n)],
             [-num.sin(phi), num.cos(phi), num.zeros(n)],
             [num.zeros(n), num.zeros(n), num.ones(n)]], (2, 0, 1))

        ms = num.zeros((n, 3, 3))
        for i in range(n):
            mtemp = num.dot(rotmats[i].T, num.dot(m, rotmats[i]))
            ms[i, :, :] = num.dot(rotmat.T, num.dot(mtemp, rotmat))

        # Pack the symmetric tensors into 6-component rows.
        m6s = num.vstack((ms[:, 0, 0], ms[:, 1, 1], ms[:, 2, 2],
                          ms[:, 0, 1], ms[:, 0, 2], ms[:, 1, 2])).T

        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)

        nt = times.size

        # Every point source is repeated for each STF sample.
        return meta.DiscretizedMTSource(
            times=num.tile(times, n),
            lat=self.lat,
            lon=self.lon,
            north_shifts=num.repeat(points[:, 0], nt),
            east_shifts=num.repeat(points[:, 1], nt),
            depths=num.repeat(points[:, 2], nt),
            m6s=num.repeat(m6s, nt, axis=0) * num.tile(
                amplitudes, n)[:, num.newaxis])

4211 

4212 

class CombiSource(Source):
    '''
    Composite source model.

    Container for a set of sub-sources. The composite origin (lat, lon,
    shifts, depth, time) is derived from the mean of the sub-sources'
    positions after normalizing them to a common reference origin.
    '''

    discretized_source_class = meta.DiscretizedMTSource

    subsources = List.T(Source.T())

    def __init__(self, subsources=None, **kwargs):
        '''
        :param subsources: non-empty list of :py:class:`Source` objects
        :raises BadRequest: if no sub-source is given
        '''
        # Use ``None`` as "not given" to avoid the mutable-default-
        # argument pitfall.
        subsources = subsources or []
        if not subsources:
            raise BadRequest(
                'Need at least one sub-source to create a CombiSource object.')

        lats = num.array(
            [subsource.lat for subsource in subsources], dtype=float)
        lons = num.array(
            [subsource.lon for subsource in subsources], dtype=float)

        lat, lon = lats[0], lons[0]
        # Relocate sub-sources to a common origin whenever they do not
        # all share the same (lat, lon). The original condition was
        # ``not a and b`` which, by precedence, skipped the relocation
        # whenever the longitudes differed.
        if not (num.all(lats == lat) and num.all(lons == lon)):
            subsources = [s.clone() for s in subsources]
            # The first sub-source already sits at (lat, lon).
            for subsource in subsources[1:]:
                subsource.set_origin(lat, lon)

        depth = float(num.mean([p.depth for p in subsources]))
        time = float(num.mean([p.time for p in subsources]))
        north_shift = float(num.mean([p.north_shift for p in subsources]))
        east_shift = float(num.mean([p.east_shift for p in subsources]))
        kwargs.update(
            time=time,
            lat=float(lat),
            lon=float(lon),
            north_shift=north_shift,
            east_shift=east_shift,
            depth=depth)

        Source.__init__(self, subsources=subsources, **kwargs)

    def get_factor(self):
        # Sub-source factors are applied individually in
        # discretize_basesource; the container itself scales by one.
        return 1.0

    def discretize_basesource(self, store, target=None):
        '''
        Discretize all sub-sources and merge them into one
        :py:class:`~pyrocko.gf.meta.DiscretizedMTSource`.
        '''
        dsources = []
        for sf in self.subsources:
            ds = sf.discretize_basesource(store, target)
            # Bake each sub-source's own scale factor into its moment
            # tensors before combining.
            ds.m6s *= sf.get_factor()
            dsources.append(ds)

        return meta.DiscretizedMTSource.combine(dsources)

4263 

4264 

class SFSource(Source):
    '''
    A single force point source.

    Supported GF schemes: `'elastic5'`.
    '''

    discretized_source_class = meta.DiscretizedSFSource

    fn = Float.T(
        default=0.,
        help='northward component of single force [N]')

    fe = Float.T(
        default=0.,
        help='eastward component of single force [N]')

    fd = Float.T(
        default=0.,
        help='downward component of single force [N]')

    def __init__(self, **kwargs):
        Source.__init__(self, **kwargs)

    def base_key(self):
        # The force vector extends the base source's caching key.
        return Source.base_key(self) + (self.fn, self.fe, self.fd)

    def get_factor(self):
        return 1.0

    def discretize_basesource(self, store, target=None):
        # Spread the (fn, fe, fd) force vector over the discretized
        # source time function samples.
        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)
        force_vector = num.array(
            [[self.fn, self.fe, self.fd]], dtype=float)
        forces = amplitudes[:, num.newaxis] * force_vector

        return meta.DiscretizedSFSource(
            forces=forces,
            **self._dparams_base_repeated(times))

    def pyrocko_event(self, store=None, target=None, **kwargs):
        return Source.pyrocko_event(self, store, target, **kwargs)

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        # No event attributes map onto single-force parameters; pass the
        # keyword arguments through unchanged.
        return super(SFSource, cls).from_pyrocko_event(ev, **dict(kwargs))

4314 

4315 

class PorePressurePointSource(Source):
    '''
    Excess pore pressure point source.

    For poro-elastic initial value problem where an excess pore pressure is
    brought into a small source volume.
    '''

    discretized_source_class = meta.DiscretizedPorePressureSource

    pp = Float.T(
        default=1.0,
        help='initial excess pore pressure in [Pa]')

    def base_key(self):
        # No parameters beyond the base source location affect the GFs.
        return Source.base_key(self)

    def get_factor(self):
        # The response scales linearly with the pressure.
        return self.pp

    def discretize_basesource(self, store, target=None):
        # Unit pressure here; the actual pressure is applied via
        # get_factor().
        return meta.DiscretizedPorePressureSource(pp=arr(1.0),
                                                  **self._dparams_base())

4339 

4340 

class PorePressureLineSource(Source):
    '''
    Excess pore pressure line source.

    The line source is centered at (north_shift, east_shift, depth).
    '''

    discretized_source_class = meta.DiscretizedPorePressureSource

    pp = Float.T(
        default=1.0,
        help='initial excess pore pressure in [Pa]')

    length = Float.T(
        default=0.0,
        help='length of the line source [m]')

    azimuth = Float.T(
        default=0.0,
        help='azimuth direction, clockwise from north [deg]')

    dip = Float.T(
        default=90.,
        help='dip direction, downward from horizontal [deg]')

    def base_key(self):
        return Source.base_key(self) + (self.azimuth, self.dip, self.length)

    def get_factor(self):
        return self.pp

    def discretize_basesource(self, store, target=None):

        # Odd point count chosen so the spacing along the line is at most
        # half the store's smallest grid spacing.
        n = 2 * int(math.ceil(self.length / num.min(store.config.deltas))) + 1

        a = num.linspace(-0.5 * self.length, 0.5 * self.length, n)

        sa = math.sin(self.azimuth * d2r)
        ca = math.cos(self.azimuth * d2r)
        sd = math.sin(self.dip * d2r)
        cd = math.cos(self.dip * d2r)

        # Points distributed along the dipping line through the center.
        points = num.zeros((n, 3))
        points[:, 0] = self.north_shift + a * ca * cd
        points[:, 1] = self.east_shift + a * sa * cd
        points[:, 2] = self.depth + a * sd

        return meta.DiscretizedPorePressureSource(
            times=util.num_full(n, self.time),
            lat=self.lat,
            lon=self.lon,
            north_shifts=points[:, 0],
            east_shifts=points[:, 1],
            depths=points[:, 2],
            # Unit total pressure, spread evenly over the points.
            pp=num.ones(n) / n)

4396 

4397 

class Request(Object):
    '''
    Synthetic seismogram computation request.

    ::

        Request(**kwargs)
        Request(sources, targets, **kwargs)
    '''

    sources = List.T(
        Source.T(),
        help='list of sources for which to produce synthetics.')

    targets = List.T(
        Target.T(),
        help='list of targets for which to produce synthetics.')

    @classmethod
    def args2kwargs(cls, args):
        # Translate the positional calling conventions shown in the
        # class docstring into keyword arguments.
        nargs = len(args)
        if nargs not in (0, 2, 3):
            raise BadRequest('Invalid arguments.')

        if nargs == 2:
            return dict(sources=args[0], targets=args[1])

        return {}

    def __init__(self, *args, **kwargs):
        kwargs.update(self.args2kwargs(args))

        # Allow single objects as a convenience; normalize to lists.
        sources = kwargs.pop('sources', [])
        if isinstance(sources, Source):
            sources = [sources]

        targets = kwargs.pop('targets', [])
        if isinstance(targets, (Target, StaticTarget)):
            targets = [targets]

        Object.__init__(self, sources=sources, targets=targets, **kwargs)

    @property
    def targets_dynamic(self):
        # Waveform (dynamic) targets only.
        return [t for t in self.targets if isinstance(t, Target)]

    @property
    def targets_static(self):
        # Static targets only.
        return [t for t in self.targets if isinstance(t, StaticTarget)]

    @property
    def has_dynamic(self):
        return bool(self.targets_dynamic)

    @property
    def has_statics(self):
        return bool(self.targets_static)

    def subsources_map(self):
        # Group sources sharing the same GF base key.
        groups = defaultdict(list)
        for source in self.sources:
            groups[source.base_key()].append(source)

        return groups

    def subtargets_map(self):
        # Group targets sharing the same base key.
        groups = defaultdict(list)
        for target in self.targets:
            groups[target.base_key()].append(target)

        return groups

    def subrequest_map(self):
        # Cartesian pairing of source groups with target groups.
        ms = self.subsources_map()
        mt = self.subtargets_map()
        return dict(
            ((ks, kt), (ls, lt))
            for ks, ls in ms.items()
            for kt, lt in mt.items())

4478 

4479 

class ProcessingStats(Object):
    '''
    Timing and counter statistics of a processing run.

    The ``t_perc_*`` attributes hold per-stage time shares; ``n_*``
    attributes are event counters.
    '''
    t_perc_get_store_and_receiver = Float.T(default=0.)
    t_perc_discretize_source = Float.T(default=0.)
    t_perc_make_base_seismogram = Float.T(default=0.)
    t_perc_make_same_span = Float.T(default=0.)
    t_perc_post_process = Float.T(default=0.)
    t_perc_optimize = Float.T(default=0.)
    t_perc_stack = Float.T(default=0.)
    t_perc_static_get_store = Float.T(default=0.)
    t_perc_static_discretize_basesource = Float.T(default=0.)
    t_perc_static_sum_statics = Float.T(default=0.)
    t_perc_static_post_process = Float.T(default=0.)
    t_wallclock = Float.T(default=0.)
    t_cpu = Float.T(default=0.)
    n_read_blocks = Int.T(default=0)
    n_results = Int.T(default=0)
    n_subrequests = Int.T(default=0)
    n_stores = Int.T(default=0)
    n_records_stacked = Int.T(default=0)

4499 

4500 

class Response(Object):
    '''
    Response object to a synthetic seismogram computation request.
    '''

    request = Request.T()
    # One result list per source, with one entry per target.
    results_list = List.T(List.T(meta.SeismosizerResult.T()))
    stats = ProcessingStats.T()

    def pyrocko_traces(self):
        '''
        Return a list of requested
        :class:`~pyrocko.trace.Trace` instances.
        '''

        traces = []
        for results in self.results_list:
            for result in results:
                # Skip error objects and non-trace results.
                if not isinstance(result, meta.Result):
                    continue
                traces.append(result.trace.pyrocko_trace())

        return traces

    def kite_scenes(self):
        '''
        Return a list of requested
        :class:`~kite.scenes` instances.
        '''
        kite_scenes = []
        for results in self.results_list:
            for result in results:
                if isinstance(result, meta.KiteSceneResult):
                    sc = result.get_scene()
                    kite_scenes.append(sc)

        return kite_scenes

    def static_results(self):
        '''
        Return a list of requested
        :class:`~pyrocko.gf.meta.StaticResult` instances.
        '''
        statics = []
        for results in self.results_list:
            for result in results:
                if not isinstance(result, meta.StaticResult):
                    continue
                statics.append(result)

        return statics

    def iter_results(self, get='pyrocko_traces'):
        '''
        Generator function to iterate over results of request.

        Yields associated :py:class:`Source`,
        :class:`~pyrocko.gf.targets.Target`,
        :class:`~pyrocko.trace.Trace` instances in each iteration.
        '''

        for isource, source in enumerate(self.request.sources):
            for itarget, target in enumerate(self.request.targets):
                result = self.results_list[isource][itarget]
                if get == 'pyrocko_traces':
                    yield source, target, result.trace.pyrocko_trace()
                elif get == 'results':
                    yield source, target, result

    def snuffle(self, **kwargs):
        '''
        Open *snuffler* with requested traces.
        '''

        trace.snuffle(self.pyrocko_traces(), **kwargs)

4576 

4577 

class Engine(Object):
    '''
    Base class for synthetic seismogram calculators.
    '''

    def get_store_ids(self):
        '''
        Get list of available GF store IDs.

        The base implementation knows no stores; subclasses override.
        '''

        return []

4589 

4590 

# Base class for rules mapping a target quantity to the GF components
# needed to compute it.
class Rule(object):
    pass

4593 

4594 

class VectorRule(Rule):
    '''
    Rule for a three-component (north, east, down) vector quantity.

    Projects the NED base seismogram components onto the target's
    measurement direction, optionally differentiating the result.
    '''

    def __init__(self, quantity, differentiate=0, integrate=0):
        self.components = [quantity + '.' + c for c in 'ned']
        self.differentiate = differentiate
        self.integrate = integrate

    def required_components(self, target):
        # Only components whose projection factor does not vanish are
        # requested from the GF store.
        n, e, d = self.components
        sa, ca, sd, cd = target.get_sin_cos_factors()

        comps = []
        if nonzero(ca * cd):
            comps.append(n)

        if nonzero(sa * cd):
            comps.append(e)

        if nonzero(sd):
            comps.append(d)

        return tuple(comps)

    def apply_(self, target, base_seismogram):
        # Weighted sum of the available components; the weights are the
        # direction cosines of the target's measurement direction.
        n, e, d = self.components
        sa, ca, sd, cd = target.get_sin_cos_factors()

        if nonzero(ca * cd):
            data = base_seismogram[n].data * (ca * cd)
            deltat = base_seismogram[n].deltat
        else:
            data = 0.0

        if nonzero(sa * cd):
            data = data + base_seismogram[e].data * (sa * cd)
            deltat = base_seismogram[e].deltat

        if nonzero(sd):
            data = data + base_seismogram[d].data * sd
            deltat = base_seismogram[d].deltat

        # NOTE(review): if all three factors were zero and differentiate
        # is set, ``deltat`` would be unbound here -- presumably such
        # degenerate targets never occur; confirm.
        if self.differentiate:
            data = util.diff_fd(self.differentiate, 4, deltat, data)

        if self.integrate:
            raise NotImplementedError('Integration is not implemented yet.')

        return data

4643 

4644 

class HorizontalVectorRule(Rule):
    '''
    Rule for a horizontal two-component (north, east) vector quantity.
    '''

    def __init__(self, quantity, differentiate=0, integrate=0):
        self.components = [quantity + '.' + c for c in 'ne']
        self.differentiate = differentiate
        self.integrate = integrate

    def required_components(self, target):
        # Only components with non-vanishing azimuth factors are needed.
        n, e = self.components
        sa, ca, _, _ = target.get_sin_cos_factors()

        comps = []
        if nonzero(ca):
            comps.append(n)

        if nonzero(sa):
            comps.append(e)

        return tuple(comps)

    def apply_(self, target, base_seismogram):
        n, e = self.components
        sa, ca, _, _ = target.get_sin_cos_factors()

        if nonzero(ca):
            data = base_seismogram[n].data * ca
        else:
            data = 0.0

        if nonzero(sa):
            data = data + base_seismogram[e].data * sa

        if self.differentiate:
            # NOTE(review): deltat is always taken from the east
            # component, which required_components only guarantees when
            # nonzero(sa) -- confirm this cannot fail for north-only
            # targets.
            deltat = base_seismogram[e].deltat
            data = util.diff_fd(self.differentiate, 4, deltat, data)

        if self.integrate:
            raise NotImplementedError('Integration is not implemented yet.')

        return data

4685 

4686 

class ScalarRule(Rule):
    '''
    Rule for a scalar quantity, e.g. pore pressure.

    Optionally differentiates the base seismogram ``differentiate``
    times.
    '''

    def __init__(self, quantity, differentiate=0):
        self.c = quantity
        # Bug fix: ``differentiate`` was accepted but never stored, so
        # ``apply_`` crashed with AttributeError whenever it ran.
        self.differentiate = differentiate

    def required_components(self, target):
        return (self.c, )

    def apply_(self, target, base_seismogram):
        # Copy so differentiation never mutates the cached base data.
        data = base_seismogram[self.c].data.copy()
        deltat = base_seismogram[self.c].deltat
        if self.differentiate:
            data = util.diff_fd(self.differentiate, 4, deltat, data)

        return data

4702 

4703 

class StaticDisplacement(Rule):
    '''
    Rule for static displacement targets.

    For satellite targets, additionally derives the line-of-sight
    component from the NED components.
    '''

    def required_components(self, target):
        return tuple(['displacement.%s' % c for c in list('ned')])

    def apply_(self, target, base_statics):
        if isinstance(target, SatelliteTarget):
            los_fac = target.get_los_factors()
            # Note the sign flip on the downward component when
            # projecting onto the line of sight.
            base_statics['displacement.los'] =\
                (los_fac[:, 0] * -base_statics['displacement.d'] +
                 los_fac[:, 1] * base_statics['displacement.e'] +
                 los_fac[:, 2] * base_statics['displacement.n'])
        return base_statics

4717 

4718 

# Mapping: target quantity -> list of rules, tried in order; the first
# rule whose required components are all provided by the GF store wins.
# Derived quantities (velocity, acceleration) can be obtained by
# differentiating lower-order GFs.
channel_rules = {
    'displacement': [VectorRule('displacement')],
    'rotation': [VectorRule('rotation')],
    'velocity': [
        VectorRule('velocity'),
        VectorRule('displacement', differentiate=1)],
    'acceleration': [
        VectorRule('acceleration'),
        VectorRule('velocity', differentiate=1),
        VectorRule('displacement', differentiate=2)],
    'pore_pressure': [ScalarRule('pore_pressure')],
    'vertical_tilt': [HorizontalVectorRule('vertical_tilt')],
    'darcy_velocity': [VectorRule('darcy_velocity')],
}

# Same mapping for static targets.
static_rules = {
    'displacement': [StaticDisplacement()]
}

4737 

4738 

# Diagnostic information attached to out-of-bounds errors raised during
# processing.
class OutOfBoundsContext(Object):
    source = Source.T()
    target = Target.T()
    distance = Float.T()
    components = List.T(String.T())

4744 

4745 

def process_dynamic_timeseries(work, psources, ptargets, engine, nthreads=0):
    '''
    Process work units for dynamic targets in batches per GF store,
    yielding ``(isource, itarget, result)`` tuples with timing counters.
    '''
    dsource_cache = {}
    # Placeholder counters, same shape as in process_dynamic.
    tcounters = list(range(6))

    store_ids = set()
    sources = set()
    targets = set()

    # Tag each target with its position so results can be attributed.
    for itarget, target in enumerate(ptargets):
        target._id = itarget

    # Union of sources and targets referenced by the work units.
    for w in work:
        _, _, isources, itargets = w

        sources.update([psources[isource] for isource in isources])
        targets.update([ptargets[itarget] for itarget in itargets])

    store_ids = set([t.store_id for t in targets])

    for isource, source in enumerate(psources):

        components = set()
        for itarget, target in enumerate(targets):
            rule = engine.get_rule(source, target)
            components.update(rule.required_components(target))

        for store_id in store_ids:
            store_targets = [t for t in targets if t.store_id == store_id]

            sample_rates = set([t.sample_rate for t in store_targets])
            interpolations = set([t.interpolation for t in store_targets])

            base_seismograms = []
            store_targets_out = []

            # Batch targets by (sample_rate, interpolation) so each
            # batch can be computed in a single engine call.
            for samp_rate in sample_rates:
                for interp in interpolations:
                    engine_targets = [
                        t for t in store_targets if t.sample_rate == samp_rate
                        and t.interpolation == interp]

                    if not engine_targets:
                        continue

                    store_targets_out += engine_targets

                    base_seismograms += engine.base_seismograms(
                        source,
                        engine_targets,
                        components,
                        dsource_cache,
                        nthreads)

            for iseis, seismogram in enumerate(base_seismograms):
                for tr in seismogram.values():
                    if tr.err != store.SeismosizerErrorEnum.SUCCESS:
                        # NOTE(review): the context indexes
                        # ``store_targets`` with ``iseis`` while the
                        # seismograms are ordered like
                        # ``store_targets_out``; the reported target may
                        # be wrong when batching reorders targets --
                        # confirm.
                        e = SeismosizerError(
                            'Seismosizer failed with return code %i\n%s' % (
                                tr.err, str(
                                    OutOfBoundsContext(
                                        source=source,
                                        target=store_targets[iseis],
                                        distance=source.distance_to(
                                            store_targets[iseis]),
                                        components=components))))
                        raise e

            for seismogram, target in zip(base_seismograms, store_targets_out):

                try:
                    result = engine._post_process_dynamic(
                        seismogram, source, target)
                except SeismosizerError as e:
                    # Errors are yielded as results, not raised.
                    result = e

                yield (isource, target._id, result), tcounters

4822 

4823 

def process_dynamic(work, psources, ptargets, engine, nthreads=0):
    '''
    Process work units for dynamic targets, one source-target pair at a
    time, yielding ``(isource, itarget, result)`` and timing counters.
    '''
    dsource_cache = {}

    for w in work:
        _, _, isources, itargets = w

        sources = [psources[isource] for isource in isources]
        targets = [ptargets[itarget] for itarget in itargets]

        # Sources in one work unit share a base key, so the rule of the
        # first source applies to all of them.
        components = set()
        for target in targets:
            rule = engine.get_rule(sources[0], target)
            components.update(rule.required_components(target))

        for isource, source in zip(isources, sources):
            for itarget, target in zip(itargets, targets):

                try:
                    base_seismogram, tcounters = engine.base_seismogram(
                        source, target, components, dsource_cache, nthreads)
                except meta.OutOfBounds as e:
                    # Attach diagnostic context before re-raising.
                    e.context = OutOfBoundsContext(
                        source=sources[0],
                        target=targets[0],
                        distance=sources[0].distance_to(targets[0]),
                        components=components)
                    raise

                n_records_stacked = 0
                t_optimize = 0.0
                t_stack = 0.0

                # Accumulate stacking statistics over all components.
                for _, tr in base_seismogram.items():
                    n_records_stacked += tr.n_records_stacked
                    t_optimize += tr.t_optimize
                    t_stack += tr.t_stack

                try:
                    result = engine._post_process_dynamic(
                        base_seismogram, source, target)
                    result.n_records_stacked = n_records_stacked
                    result.n_shared_stacking = len(sources) *\
                        len(targets)
                    result.t_optimize = t_optimize
                    result.t_stack = t_stack
                except SeismosizerError as e:
                    # Errors are yielded as results, not raised.
                    result = e

                tcounters.append(xtime())
                yield (isource, itarget, result), tcounters

4874 

4875 

def process_static(work, psources, ptargets, engine, nthreads=0):
    '''
    Process work units for static targets, yielding
    ``(isource, itarget, result)`` and timing counters.
    '''
    for w in work:
        _, _, isources, itargets = w

        sources = [psources[isource] for isource in isources]
        targets = [ptargets[itarget] for itarget in itargets]

        for isource, source in zip(isources, sources):
            for itarget, target in zip(itargets, targets):
                components = engine.get_rule(source, target)\
                    .required_components(target)

                try:
                    base_statics, tcounters = engine.base_statics(
                        source, target, components, nthreads)
                except meta.OutOfBounds as e:
                    # Attach diagnostic context; a distance is not
                    # computed for static targets here.
                    e.context = OutOfBoundsContext(
                        source=sources[0],
                        target=targets[0],
                        distance=float('nan'),
                        components=components)
                    raise
                result = engine._post_process_statics(
                    base_statics, source, target)
                tcounters.append(xtime())

                yield (isource, itarget, result), tcounters

4903 

4904 

4905class LocalEngine(Engine): 

4906 ''' 

4907 Offline synthetic seismogram calculator. 

4908 

4909 :param use_env: if ``True``, fill :py:attr:`store_superdirs` and 

4910 :py:attr:`store_dirs` with paths set in environment variables 

4911 GF_STORE_SUPERDIRS and GF_STORE_DIRS. 

4912 :param use_config: if ``True``, fill :py:attr:`store_superdirs` and 

4913 :py:attr:`store_dirs` with paths set in the user's config file. 

4914 

4915 The config file can be found at :file:`~/.pyrocko/config.pf` 

4916 

4917 .. code-block :: python 

4918 

4919 gf_store_dirs: ['/home/pyrocko/gf_stores/ak135/'] 

4920 gf_store_superdirs: ['/home/pyrocko/gf_stores/'] 

4921 ''' 

4922 

    # Search paths for GF stores; superdirs are scanned for store
    # subdirectories, store_dirs name individual stores directly.
    store_superdirs = List.T(
        String.T(),
        help='directories which are searched for Green\'s function stores')

    store_dirs = List.T(
        String.T(),
        help='additional individual Green\'s function store directories')

    default_store_id = String.T(
        optional=True,
        help='default store ID to be used when a request does not provide '
             'one')

4935 

    def __init__(self, **kwargs):
        # ``use_env`` and ``use_config`` are constructor-only options,
        # not Guts attributes, so pop them before Engine.__init__.
        use_env = kwargs.pop('use_env', False)
        use_config = kwargs.pop('use_config', False)
        Engine.__init__(self, **kwargs)
        if use_env:
            # Colon-separated path lists, as in PATH.
            env_store_superdirs = os.environ.get('GF_STORE_SUPERDIRS', '')
            env_store_dirs = os.environ.get('GF_STORE_DIRS', '')
            if env_store_superdirs:
                self.store_superdirs.extend(env_store_superdirs.split(':'))

            if env_store_dirs:
                self.store_dirs.extend(env_store_dirs.split(':'))

        if use_config:
            c = config.config()
            self.store_superdirs.extend(c.gf_store_superdirs)
            self.store_dirs.extend(c.gf_store_dirs)

        self._check_store_dirs_type()
        # Lazily filled caches: store ID -> directory, and open stores.
        self._id_to_store_dir = {}
        self._open_stores = {}
        self._effective_default_store_id = None

4958 

4959 def _check_store_dirs_type(self): 

4960 for sdir in ['store_dirs', 'store_superdirs']: 

4961 if not isinstance(self.__getattribute__(sdir), list): 

4962 raise TypeError("{} of {} is not of type list".format( 

4963 sdir, self.__class__.__name__)) 

4964 

    def _get_store_id(self, store_dir):
        # Open the store briefly just to read its configured ID.
        store_ = store.Store(store_dir)
        store_id = store_.config.id
        store_.close()
        return store_id

4970 

4971 def _looks_like_store_dir(self, store_dir): 

4972 return os.path.isdir(store_dir) and \ 

4973 all(os.path.isfile(pjoin(store_dir, x)) for x in 

4974 ('index', 'traces', 'config')) 

4975 

    def iter_store_dirs(self):
        '''
        Collect the set of candidate GF store directories.

        Scans every superdir for entries that look like store
        directories and adds the explicitly configured store dirs.
        '''
        store_dirs = set()
        for d in self.store_superdirs:
            if not os.path.exists(d):
                # Missing superdirs are tolerated with a warning.
                logger.warning('store_superdir not available: %s' % d)
                continue

            for entry in os.listdir(d):
                store_dir = os.path.realpath(pjoin(d, entry))
                if self._looks_like_store_dir(store_dir):
                    store_dirs.add(store_dir)

        for store_dir in self.store_dirs:
            store_dirs.add(os.path.realpath(store_dir))

        return store_dirs

4992 

    def _scan_stores(self):
        # Refresh the store-ID -> directory mapping from disk, guarding
        # against two different directories claiming the same store ID.
        for store_dir in self.iter_store_dirs():
            store_id = self._get_store_id(store_dir)
            if store_id not in self._id_to_store_dir:
                self._id_to_store_dir[store_id] = store_dir
            else:
                if store_dir != self._id_to_store_dir[store_id]:
                    raise DuplicateStoreId(
                        'GF store ID %s is used in (at least) two '
                        'different stores. Locations are: %s and %s' %
                        (store_id, self._id_to_store_dir[store_id], store_dir))

5004 

    def get_store_dir(self, store_id):
        '''
        Lookup directory given a GF store ID.

        :raises NoSuchStore: if no store with the given ID can be found
        '''

        if store_id not in self._id_to_store_dir:
            # Not cached yet; rescan the configured directories once.
            self._scan_stores()

        if store_id not in self._id_to_store_dir:
            raise NoSuchStore(store_id, self.iter_store_dirs())

        return self._id_to_store_dir[store_id]

5017 

5018 def get_store_ids(self): 

5019 ''' 

5020 Get list of available store IDs. 

5021 ''' 

5022 

5023 self._scan_stores() 

5024 return sorted(self._id_to_store_dir.keys()) 

5025 

5026 def effective_default_store_id(self): 

5027 if self._effective_default_store_id is None: 

5028 if self.default_store_id is None: 

5029 store_ids = self.get_store_ids() 

5030 if len(store_ids) == 1: 

5031 self._effective_default_store_id = self.get_store_ids()[0] 

5032 else: 

5033 raise NoDefaultStoreSet() 

5034 else: 

5035 self._effective_default_store_id = self.default_store_id 

5036 

5037 return self._effective_default_store_id 

5038 

    def get_store(self, store_id=None):
        '''
        Get a store from the engine.

        :param store_id: identifier of the store (optional)
        :returns: :py:class:`~pyrocko.gf.store.Store` object

        If no ``store_id`` is provided the store
        associated with the :py:gattr:`default_store_id` is returned.
        Raises :py:exc:`NoDefaultStoreSet` if :py:gattr:`default_store_id` is
        undefined.
        '''

        if store_id is None:
            store_id = self.effective_default_store_id()

        # Stores are opened lazily and kept open for reuse.
        if store_id not in self._open_stores:
            store_dir = self.get_store_dir(store_id)
            self._open_stores[store_id] = store.Store(store_dir)

        return self._open_stores[store_id]

5060 

5061 def get_store_config(self, store_id): 

5062 store = self.get_store(store_id) 

5063 return store.config 

5064 

5065 def get_store_extra(self, store_id, key): 

5066 store = self.get_store(store_id) 

5067 return store.get_extra(key) 

5068 

5069 def close_cashed_stores(self): 

5070 ''' 

5071 Close and remove ids from cashed stores. 

5072 ''' 

5073 store_ids = [] 

5074 for store_id, store_ in self._open_stores.items(): 

5075 store_.close() 

5076 store_ids.append(store_id) 

5077 

5078 for store_id in store_ids: 

5079 self._open_stores.pop(store_id) 

5080 

5081 def get_rule(self, source, target): 

5082 cprovided = self.get_store(target.store_id).get_provided_components() 

5083 

5084 if isinstance(target, StaticTarget): 

5085 quantity = target.quantity 

5086 available_rules = static_rules 

5087 elif isinstance(target, Target): 

5088 quantity = target.effective_quantity() 

5089 available_rules = channel_rules 

5090 

5091 try: 

5092 for rule in available_rules[quantity]: 

5093 cneeded = rule.required_components(target) 

5094 if all(c in cprovided for c in cneeded): 

5095 return rule 

5096 

5097 except KeyError: 

5098 pass 

5099 

5100 raise BadRequest( 

5101 'No rule to calculate "%s" with GFs from store "%s" ' 

5102 'for source model "%s".' % ( 

5103 target.effective_quantity(), 

5104 target.store_id, 

5105 source.__class__.__name__)) 

5106 

5107 def _cached_discretize_basesource(self, source, store, cache, target): 

5108 if (source, store) not in cache: 

5109 cache[source, store] = source.discretize_basesource(store, target) 

5110 

5111 return cache[source, store] 

5112 

5113 def base_seismograms(self, source, targets, components, dsource_cache, 

5114 nthreads=0): 

5115 

5116 target = targets[0] 

5117 

5118 interp = set([t.interpolation for t in targets]) 

5119 if len(interp) > 1: 

5120 raise BadRequest('Targets have different interpolation schemes.') 

5121 

5122 rates = set([t.sample_rate for t in targets]) 

5123 if len(rates) > 1: 

5124 raise BadRequest('Targets have different sample rates.') 

5125 

5126 store_ = self.get_store(target.store_id) 

5127 receivers = [t.receiver(store_) for t in targets] 

5128 

5129 if target.sample_rate is not None: 

5130 deltat = 1. / target.sample_rate 

5131 rate = target.sample_rate 

5132 else: 

5133 deltat = None 

5134 rate = store_.config.sample_rate 

5135 

5136 tmin = num.fromiter( 

5137 (t.tmin for t in targets), dtype=float, count=len(targets)) 

5138 tmax = num.fromiter( 

5139 (t.tmax for t in targets), dtype=float, count=len(targets)) 

5140 

5141 mask = num.logical_and(num.isfinite(tmin), num.isfinite(tmax)) 

5142 

5143 itmin = num.zeros_like(tmin, dtype=num.int64) 

5144 itmax = num.zeros_like(tmin, dtype=num.int64) 

5145 nsamples = num.full_like(tmin, -1, dtype=num.int64) 

5146 

5147 itmin[mask] = num.floor(tmin[mask] * rate).astype(num.int64) 

5148 itmax[mask] = num.ceil(tmax[mask] * rate).astype(num.int64) 

5149 nsamples = itmax - itmin + 1 

5150 nsamples[num.logical_not(mask)] = -1 

5151 

5152 base_source = self._cached_discretize_basesource( 

5153 source, store_, dsource_cache, target) 

5154 

5155 base_seismograms = store_.calc_seismograms( 

5156 base_source, receivers, components, 

5157 deltat=deltat, 

5158 itmin=itmin, nsamples=nsamples, 

5159 interpolation=target.interpolation, 

5160 optimization=target.optimization, 

5161 nthreads=nthreads) 

5162 

5163 for i, base_seismogram in enumerate(base_seismograms): 

5164 base_seismograms[i] = store.make_same_span(base_seismogram) 

5165 

5166 return base_seismograms 

5167 

5168 def base_seismogram(self, source, target, components, dsource_cache, 

5169 nthreads): 

5170 

5171 tcounters = [xtime()] 

5172 

5173 store_ = self.get_store(target.store_id) 

5174 receiver = target.receiver(store_) 

5175 

5176 if target.tmin and target.tmax is not None: 

5177 rate = store_.config.sample_rate 

5178 itmin = int(num.floor(target.tmin * rate)) 

5179 itmax = int(num.ceil(target.tmax * rate)) 

5180 nsamples = itmax - itmin + 1 

5181 else: 

5182 itmin = None 

5183 nsamples = None 

5184 

5185 tcounters.append(xtime()) 

5186 base_source = self._cached_discretize_basesource( 

5187 source, store_, dsource_cache, target) 

5188 

5189 tcounters.append(xtime()) 

5190 

5191 if target.sample_rate is not None: 

5192 deltat = 1. / target.sample_rate 

5193 else: 

5194 deltat = None 

5195 

5196 base_seismogram = store_.seismogram( 

5197 base_source, receiver, components, 

5198 deltat=deltat, 

5199 itmin=itmin, nsamples=nsamples, 

5200 interpolation=target.interpolation, 

5201 optimization=target.optimization, 

5202 nthreads=nthreads) 

5203 

5204 tcounters.append(xtime()) 

5205 

5206 base_seismogram = store.make_same_span(base_seismogram) 

5207 

5208 tcounters.append(xtime()) 

5209 

5210 return base_seismogram, tcounters 

5211 

5212 def base_statics(self, source, target, components, nthreads): 

5213 tcounters = [xtime()] 

5214 store_ = self.get_store(target.store_id) 

5215 

5216 if target.tsnapshot is not None: 

5217 rate = store_.config.sample_rate 

5218 itsnapshot = int(num.floor(target.tsnapshot * rate)) 

5219 else: 

5220 itsnapshot = None 

5221 tcounters.append(xtime()) 

5222 

5223 base_source = source.discretize_basesource(store_, target=target) 

5224 

5225 tcounters.append(xtime()) 

5226 

5227 base_statics = store_.statics( 

5228 base_source, 

5229 target, 

5230 itsnapshot, 

5231 components, 

5232 target.interpolation, 

5233 nthreads) 

5234 

5235 tcounters.append(xtime()) 

5236 

5237 return base_statics, tcounters 

5238 

    def _post_process_dynamic(self, base_seismogram, source, target):
        # Turn a raw base seismogram (dict of component traces) into a final
        # trace: apply the conversion rule, scale factors and the source's
        # post-stack STF, then hand over to the target's post-processing.
        base_any = next(iter(base_seismogram.values()))
        deltat = base_any.deltat
        itmin = base_any.itmin

        rule = self.get_rule(source, target)
        data = rule.apply_(target, base_seismogram)

        factor = source.get_factor() * target.get_factor()
        if factor != 1.0:
            data = data * factor

        stf = source.effective_stf_post()

        times, amplitudes = stf.discretize_t(
            deltat, 0.0)

        # repeat end point to prevent boundary effects
        padded_data = num.empty(data.size + amplitudes.size, dtype=float)
        padded_data[:data.size] = data
        padded_data[data.size:] = data[-1]
        data = num.convolve(amplitudes, padded_data)

        # Shift the trace onset by the STF's leading time offset.
        tmin = itmin * deltat + times[0]

        tr = meta.SeismosizerTrace(
            codes=target.codes,
            # Trim the convolution tail introduced by the end-point padding.
            data=data[:-amplitudes.size],
            deltat=deltat,
            tmin=tmin)

        return target.post_process(self, source, tr)

5271 

    def _post_process_statics(self, base_statics, source, starget):
        # Apply the static conversion rule and the source scale factor, then
        # let the static target post-process the result.
        rule = self.get_rule(source, starget)
        data = rule.apply_(starget, base_statics)

        factor = source.get_factor()
        if factor != 1.0:
            # In-place scaling: mutates the arrays referenced by ``data``.
            for v in data.values():
                v *= factor

        # NOTE(review): ``base_statics`` (not ``data``) is passed on here.
        # The scaling above only takes effect if ``rule.apply_`` returns
        # views into / mutates ``base_statics`` — verify; otherwise the
        # factor is silently dropped.
        return starget.post_process(self, source, base_statics)

5282 

5283 def process(self, *args, **kwargs): 

5284 ''' 

5285 Process a request. 

5286 

5287 :: 

5288 

5289 process(**kwargs) 

5290 process(request, **kwargs) 

5291 process(sources, targets, **kwargs) 

5292 

5293 The request can be given a a :py:class:`Request` object, or such an 

5294 object is created using ``Request(**kwargs)`` for convenience. 

5295 

5296 :returns: :py:class:`Response` object 

5297 ''' 

5298 

5299 if len(args) not in (0, 1, 2): 

5300 raise BadRequest('Invalid arguments.') 

5301 

5302 if len(args) == 1: 

5303 kwargs['request'] = args[0] 

5304 

5305 elif len(args) == 2: 

5306 kwargs.update(Request.args2kwargs(args)) 

5307 

5308 request = kwargs.pop('request', None) 

5309 status_callback = kwargs.pop('status_callback', None) 

5310 calc_timeseries = kwargs.pop('calc_timeseries', True) 

5311 

5312 nprocs = kwargs.pop('nprocs', None) 

5313 nthreads = kwargs.pop('nthreads', 1) 

5314 if nprocs is not None: 

5315 nthreads = nprocs 

5316 

5317 if request is None: 

5318 request = Request(**kwargs) 

5319 

5320 if resource: 

5321 rs0 = resource.getrusage(resource.RUSAGE_SELF) 

5322 rc0 = resource.getrusage(resource.RUSAGE_CHILDREN) 

5323 tt0 = xtime() 

5324 

5325 # make sure stores are open before fork() 

5326 store_ids = set(target.store_id for target in request.targets) 

5327 for store_id in store_ids: 

5328 self.get_store(store_id) 

5329 

5330 source_index = dict((x, i) for (i, x) in 

5331 enumerate(request.sources)) 

5332 target_index = dict((x, i) for (i, x) in 

5333 enumerate(request.targets)) 

5334 

5335 m = request.subrequest_map() 

5336 

5337 skeys = sorted(m.keys(), key=cmp_to_key(cmp_none_aware)) 

5338 results_list = [] 

5339 

5340 for i in range(len(request.sources)): 

5341 results_list.append([None] * len(request.targets)) 

5342 

5343 tcounters_dyn_list = [] 

5344 tcounters_static_list = [] 

5345 nsub = len(skeys) 

5346 isub = 0 

5347 

5348 # Processing dynamic targets through 

5349 # parimap(process_subrequest_dynamic) 

5350 

5351 if calc_timeseries: 

5352 _process_dynamic = process_dynamic_timeseries 

5353 else: 

5354 _process_dynamic = process_dynamic 

5355 

5356 if request.has_dynamic: 

5357 work_dynamic = [ 

5358 (i, nsub, 

5359 [source_index[source] for source in m[k][0]], 

5360 [target_index[target] for target in m[k][1] 

5361 if not isinstance(target, StaticTarget)]) 

5362 for (i, k) in enumerate(skeys)] 

5363 

5364 for ii_results, tcounters_dyn in _process_dynamic( 

5365 work_dynamic, request.sources, request.targets, self, 

5366 nthreads): 

5367 

5368 tcounters_dyn_list.append(num.diff(tcounters_dyn)) 

5369 isource, itarget, result = ii_results 

5370 results_list[isource][itarget] = result 

5371 

5372 if status_callback: 

5373 status_callback(isub, nsub) 

5374 

5375 isub += 1 

5376 

5377 # Processing static targets through process_static 

5378 if request.has_statics: 

5379 work_static = [ 

5380 (i, nsub, 

5381 [source_index[source] for source in m[k][0]], 

5382 [target_index[target] for target in m[k][1] 

5383 if isinstance(target, StaticTarget)]) 

5384 for (i, k) in enumerate(skeys)] 

5385 

5386 for ii_results, tcounters_static in process_static( 

5387 work_static, request.sources, request.targets, self, 

5388 nthreads=nthreads): 

5389 

5390 tcounters_static_list.append(num.diff(tcounters_static)) 

5391 isource, itarget, result = ii_results 

5392 results_list[isource][itarget] = result 

5393 

5394 if status_callback: 

5395 status_callback(isub, nsub) 

5396 

5397 isub += 1 

5398 

5399 if status_callback: 

5400 status_callback(nsub, nsub) 

5401 

5402 tt1 = time.time() 

5403 if resource: 

5404 rs1 = resource.getrusage(resource.RUSAGE_SELF) 

5405 rc1 = resource.getrusage(resource.RUSAGE_CHILDREN) 

5406 

5407 s = ProcessingStats() 

5408 

5409 if request.has_dynamic: 

5410 tcumu_dyn = num.sum(num.vstack(tcounters_dyn_list), axis=0) 

5411 t_dyn = float(num.sum(tcumu_dyn)) 

5412 perc_dyn = map(float, tcumu_dyn / t_dyn * 100.) 

5413 (s.t_perc_get_store_and_receiver, 

5414 s.t_perc_discretize_source, 

5415 s.t_perc_make_base_seismogram, 

5416 s.t_perc_make_same_span, 

5417 s.t_perc_post_process) = perc_dyn 

5418 else: 

5419 t_dyn = 0. 

5420 

5421 if request.has_statics: 

5422 tcumu_static = num.sum(num.vstack(tcounters_static_list), axis=0) 

5423 t_static = num.sum(tcumu_static) 

5424 perc_static = map(float, tcumu_static / t_static * 100.) 

5425 (s.t_perc_static_get_store, 

5426 s.t_perc_static_discretize_basesource, 

5427 s.t_perc_static_sum_statics, 

5428 s.t_perc_static_post_process) = perc_static 

5429 

5430 s.t_wallclock = tt1 - tt0 

5431 if resource: 

5432 s.t_cpu = ( 

5433 (rs1.ru_utime + rs1.ru_stime + rc1.ru_utime + rc1.ru_stime) - 

5434 (rs0.ru_utime + rs0.ru_stime + rc0.ru_utime + rc0.ru_stime)) 

5435 s.n_read_blocks = ( 

5436 (rs1.ru_inblock + rc1.ru_inblock) - 

5437 (rs0.ru_inblock + rc0.ru_inblock)) 

5438 

5439 n_records_stacked = 0. 

5440 for results in results_list: 

5441 for result in results: 

5442 if not isinstance(result, meta.Result): 

5443 continue 

5444 shr = float(result.n_shared_stacking) 

5445 n_records_stacked += result.n_records_stacked / shr 

5446 s.t_perc_optimize += result.t_optimize / shr 

5447 s.t_perc_stack += result.t_stack / shr 

5448 s.n_records_stacked = int(n_records_stacked) 

5449 if t_dyn != 0.: 

5450 s.t_perc_optimize /= t_dyn * 100 

5451 s.t_perc_stack /= t_dyn * 100 

5452 

5453 return Response( 

5454 request=request, 

5455 results_list=results_list, 

5456 stats=s) 

5457 

5458 

class RemoteEngine(Engine):
    '''
    Client for remote synthetic seismogram calculator.
    '''

    # Web-service endpoint selection; defaults come from pyrocko.gf.ws.
    site = String.T(default=ws.g_default_site, optional=True)
    url = String.T(default=ws.g_url, optional=True)

    def process(self, request=None, status_callback=None, **kwargs):
        # Delegate the full request to the remote seismosizer web service.
        # NOTE(review): ``status_callback`` is accepted for interface
        # compatibility with LocalEngine.process but is not used here.
        if request is None:
            request = Request(**kwargs)

        return ws.seismosizer(url=self.url, site=self.site, request=request)

5473 

5474 

# Module-level singleton engine, created lazily by get_engine().
g_engine = None


def get_engine(store_superdirs=None):
    '''
    Get a shared :py:class:`LocalEngine` singleton.

    :param store_superdirs: optional list of additional GF store
        superdirectories to make available on the returned engine
    :returns: the global :py:class:`LocalEngine` instance

    The engine is created on first use with ``use_env=True`` and
    ``use_config=True``.
    '''
    global g_engine
    if g_engine is None:
        g_engine = LocalEngine(use_env=True, use_config=True)

    # ``None`` default instead of a mutable ``[]`` default argument;
    # treat ``None`` as "no extra directories".
    for d in store_superdirs or []:
        if d not in g_engine.store_superdirs:
            g_engine.store_superdirs.append(d)

    return g_engine

5488 

5489 

class SourceGroup(Object):
    '''
    Abstract base class for groups of sources.

    Attribute access is vectorized: accessing ``group.some_param`` gathers
    ``some_param`` from every contained source into a float array.
    '''

    def __getattr__(self, k):
        # Collect attribute ``k`` from each member source as a float array.
        return num.fromiter((getattr(s, k) for s in self),
                            dtype=float)

    def __iter__(self):
        raise NotImplementedError(
            'This method should be implemented in subclass.')

    def __len__(self):
        raise NotImplementedError(
            'This method should be implemented in subclass.')

5503 

5504 

class SourceList(SourceGroup):
    '''
    Flat collection of sources.
    '''

    # The contained source objects.
    sources = List.T(Source.T())

    def append(self, s):
        # Add source ``s`` to the list.
        self.sources.append(s)

    def __iter__(self):
        return iter(self.sources)

    def __len__(self):
        return len(self.sources)

5516 

5517 

class SourceGrid(SourceGroup):
    '''
    Regular grid of sources, derived from a base source by varying selected
    parameters over ranges.
    '''

    base = Source.T()
    variables = Dict.T(String.T(), Range.T())
    order = List.T(String.T())

    def __len__(self):
        # The grid size is the product of the lengths of all coordinate
        # axes.
        total = 1
        for _, values in self.make_coords(self.base):
            total *= len(list(values))

        return total

    def __iter__(self):
        coords = self.make_coords(self.base)
        for items in permudef(coords):
            source = self.base.clone(**dict(items))
            source.regularize()
            yield source

    def ordered_params(self):
        # Yield the varied parameter names, honouring the explicit ordering
        # first, then the base source's natural parameter order.
        remaining = list(self.variables.keys())
        for name in self.order + list(self.base.keys()):
            if name in remaining:
                yield name
                remaining.remove(name)

        if remaining:
            raise Exception('Invalid parameter "%s" for source type "%s".' %
                            (remaining[0], self.base.__class__.__name__))

    def make_coords(self, base):
        return [
            (param, self.variables[param].make(base=base[param]))
            for param in self.ordered_params()]

5550 

5551 

# Source model classes exported by this module (also used to build
# ``__all__`` below).
source_classes = [
    Source,
    SourceWithMagnitude,
    SourceWithDerivedMagnitude,
    ExplosionSource,
    RectangularExplosionSource,
    DCSource,
    CLVDSource,
    VLVDSource,
    MTSource,
    RectangularSource,
    PseudoDynamicRupture,
    DoubleDCSource,
    RingfaultSource,
    CombiSource,
    SFSource,
    PorePressurePointSource,
    PorePressureLineSource,
]

5571 

# Source-time-function classes exported by this module.
stf_classes = [
    STF,
    BoxcarSTF,
    TriangularSTF,
    HalfSinusoidSTF,
    ResonatorSTF,
]

5579 

# Public API of this module: error classes, all source and STF classes, and
# the engine machinery.
__all__ = '''
SeismosizerError
BadRequest
NoSuchStore
DerivedMagnitudeError
STFMode
'''.split() + [S.__name__ for S in source_classes + stf_classes] + '''
Request
ProcessingStats
Response
Engine
LocalEngine
RemoteEngine
source_classes
get_engine
Range
SourceGroup
SourceList
SourceGrid
map_anchor
'''.split()