1# http://pyrocko.org - GPLv3 

2# 

3# The Pyrocko Developers, 21st Century 

4# ---|P------/S----------~Lg---------- 

5 

6''' 

7 

8 

9 

10.. _coordinate-system-names: 

11 

12Coordinate systems 

13.................. 

14 

15Coordinate system names commonly used in source models. 

16 

17================= ============================================ 

18Name Description 

19================= ============================================ 

20``'xyz'`` northing, easting, depth in [m] 

21``'xy'`` northing, easting in [m] 

22``'latlon'`` latitude, longitude in [deg] 

23``'lonlat'`` longitude, latitude in [deg] 

24``'latlondepth'`` latitude, longitude in [deg], depth in [m] 

25================= ============================================ 

26''' 

27 

28from collections import defaultdict 

29from functools import cmp_to_key 

30import time 

31import math 

32import os 

33import re 

34import logging 

35try: 

36 import resource 

37except ImportError: 

38 resource = None 

39from hashlib import sha1 

40 

41import numpy as num 

42from scipy.interpolate import RegularGridInterpolator 

43 

44from pyrocko.guts import (Object, Float, String, StringChoice, List, 

45 Timestamp, Int, SObject, ArgumentError, Dict, 

46 ValidationError, Bool) 

47from pyrocko.guts_array import Array 

48 

49from pyrocko import moment_tensor as pmt 

50from pyrocko import trace, util, config, model, eikonal_ext 

51from pyrocko.orthodrome import ne_to_latlon 

52from pyrocko.model import Location 

53from pyrocko.modelling import OkadaSource, make_okada_coefficient_matrix, \ 

54 okada_ext, invert_fault_dislocations_bem 

55 

56from . import meta, store, ws 

57from .tractions import TractionField, DirectedTractions 

58from .targets import Target, StaticTarget, SatelliteTarget 

59 

# Shorthand for joining filesystem paths.
pjoin = os.path.join

# Prefix for guts type names defined in this module.
guts_prefix = 'pf'

# Angle conversion factors and distance unit.
d2r = math.pi / 180.  # degrees -> radians
r2d = 180. / math.pi  # radians -> degrees
km = 1e3  # kilometre in [m]

logger = logging.getLogger('pyrocko.gf.seismosizer')

69 

70 

def cmp_none_aware(a, b):
    '''
    Three-way comparison tolerating ``None`` values.

    ``None`` sorts before any other value. Tuples are compared
    element-wise, recursively. Returns -1, 0 or 1.
    '''
    if isinstance(a, tuple) and isinstance(b, tuple):
        for pair in zip(a, b):
            result = cmp_none_aware(*pair)
            if result:
                return result

        return 0

    if a is None:
        return 0 if b is None else -1

    if b is None:
        return 1

    return bool(a > b) - bool(a < b)

93 

94 

def xtime():
    '''
    Get the current system time (thin indirection over ``time.time``).
    '''
    return time.time()

97 

98 

class SeismosizerError(Exception):
    '''
    Base class for exceptions raised by the seismosizer module.
    '''
    pass

101 

102 

class BadRequest(SeismosizerError):
    '''
    Raised when an invalid request is made to the seismosizer.
    '''
    pass

105 

106 

class DuplicateStoreId(Exception):
    '''
    Raised when two GF stores with the same ID are encountered.
    '''
    pass

109 

110 

class NoDefaultStoreSet(Exception):
    '''
    Raised when a default GF store is required but none is configured.
    '''
    pass

113 

114 

class ConversionError(Exception):
    '''
    Raised when a source model cannot be converted to another type.
    '''
    pass

117 

118 

class NoSuchStore(BadRequest):
    '''
    Raised when a GF store with the requested ID cannot be found.
    '''

    def __init__(self, store_id=None, dirs=None):
        # store_id: the ID that was looked for (or None if unspecified)
        # dirs: iterable of directory paths that were searched (or None)
        BadRequest.__init__(self)
        self.store_id = store_id
        self.dirs = dirs

    def __str__(self):
        # Build a human-readable message, listing the searched folders
        # when they are known.
        if self.store_id is not None:
            rstr = 'no GF store with id "%s" found.' % self.store_id
        else:
            rstr = 'GF store not found.'

        if self.dirs is not None:
            rstr += ' Searched folders:\n %s' % '\n '.join(sorted(self.dirs))
        return rstr

135 

136 

def ufloat(s):
    '''
    Convert string to float, allowing SI unit suffixes ``k`` (1e3) and
    ``M`` (1e6), e.g. ``'10k'`` -> ``10000.0``.

    :param s: string to convert
    :returns: converted value as ``float``
    :raises ValueError: if the string is not a valid number or consists
        of a unit suffix only
    '''
    units = {
        'k': 1e3,
        'M': 1e6,
    }

    factor = 1.0
    number_part = s
    if s and s[-1] in units:
        factor = units[s[-1]]
        number_part = s[:-1]
        if not number_part:
            # Report the original input: the stripped string is always
            # empty at this point and would make the message useless.
            raise ValueError("unit without a number: '%s'" % s)

    return float(number_part) * factor

151 

152 

def ufloat_or_none(s):
    '''
    Like :py:func:`ufloat` but map empty or ``None`` input to ``None``.
    '''
    return ufloat(s) if s else None

158 

159 

def int_or_none(s):
    '''
    Convert to ``int``, mapping empty or ``None`` input to ``None``.
    '''
    return int(s) if s else None

165 

166 

def nonzero(x, eps=1e-15):
    '''
    Check whether ``x`` differs from zero by more than ``eps``.
    '''
    return abs(x) > eps

169 

170 

def permudef(ln, j=0):
    '''
    Recursively yield all combinations of ``(key, value)`` choices.

    ``ln`` is a list of ``(key, values)`` pairs. For every combination,
    the list is modified in place so each position holds a single
    ``(key, value)`` pair, and the list itself is yielded. The original
    ``(key, values)`` entries are restored before returning.
    '''
    if j >= len(ln):
        yield ln
        return

    key, choices = ln[j]
    for choice in choices:
        ln[j] = key, choice
        yield from permudef(ln, j + 1)

    # Restore the full choice list at this position.
    ln[j] = key, choices

183 

184 

def arr(x):
    '''
    Coerce ``x`` into a numpy array with at least one dimension.
    '''
    a = num.asarray(x)
    return num.atleast_1d(a)

187 

188 

def discretize_rect_source(deltas, deltat, time, north, east, depth,
                           strike, dip, length, width,
                           anchor, velocity=None, stf=None,
                           nucleation_x=None, nucleation_y=None,
                           decimation_factor=1, pointsonly=False,
                           plane_coords=False,
                           aggressive_oversampling=False):
    '''
    Discretize a rectangular source plane into point sub-sources.

    The plane is sampled so that the sub-source spacing is compatible
    with the GF store spacings ``deltas`` and, in time mode, with the
    distance travelled per sample ``deltat * velocity``. Sub-source
    onset times are computed from the in-plane distance to the
    nucleation point divided by the rupture ``velocity``, and the
    source time function ``stf`` is discretized on top of that.

    :param anchor: anchor point name (key into ``map_anchor``)
    :param velocity: rupture velocity [m/s]; required unless
        ``pointsonly`` is true
    :param nucleation_x: along-strike in-plane coordinate of the
        nucleation point, in the same units as the discretized points
    :param decimation_factor: reduce the number of sub-sources by this
        factor (coarser sampling)
    :param pointsonly: if true, return only ``(points, dl, dw, nl, nw)``
    :param plane_coords: if true, return points in unrotated in-plane
        coordinates
    :returns: ``(points2, times2, amplitudes2, dl, dw, nl, nw)`` in time
        mode, where each sub-source is repeated for each STF sample
    '''

    if stf is None:
        stf = STF()

    if not velocity and not pointsonly:
        raise AttributeError('velocity is required in time mode')

    mindeltagf = float(num.min(deltas))
    if velocity:
        # In time mode, also resolve the rupture propagation in time.
        mindeltagf = min(mindeltagf, deltat * velocity)

    ln = length
    wd = width

    if aggressive_oversampling:
        nl = int((2. / decimation_factor) * num.ceil(ln / mindeltagf)) + 1
        nw = int((2. / decimation_factor) * num.ceil(wd / mindeltagf)) + 1
    else:
        nl = int((1. / decimation_factor) * num.ceil(ln / mindeltagf)) + 1
        nw = int((1. / decimation_factor) * num.ceil(wd / mindeltagf)) + 1

    n = int(nl * nw)

    dl = ln / nl
    dw = wd / nw

    # Sub-source centers in in-plane coordinates, centered on the plane.
    xl = num.linspace(-0.5 * (ln - dl), 0.5 * (ln - dl), nl)
    xw = num.linspace(-0.5 * (wd - dw), 0.5 * (wd - dw), nw)

    points = num.zeros((n, 3))
    points[:, 0] = num.tile(xl, nw)
    points[:, 1] = num.repeat(xw, nl)

    if nucleation_x is not None:
        dist_x = num.abs(nucleation_x - points[:, 0])
    else:
        dist_x = num.zeros(n)

    if nucleation_y is not None:
        dist_y = num.abs(nucleation_y - points[:, 1])
    else:
        dist_y = num.zeros(n)

    dist = num.sqrt(dist_x**2 + dist_y**2)
    if velocity:
        times = dist / velocity
    else:
        # Geometry-only use (pointsonly/plane_coords) may omit velocity;
        # guard against division by None. times is unused in that case.
        times = num.zeros(n)

    anch_x, anch_y = map_anchor[anchor]

    points[:, 0] -= anch_x * 0.5 * length
    points[:, 1] -= anch_y * 0.5 * width

    if plane_coords:
        return points, dl, dw, nl, nw

    # Rotate the plane according to dip and strike, then shift it to
    # the absolute (north, east, depth) position.
    rotmat = pmt.euler_to_matrix(dip * d2r, strike * d2r, 0.0)
    points = num.dot(rotmat.T, points.T).T

    points[:, 0] += north
    points[:, 1] += east
    points[:, 2] += depth

    if pointsonly:
        return points, dl, dw, nl, nw

    # Combine the spatial discretization with the source time function:
    # each sub-source is duplicated for every STF sample.
    xtau, amplitudes = stf.discretize_t(deltat, time)
    nt = xtau.size

    points2 = num.repeat(points, nt, axis=0)
    times2 = (times[:, num.newaxis] + xtau[num.newaxis, :]).ravel()
    amplitudes2 = num.tile(amplitudes, n)

    return points2, times2, amplitudes2, dl, dw, nl, nw

268 

269 

def check_rect_source_discretisation(points2, nl, nw, store):
    '''
    Check whether a discretized rectangular source is sampled densely
    enough for the given GF store.

    Compares the spacing between neighbouring sub-sources against half
    the minimum S-wavelength resolvable by the store at each depth.
    Returns ``True`` if the discretisation is acceptable.
    '''
    # We assume a non-rotated fault plane
    N_CRITICAL = 8
    points = points2.T.reshape((3, nl, nw))
    if points.size <= N_CRITICAL:
        # Too few sub-sources to judge spacing; warn and accept.
        logger.warning('RectangularSource is defined by only %d sub-sources!'
                       % points.size)
        return True

    # Distance between adjacent rows of sub-sources.
    distances = num.sqrt(
        (points[0, 0, :] - points[0, 1, :])**2 +
        (points[1, 0, :] - points[1, 1, :])**2 +
        (points[2, 0, :] - points[2, 1, :])**2)

    depths = points[2, 0, :]
    # Shear-wave velocity profile at the sub-source depths.
    # NOTE(review): lat/lon are fixed to (0, 0) here - presumably the
    # store's velocity model is 1-D so the location is irrelevant;
    # confirm.
    vs_profile = store.config.get_vs(
        lat=0., lon=0.,
        points=num.repeat(depths[:, num.newaxis], 3, axis=1),
        interpolation='multilinear')

    # Shortest resolvable wavelength at the store's Nyquist period.
    min_wavelength = vs_profile * (store.config.deltat * 2)
    if not num.all(min_wavelength > distances / 2):
        return False
    return True

294 

295 

def outline_rect_source(strike, dip, length, width, anchor):
    '''
    Get the outline of a rectangular source plane.

    Returns a closed polygon of five points (first point repeated) in a
    north-east-down frame, rotated by strike and dip and shifted
    according to the anchor point.
    '''
    hl = 0.5 * length
    hw = 0.5 * width
    corners = num.array(
        [[-hl, -hw, 0.],
         [hl, -hw, 0.],
         [hl, hw, 0.],
         [-hl, hw, 0.],
         [-hl, -hw, 0.]])

    anch_x, anch_y = map_anchor[anchor]
    corners[:, 0] -= anch_x * 0.5 * length
    corners[:, 1] -= anch_y * 0.5 * width

    rotmat = pmt.euler_to_matrix(dip * d2r, strike * d2r, 0.0)

    return num.dot(rotmat.T, corners.T).T

313 

314 

def from_plane_coords(
        strike, dip, length, width, depth, x_plane_coords, y_plane_coords,
        lat=0., lon=0.,
        north_shift=0, east_shift=0,
        anchor='top', cs='xy'):
    '''
    Convert normalized in-plane coordinates to absolute coordinates.

    ``x_plane_coords``/``y_plane_coords`` (scalars or lists) scale the
    plane's half-length and half-width; the resulting points are shifted
    by the anchor, rotated by strike and dip and translated by
    ``north_shift``/``east_shift``/``depth``. Depending on ``cs``, the
    result is returned as plane-local ``xy`` or geographic
    ``latlon``/``lonlat`` coordinate lists.
    '''

    ln = length
    wd = width
    x_abs = []
    y_abs = []
    if not isinstance(x_plane_coords, list):
        x_plane_coords = [x_plane_coords]
        y_plane_coords = [y_plane_coords]

    for x_plane, y_plane in zip(x_plane_coords, y_plane_coords):
        points = num.array(
            [[-0.5 * ln * x_plane, -0.5 * wd * y_plane, 0.],
             [0.5 * ln * x_plane, -0.5 * wd * y_plane, 0.],
             [0.5 * ln * x_plane, 0.5 * wd * y_plane, 0.],
             [-0.5 * ln * x_plane, 0.5 * wd * y_plane, 0.],
             [-0.5 * ln * x_plane, -0.5 * wd * y_plane, 0.]])

        anch_x, anch_y = map_anchor[anchor]
        points[:, 0] -= anch_x * 0.5 * length
        points[:, 1] -= anch_y * 0.5 * width

        rotmat = pmt.euler_to_matrix(dip * d2r, strike * d2r, 0.0)

        points = num.dot(rotmat.T, points.T).T
        points[:, 0] += north_shift
        points[:, 1] += east_shift
        points[:, 2] += depth
        if cs in ('latlon', 'lonlat'):
            latlon = ne_to_latlon(lat, lon,
                                  points[:, 0], points[:, 1])
            latlon = num.array(latlon).T
            # NOTE(review): only single corner points (rows 1 and 2 of
            # the 5-point polygon) are extracted here - presumably these
            # correspond to the requested plane coordinate; confirm
            # against callers.
            x_abs.append(latlon[1:2, 1])
            y_abs.append(latlon[2:3, 0])
        if cs == 'xy':
            x_abs.append(points[1:2, 1])
            y_abs.append(points[2:3, 0])

    if cs == 'lonlat':
        return y_abs, x_abs
    else:
        return x_abs, y_abs

361 

362 

def points_on_rect_source(
        strike, dip, length, width, anchor,
        discretized_basesource=None, points_x=None, points_y=None):
    '''
    Get points on a rectangular source plane in rotated coordinates.

    Points are taken either from the patch-corner grid of
    ``discretized_basesource`` or from normalized plane coordinates
    ``points_x``/``points_y`` in ``[-1., 1.]``. They are scaled to the
    plane dimensions, shifted by the anchor and rotated by strike and
    dip.

    :returns: array of shape ``(npoints, 3)``
    '''

    ln = length
    wd = width

    # Accept scalars or lists for the normalized point coordinates.
    # NOTE(review): if neither discretized_basesource nor
    # points_x/points_y are given, the elif below fails on None -
    # callers appear to always supply one of the two; confirm.
    if isinstance(points_x, list) or isinstance(points_x, float):
        points_x = num.array([points_x])
    if isinstance(points_y, list) or isinstance(points_y, float):
        points_y = num.array([points_y])

    if discretized_basesource:
        ds = discretized_basesource

        # One more corner than patches along each direction.
        nl_patches = ds.nl + 1
        nw_patches = ds.nw + 1

        npoints = nl_patches * nw_patches
        points = num.zeros((npoints, 3))
        ln_patches = num.array([il for il in range(nl_patches)])
        wd_patches = num.array([iw for iw in range(nw_patches)])

        # Normalize the corner indices to [-1., 1.].
        points_ln =\
            2 * ((ln_patches - num.min(ln_patches)) / num.ptp(ln_patches)) - 1
        points_wd =\
            2 * ((wd_patches - num.min(wd_patches)) / num.ptp(wd_patches)) - 1

        for il in range(nl_patches):
            for iw in range(nw_patches):
                points[il * nw_patches + iw, :] = num.array([
                    points_ln[il] * ln * 0.5,
                    points_wd[iw] * wd * 0.5, 0.0])

    elif points_x.shape[0] > 0 and points_y.shape[0] > 0:
        # Scale the normalized coordinates to plane half-dimensions.
        points = num.zeros(shape=((len(points_x), 3)))
        for i, (x, y) in enumerate(zip(points_x, points_y)):
            points[i, :] = num.array(
                [x * 0.5 * ln, y * 0.5 * wd, 0.0])

    anch_x, anch_y = map_anchor[anchor]

    points[:, 0] -= anch_x * 0.5 * ln
    points[:, 1] -= anch_y * 0.5 * wd

    rotmat = pmt.euler_to_matrix(dip * d2r, strike * d2r, 0.0)

    return num.dot(rotmat.T, points.T).T

411 

412 

class InvalidGridDef(Exception):
    '''
    Raised when a grid or range specification cannot be parsed or used.
    '''
    pass

415 

416 

class Range(SObject):
    '''
    Convenient range specification.

    Equivalent ways to specify the range [ 0., 1000., ... 10000. ]::

      Range('0 .. 10k : 1k')
      Range(start=0., stop=10e3, step=1e3)
      Range(0, 10e3, 1e3)
      Range('0 .. 10k @ 11')
      Range(start=0., stop=10*km, n=11)

      Range(0, 10e3, n=11)
      Range(values=[x*1e3 for x in range(11)])

    Depending on the use context, it can be possible to omit any part of the
    specification. E.g. in the context of extracting a subset of an already
    existing range, the existing range's specification values would be filled
    in where missing.

    The values are distributed with equal spacing, unless the ``spacing``
    argument is modified. The values can be created offset or relative to an
    external base value with the ``relative`` argument if the use context
    supports this.

    The range specification can be expressed with a short string
    representation::

        'start .. stop @ num | spacing, relative'
        'start .. stop : step | spacing, relative'

    most parts of the expression can be omitted if not needed. Whitespace is
    allowed for readability but can also be omitted.
    '''

    start = Float.T(optional=True)
    stop = Float.T(optional=True)
    step = Float.T(optional=True)
    n = Int.T(optional=True)
    values = Array.T(optional=True, dtype=float, shape=(None,))

    spacing = StringChoice.T(
        choices=['lin', 'log', 'symlog'],
        default='lin',
        optional=True)

    relative = StringChoice.T(
        choices=['', 'add', 'mult'],
        default='',
        optional=True)

    pattern = re.compile(r'^((?P<start>.*)\.\.(?P<stop>[^@|:]*))?'
                         r'(@(?P<n>[^|]+)|:(?P<step>[^|]+))?'
                         r'(\|(?P<stuff>.+))?$')

    def __init__(self, *args, **kwargs):
        d = {}
        if len(args) == 1:
            # Single positional argument: a shorthand string.
            d = self.parse(args[0])
        elif len(args) in (2, 3):
            d['start'], d['stop'] = [float(x) for x in args[:2]]
            if len(args) == 3:
                d['step'] = float(args[2])

        for k, v in kwargs.items():
            if k in d:
                raise ArgumentError('%s specified more than once' % k)

            d[k] = v

        SObject.__init__(self, **d)

    def __str__(self):
        def sfloat(x):
            if x is not None:
                return '%g' % x
            else:
                return ''

        # Truth-testing a multi-element numpy array raises ValueError,
        # so guard explicitly with ``is not None`` and ``len``.
        if self.values is not None and len(self.values):
            return ','.join('%g' % x for x in self.values)

        if self.start is None and self.stop is None:
            s0 = ''
        else:
            s0 = '%s .. %s' % (sfloat(self.start), sfloat(self.stop))

        s1 = ''
        if self.step is not None:
            s1 = [' : %g', ':%g'][s0 == ''] % self.step
        elif self.n is not None:
            s1 = [' @ %i', '@%i'][s0 == ''] % self.n

        if self.spacing == 'lin' and self.relative == '':
            s2 = ''
        else:
            x = []
            if self.spacing != 'lin':
                x.append(self.spacing)

            if self.relative != '':
                x.append(self.relative)

            s2 = ' | %s' % ','.join(x)

        return s0 + s1 + s2

    @classmethod
    def parse(cls, s):
        '''
        Parse a shorthand range string into constructor arguments.

        :raises InvalidGridDef: if the string is not a valid range
            specification
        '''
        s = re.sub(r'\s+', '', s)
        m = cls.pattern.match(s)
        if not m:
            # Fall back to a plain comma-separated list of values.
            try:
                vals = [ufloat(x) for x in s.split(',')]
            except Exception:
                raise InvalidGridDef(
                    '"%s" is not a valid range specification' % s)

            return dict(values=num.array(vals, dtype=float))

        d = m.groupdict()
        try:
            start = ufloat_or_none(d['start'])
            stop = ufloat_or_none(d['stop'])
            step = ufloat_or_none(d['step'])
            n = int_or_none(d['n'])
        except Exception:
            raise InvalidGridDef(
                '"%s" is not a valid range specification' % s)

        spacing = 'lin'
        relative = ''

        if d['stuff'] is not None:
            t = d['stuff'].split(',')
            for x in t:
                if x in cls.spacing.choices:
                    spacing = x
                elif x and x in cls.relative.choices:
                    relative = x
                else:
                    raise InvalidGridDef(
                        '"%s" is not a valid range specification' % s)

        return dict(start=start, stop=stop, step=step, n=n, spacing=spacing,
                    relative=relative)

    def make(self, mi=None, ma=None, inc=None, base=None, eps=1e-5):
        '''
        Create the range values.

        Missing specification parts are filled in from ``mi``, ``ma``
        and ``inc``. ``base`` is required for relative ranges.

        :returns: list of floats (or the explicit ``values`` array if
            one was given)
        :raises InvalidGridDef: on incomplete or inconsistent
            specifications
        '''
        # Guard with ``is not None``/``len`` - truth-testing a
        # multi-element numpy array raises ValueError.
        if self.values is not None and len(self.values):
            return self.values

        start = self.start
        stop = self.stop
        step = self.step
        n = self.n

        swap = step is not None and step < 0.
        if start is None:
            start = [mi, ma][swap]
        if stop is None:
            stop = [ma, mi][swap]
        if step is None and inc is not None:
            step = [inc, -inc][ma < mi]

        if start is None or stop is None:
            raise InvalidGridDef(
                'Cannot use range specification "%s" without start '
                'and stop in this context' % self)

        if step is None and n is None:
            step = stop - start

        if n is None:
            if (step < 0) != (stop - start < 0):
                raise InvalidGridDef(
                    'Range specification "%s" has inconsistent ordering '
                    '(step < 0 => stop > start)' % self)

            n = int(round((stop - start) / step)) + 1
            stop2 = start + (n - 1) * step
            if abs(stop - stop2) > eps:
                # Step does not evenly divide the interval: shorten the
                # range to the last full step.
                n = int(math.floor((stop - start) / step)) + 1
                stop = start + (n - 1) * step
            else:
                stop = stop2

        if start == stop:
            n = 1

        if self.spacing == 'lin':
            vals = num.linspace(start, stop, n)

        elif self.spacing in ('log', 'symlog'):
            if start > 0. and stop > 0.:
                vals = num.exp(num.linspace(num.log(start),
                                            num.log(stop), n))
            elif start < 0. and stop < 0.:
                vals = -num.exp(num.linspace(num.log(-start),
                                             num.log(-stop), n))
            else:
                raise InvalidGridDef(
                    'Log ranges should not include or cross zero '
                    '(in range specification "%s").' % self)

            if self.spacing == 'symlog':
                # Mirror the values around zero.
                nvals = - vals
                vals = num.concatenate((nvals[::-1], vals))

        if self.relative in ('add', 'mult') and base is None:
            raise InvalidGridDef(
                'Cannot use relative range specification in this context.')

        vals = self.make_relative(base, vals)

        return list(map(float, vals))

    def make_relative(self, base, vals):
        '''
        Apply the ``relative`` mode to ``vals`` in place, using ``base``
        as offset (``'add'``) or factor (``'mult'``).
        '''
        if self.relative == 'add':
            vals += base

        if self.relative == 'mult':
            vals *= base

        return vals

641 

642 

class GridDefElement(Object):
    '''
    One dimension of a grid definition: a parameter name tied to a
    :py:class:`Range`.

    Can be created from a shorthand string of the form
    ``'param = range-spec'``.
    '''

    param = meta.StringID.T()
    rs = Range.T()

    def __init__(self, shorthand=None, **kwargs):
        if shorthand is not None:
            t = shorthand.split('=')
            if len(t) != 2:
                raise InvalidGridDef(
                    'Invalid grid specification element: %s' % shorthand)

            sp, sr = t[0].strip(), t[1].strip()

            kwargs['param'] = sp
            kwargs['rs'] = Range(sr)

        Object.__init__(self, **kwargs)

    def shorthand(self):
        # Inverse of the shorthand constructor form.
        return self.param + ' = ' + str(self.rs)

664 

665 

class GridDef(Object):
    '''
    Definition of a parameter grid as a collection of
    :py:class:`GridDefElement` entries.

    Can be created from a shorthand string where elements are separated
    by line breaks or ``';'``, e.g. ``'depth = 0 .. 10k : 1k'``.
    '''

    elements = List.T(GridDefElement.T())

    def __init__(self, shorthand=None, **kwargs):
        if shorthand is not None:
            t = shorthand.splitlines()
            tt = []
            for x in t:
                x = x.strip()
                if x:
                    tt.extend(x.split(';'))

            elements = []
            for se in tt:
                # Each ';'-separated chunk is a single 'param = range'
                # element. (Previously this wrongly constructed a nested
                # GridDef, which does not match the declared element
                # type GridDefElement and mis-parses the chunk.)
                elements.append(GridDefElement(se))

            kwargs['elements'] = elements

        Object.__init__(self, **kwargs)

    def shorthand(self):
        # NOTE(review): str(x) yields the guts representation of each
        # element; x.shorthand() may be intended here - confirm.
        return '; '.join(str(x) for x in self.elements)

689 

690 

class Cloneable(object):
    '''
    Mix-in providing dict-like access to guts properties and cloning.

    Assumes the deriving class exposes its property names via
    ``cls.T.propnames``.
    '''

    def __iter__(self):
        # Iterate over the guts property names.
        return iter(self.T.propnames)

    def __getitem__(self, k):
        if k not in self.keys():
            raise KeyError(k)

        return getattr(self, k)

    def __setitem__(self, k, v):
        if k not in self.keys():
            raise KeyError(k)

        return setattr(self, k, v)

    def clone(self, **kwargs):
        '''
        Make a copy of the object.

        A new object of the same class is created and initialized with the
        parameters of the object on which this method is called on. If
        ``kwargs`` are given, these are used to override any of the
        initialization parameters.
        '''

        d = dict(self)
        for k in d:
            v = d[k]
            if isinstance(v, Cloneable):
                # Recursively clone nested objects so the copy does not
                # share mutable state with the original.
                d[k] = v.clone()

        d.update(kwargs)
        return self.__class__(**d)

    @classmethod
    def keys(cls):
        '''
        Get list of the source model's parameter names.
        '''

        return cls.T.propnames

734 

735 

class STF(Object, Cloneable):

    '''
    Base class for source time functions.

    The base class itself acts as a unit pulse: all energy is released
    at (or interpolated around) the reference time.
    '''

    def __init__(self, effective_duration=None, **kwargs):
        # Allow construction from an "effective" duration; subclasses
        # provide the conversion factor.
        if effective_duration is not None:
            kwargs['duration'] = effective_duration / \
                self.factor_duration_to_effective()

        Object.__init__(self, **kwargs)

    @classmethod
    def factor_duration_to_effective(cls):
        # Ratio between nominal and effective duration; identity for the
        # base class.
        return 1.0

    def centroid_time(self, tref):
        # The unit pulse's centroid coincides with the reference time.
        return tref

    @property
    def effective_duration(self):
        # NOTE: relies on the subclass defining a ``duration`` property.
        return self.duration * self.factor_duration_to_effective()

    def discretize_t(self, deltat, tref):
        '''
        Discretize the STF on the sampling grid.

        For the base class this is a unit pulse: if ``tref`` falls on a
        grid node, a single sample of weight one; otherwise the weight
        is split linearly between the two neighbouring samples.

        :returns: ``(times, amplitudes)`` arrays
        '''
        tl = math.floor(tref / deltat) * deltat
        th = math.ceil(tref / deltat) * deltat
        if tl == th:
            return num.array([tl], dtype=float), num.ones(1)
        else:
            return (
                num.array([tl, th], dtype=float),
                num.array([th - tref, tref - tl], dtype=float) / deltat)

    def base_key(self):
        # Key identifying the STF configuration for caching purposes.
        return (type(self).__name__,)

772 

773 

# Shared default STF instance: a unit pulse source time function.
g_unit_pulse = STF()

775 

776 

def sshift(times, amplitudes, tshift, deltat):
    '''
    Time-shift sampled amplitudes by ``tshift`` on the sampling grid.

    A shift which is an exact multiple of ``deltat`` returns the inputs
    unchanged. Otherwise each amplitude is split linearly between the
    two neighbouring samples, and the result has one extra sample.
    '''
    t_floor = math.floor(tshift / deltat) * deltat
    t_ceil = math.ceil(tshift / deltat) * deltat
    if t_floor == t_ceil:
        return times, amplitudes

    w_left = (t_ceil - tshift) / deltat
    w_right = (tshift - t_floor) / deltat

    shifted = num.zeros(amplitudes.size + 1, dtype=float)
    shifted[:-1] += w_left * amplitudes
    shifted[1:] += w_right * amplitudes

    new_times = num.arange(times.size + 1, dtype=float) * \
        deltat + times[0] + t_floor

    return new_times, shifted

793 

794 

class BoxcarSTF(STF):

    '''
    Boxcar type source time function.

    .. figure :: /static/stf-BoxcarSTF.svg
        :width: 40%
        :align: center
        :alt: boxcar source time function
    '''

    duration = Float.T(
        default=0.0,
        help='duration of the boxcar')

    anchor = Float.T(
        default=0.0,
        help='anchor point with respect to source.time: ('
             '-1.0: left -> source duration [0, T] ~ hypocenter time, '
             ' 0.0: center -> source duration [-T/2, T/2] ~ centroid time, '
             '+1.0: right -> source duration [-T, 0] ~ rupture end time)')

    @classmethod
    def factor_duration_to_effective(cls):
        # For a boxcar, nominal and effective duration coincide.
        return 1.0

    def centroid_time(self, tref):
        return tref - 0.5 * self.duration * self.anchor

    def discretize_t(self, deltat, tref):
        '''
        Discretize the boxcar onto the sampling grid.

        Sample weights are the integrals of the boxcar over each sample
        interval, normalized to sum to one; a final sub-sample shift
        aligns the discrete centroid with :py:meth:`centroid_time`.
        '''
        tmin_stf = tref - self.duration * (self.anchor + 1.) * 0.5
        tmax_stf = tref + self.duration * (1. - self.anchor) * 0.5
        tmin = round(tmin_stf / deltat) * deltat
        tmax = round(tmax_stf / deltat) * deltat
        nt = int(round((tmax - tmin) / deltat)) + 1
        times = num.linspace(tmin, tmax, nt)
        amplitudes = num.ones_like(times)
        if times.size > 1:
            t_edges = num.linspace(
                tmin - 0.5 * deltat, tmax + 0.5 * deltat, nt + 1)
            # Piecewise-linear description of the boxcar: zero-width
            # ramps at both ends, plateau of height one in between.
            t = tmin_stf + self.duration * num.array(
                [0.0, 0.0, 1.0, 1.0], dtype=float)
            f = num.array([0., 1., 1., 0.], dtype=float)
            amplitudes = util.plf_integrate_piecewise(t_edges, t, f)
            amplitudes /= num.sum(amplitudes)

        # Correct for the residual centroid offset introduced by grid
        # rounding.
        tshift = (num.sum(amplitudes * times) - self.centroid_time(tref))

        return sshift(times, amplitudes, -tshift, deltat)

    def base_key(self):
        return (type(self).__name__, self.duration, self.anchor)

847 

848 

class TriangularSTF(STF):

    '''
    Triangular type source time function.

    .. figure :: /static/stf-TriangularSTF.svg
        :width: 40%
        :align: center
        :alt: triangular source time function
    '''

    duration = Float.T(
        default=0.0,
        help='baseline of the triangle')

    peak_ratio = Float.T(
        default=0.5,
        help='fraction of time compared to duration, '
             'when the maximum amplitude is reached')

    anchor = Float.T(
        default=0.0,
        help='anchor point with respect to source.time: ('
             '-1.0: left -> source duration [0, T] ~ hypocenter time, '
             ' 0.0: center -> source duration [-T/2, T/2] ~ centroid time, '
             '+1.0: right -> source duration [-T, 0] ~ rupture end time)')

    @classmethod
    def factor_duration_to_effective(cls, peak_ratio=None):
        # Conversion factor between the triangle's baseline duration and
        # its effective duration; depends on where the peak sits.
        if peak_ratio is None:
            peak_ratio = cls.peak_ratio.default()

        return math.sqrt((peak_ratio**2 - peak_ratio + 1.0) * 2.0 / 3.0)

    def __init__(self, effective_duration=None, **kwargs):
        # Allow construction via effective duration, converting with the
        # peak-ratio dependent factor.
        if effective_duration is not None:
            kwargs['duration'] = effective_duration / \
                self.factor_duration_to_effective(
                    kwargs.get('peak_ratio', None))

        STF.__init__(self, **kwargs)

    @property
    def centroid_ratio(self):
        # Position of the triangle's centroid along the baseline as a
        # fraction of the duration.
        ra = self.peak_ratio
        rb = 1.0 - ra
        return self.peak_ratio + (rb**2 / 3. - ra**2 / 3.) / (ra + rb)

    def centroid_time(self, tref):
        ca = self.centroid_ratio
        cb = 1.0 - ca
        if self.anchor <= 0.:
            return tref - ca * self.duration * self.anchor
        else:
            return tref - cb * self.duration * self.anchor

    @property
    def effective_duration(self):
        return self.duration * self.factor_duration_to_effective(
            self.peak_ratio)

    def tminmax_stf(self, tref):
        '''
        Get the start and end time of the triangle for reference time
        ``tref``, honoring the anchor setting.
        '''
        ca = self.centroid_ratio
        cb = 1.0 - ca
        if self.anchor <= 0.:
            tmin_stf = tref - ca * self.duration * (self.anchor + 1.)
            tmax_stf = tmin_stf + self.duration
        else:
            tmax_stf = tref + cb * self.duration * (1. - self.anchor)
            tmin_stf = tmax_stf - self.duration

        return tmin_stf, tmax_stf

    def discretize_t(self, deltat, tref):
        '''
        Discretize the triangle onto the sampling grid.

        Sample weights are integrals of the triangle over each sample
        interval, normalized to sum to one.
        '''
        tmin_stf, tmax_stf = self.tminmax_stf(tref)

        tmin = round(tmin_stf / deltat) * deltat
        tmax = round(tmax_stf / deltat) * deltat
        nt = int(round((tmax - tmin) / deltat)) + 1
        if nt > 1:
            t_edges = num.linspace(
                tmin - 0.5 * deltat, tmax + 0.5 * deltat, nt + 1)
            # Piecewise-linear triangle: rises to one at peak_ratio,
            # falls back to zero at the end of the baseline.
            t = tmin_stf + self.duration * num.array(
                [0.0, self.peak_ratio, 1.0], dtype=float)
            f = num.array([0., 1., 0.], dtype=float)
            amplitudes = util.plf_integrate_piecewise(t_edges, t, f)
            amplitudes /= num.sum(amplitudes)
        else:
            amplitudes = num.ones(1)

        times = num.linspace(tmin, tmax, nt)
        return times, amplitudes

    def base_key(self):
        return (
            type(self).__name__, self.duration, self.peak_ratio, self.anchor)

945 

946 

class HalfSinusoidSTF(STF):

    '''
    Half sinusoid type source time function.

    .. figure :: /static/stf-HalfSinusoidSTF.svg
        :width: 40%
        :align: center
        :alt: half-sinusouid source time function
    '''

    duration = Float.T(
        default=0.0,
        help='duration of the half-sinusoid (baseline)')

    anchor = Float.T(
        default=0.0,
        help='anchor point with respect to source.time: ('
             '-1.0: left -> source duration [0, T] ~ hypocenter time, '
             ' 0.0: center -> source duration [-T/2, T/2] ~ centroid time, '
             '+1.0: right -> source duration [-T, 0] ~ rupture end time)')

    exponent = Int.T(
        default=1,
        help='set to 2 to use square of the half-period sinusoidal function.')

    def __init__(self, effective_duration=None, **kwargs):
        # Allow construction via effective duration, converting with the
        # exponent-dependent factor.
        if effective_duration is not None:
            kwargs['duration'] = effective_duration / \
                self.factor_duration_to_effective(
                    kwargs.get('exponent', 1))

        STF.__init__(self, **kwargs)

    @classmethod
    def factor_duration_to_effective(cls, exponent):
        # Conversion factor between nominal and effective duration for
        # the plain (exponent 1) and squared (exponent 2) half-sinusoid.
        if exponent == 1:
            return math.sqrt(3.0 * math.pi**2 - 24.0) / math.pi
        elif exponent == 2:
            return math.sqrt(math.pi**2 - 6) / math.pi
        else:
            raise ValueError('Exponent for HalfSinusoidSTF must be 1 or 2.')

    @property
    def effective_duration(self):
        return self.duration * self.factor_duration_to_effective(self.exponent)

    def centroid_time(self, tref):
        return tref - 0.5 * self.duration * self.anchor

    def discretize_t(self, deltat, tref):
        '''
        Discretize the half-sinusoid onto the sampling grid.

        Sample weights are differences of the analytic antiderivative at
        the sample-interval edges, normalized to sum to one.
        '''
        tmin_stf = tref - self.duration * (self.anchor + 1.) * 0.5
        tmax_stf = tref + self.duration * (1. - self.anchor) * 0.5
        tmin = round(tmin_stf / deltat) * deltat
        tmax = round(tmax_stf / deltat) * deltat
        nt = int(round((tmax - tmin) / deltat)) + 1
        if nt > 1:
            # Clip edges to the STF support before evaluating the
            # antiderivative.
            t_edges = num.maximum(tmin_stf, num.minimum(tmax_stf, num.linspace(
                tmin - 0.5 * deltat, tmax + 0.5 * deltat, nt + 1)))

            if self.exponent == 1:
                fint = -num.cos(
                    (t_edges - tmin_stf) * (math.pi / self.duration))

            elif self.exponent == 2:
                fint = (t_edges - tmin_stf) / self.duration \
                    - 1.0 / (2.0 * math.pi) * num.sin(
                        (t_edges - tmin_stf) * (2.0 * math.pi / self.duration))
            else:
                raise ValueError(
                    'Exponent for HalfSinusoidSTF must be 1 or 2.')

            amplitudes = fint[1:] - fint[:-1]
            amplitudes /= num.sum(amplitudes)
        else:
            amplitudes = num.ones(1)

        times = num.linspace(tmin, tmax, nt)
        return times, amplitudes

    def base_key(self):
        return (type(self).__name__, self.duration, self.anchor)

1029 

1030 

class SmoothRampSTF(STF):
    '''
    Smooth-ramp type source time function for near-field displacement.
    Based on moment function of double-couple point source proposed by Bruestle
    and Mueller (PEPI, 1983).

    .. [1] W. Bruestle, G. Mueller (1983), Moment and duration of shallow
        earthquakes from Love-wave modelling for regional distances, PEPI 32,
        312-324.

    .. figure :: /static/stf-SmoothRampSTF.svg
        :width: 40%
        :alt: smooth ramp source time function
    '''
    duration = Float.T(
        default=0.0,
        help='duration of the ramp (baseline)')

    rise_ratio = Float.T(
        default=0.5,
        help='fraction of time compared to duration, '
             'when the maximum amplitude is reached')

    anchor = Float.T(
        default=0.0,
        help='anchor point with respect to source.time: ('
             '-1.0: left -> source duration ``[0, T]`` ~ hypocenter time, '
             '0.0: center -> source duration ``[-T/2, T/2]`` ~ centroid time, '
             '+1.0: right -> source duration ``[-T, 0]`` ~ rupture end time)')

    def discretize_t(self, deltat, tref):
        '''
        Discretize the smooth ramp onto the sampling grid.

        During the rise time the amplitudes follow the smooth onset of
        Bruestle and Mueller (1983); afterwards they stay constant.
        Weights are normalized to sum to one.
        '''
        tmin_stf = tref - self.duration * (self.anchor + 1.) * 0.5
        tmax_stf = tref + self.duration * (1. - self.anchor) * 0.5
        tmin = round(tmin_stf / deltat) * deltat
        tmax = round(tmax_stf / deltat) * deltat
        D = round((tmax - tmin) / deltat) * deltat
        nt = int(round(D / deltat)) + 1
        times = num.linspace(tmin, tmax, nt)
        if nt > 1:
            rise_time = self.rise_ratio * self.duration
            amplitudes = num.ones_like(times)
            tp = tmin + rise_time
            ii = num.where(times <= tp)
            t_inc = times[ii]
            # NOTE(review): the onset is evaluated relative to the exact
            # tmin_stf while the sample selection uses the rounded tmin;
            # presumably the sub-sample mismatch is acceptable - confirm.
            a = num.cos(num.pi * (t_inc - tmin_stf) / rise_time)
            b = num.cos(3 * num.pi * (t_inc - tmin_stf) / rise_time) - 1.0
            amplitudes[ii] = (9. / 16.) * (1 - a + (1. / 9.) * b)

            amplitudes /= num.sum(amplitudes)
        else:
            amplitudes = num.ones(1)

        return times, amplitudes

    def base_key(self):
        return (type(self).__name__,
                self.duration, self.rise_ratio, self.anchor)

1088 

1089 

class ResonatorSTF(STF):
    '''
    Simple resonator like source time function: an exponentially
    decaying sinusoid starting at the reference time.

    .. math ::

        f(t) = 0 for t < 0
        f(t) = e^{-t/tau} * sin(2 * pi * f * t)


    .. figure :: /static/stf-SmoothRampSTF.svg
        :width: 40%
        :alt: smooth ramp source time function

    '''

    duration = Float.T(
        default=0.0,
        help='decay time')

    frequency = Float.T(
        default=1.0,
        help='resonance frequency')

    def discretize_t(self, deltat, tref):
        '''
        Sample the decaying sinusoid from the reference time until three
        decay times have passed.
        '''
        t_start = math.floor(tref / deltat) * deltat
        t_end = math.ceil((tref + self.duration * 3) / deltat) * deltat
        times = util.arange2(t_start, t_end, deltat)
        trel = times - tref
        decay = num.exp(-trel / self.duration)
        oscillation = num.sin(2.0 * num.pi * self.frequency * trel)

        return times, decay * oscillation

    def base_key(self):
        return (type(self).__name__,
                self.duration, self.frequency)

1128 

1129 

class STFMode(StringChoice):
    '''
    Whether to apply the source time function in pre- or post-processing.
    '''
    choices = ['pre', 'post']

1132 

1133 

class Source(Location, Cloneable):
    '''
    Base class for all source models.
    '''

    # Optional human-readable label, carried over into exported events.
    name = String.T(optional=True, default='')

    time = Timestamp.T(
        default=Timestamp.D('1970-01-01 00:00:00'),
        help='source origin time.')

    stf = STF.T(
        optional=True,
        help='source time function.')

    stf_mode = STFMode.T(
        default='post',
        help='whether to apply source time function in pre or '
             'post-processing.')

    def __init__(self, **kwargs):
        Location.__init__(self, **kwargs)

    def update(self, **kwargs):
        '''
        Change some of the source models parameters.

        Keyword arguments are attribute names mapped to their new values.

        Example::

            >>> from pyrocko import gf
            >>> s = gf.DCSource()
            >>> s.update(strike=66., dip=33.)
            >>> print(s)
            --- !pf.DCSource
            depth: 0.0
            time: 1970-01-01 00:00:00
            magnitude: 6.0
            strike: 66.0
            dip: 33.0
            rake: 0.0

        '''

        for (k, v) in kwargs.items():
            # Item assignment is supplied by a base class (defined outside
            # this chunk) and routes to attribute assignment.
            self[k] = v

    def grid(self, **variables):
        '''
        Create grid of source model variations.

        :param variables: keyword arguments mapping parameter names to
            ranges of values to iterate over.
        :returns: :py:class:`SourceGrid` instance.

        Example::

            >>> from pyrocko import gf
            >>> base = DCSource()
            >>> R = gf.Range
            >>> for s in base.grid(strike=R(0., 360., 36.)):
            ...     pass  # doctest: +SKIP

        NOTE(review): the original example was truncated mid-line; the
        reconstruction above should be verified against the ``Range`` /
        :py:class:`SourceGrid` API.
        '''
        return SourceGrid(base=self, variables=variables)

    def base_key(self):
        '''
        Get key to decide about source discretization / GF stack sharing.

        When two source models differ only in amplitude and origin time, the
        discretization and the GF stacking can be done only once for a unit
        amplitude and a zero origin time and the amplitude and origin times of
        the seismograms can be applied during post-processing of the synthetic
        seismogram.

        For any derived parameterized source model, this method is called to
        decide if discretization and stacking of the source should be shared.
        When two source models return an equal vector of values discretization
        is shared.
        '''
        return (self.depth, self.lat, self.north_shift,
                self.lon, self.east_shift, self.time, type(self).__name__) + \
            self.effective_stf_pre().base_key()

    def get_factor(self):
        '''
        Get the scaling factor to be applied during post-processing.

        Discretization of the base seismogram is usually done for a unit
        amplitude, because a common factor can be efficiently multiplied to
        final seismograms. This eliminates to do repeat the stacking when
        creating seismograms for a series of source models only differing in
        amplitude.

        This method should return the scaling factor to apply in the
        post-processing (often this is simply the scalar moment of the source).
        '''

        return 1.0

    def effective_stf_pre(self):
        '''
        Return the STF applied before stacking of the Green's functions.

        This STF is used during discretization of the parameterized source
        models, i.e. to produce a temporal distribution of point sources.

        Handling of the STF before stacking of the GFs is less efficient but
        allows to use different source time functions for different parts of
        the source.
        '''

        if self.stf is not None and self.stf_mode == 'pre':
            return self.stf
        else:
            # Fall back to the shared unit-pulse STF instance.
            return g_unit_pulse

    def effective_stf_post(self):
        '''
        Return the STF applied after stacking of the Green's functions.

        This STF is used in the post-processing of the synthetic seismograms.

        Handling of the STF after stacking of the GFs is usually more efficient
        but is only possible when a common STF is used for all subsources.
        '''

        if self.stf is not None and self.stf_mode == 'post':
            return self.stf
        else:
            return g_unit_pulse

    def _dparams_base(self):
        # Parameters common to all discretized source representations, for a
        # single point in space and time.
        return dict(times=arr(self.time),
                    lat=self.lat, lon=self.lon,
                    north_shifts=arr(self.north_shift),
                    east_shifts=arr(self.east_shift),
                    depths=arr(self.depth))

    def _hash(self):
        # Stable hex digest over the base key items, usable as a cache
        # identifier for this source's discretization.
        sha = sha1()
        for k in self.base_key():
            sha.update(str(k).encode())
        return sha.hexdigest()

    def _dparams_base_repeated(self, times):
        # Like _dparams_base, but with the spatial coordinates repeated once
        # per entry of ``times`` (used when the STF spreads the point source
        # over several time samples).
        if times is None:
            return self._dparams_base()

        nt = times.size
        north_shifts = num.repeat(self.north_shift, nt)
        east_shifts = num.repeat(self.east_shift, nt)
        depths = num.repeat(self.depth, nt)
        return dict(times=times,
                    lat=self.lat, lon=self.lon,
                    north_shifts=north_shifts,
                    east_shifts=east_shifts,
                    depths=depths)

    def pyrocko_event(self, store=None, target=None, **kwargs):
        '''
        Convert the source model to a :py:class:`pyrocko.model.Event`.
        '''
        duration = None
        if self.stf:
            duration = self.stf.effective_duration

        return model.Event(
            lat=self.lat,
            lon=self.lon,
            north_shift=self.north_shift,
            east_shift=self.east_shift,
            time=self.time,
            name=self.name,
            depth=self.depth,
            duration=duration,
            **kwargs)

    def geometry(self, **kwargs):
        # To be provided by subclasses which have an extended geometry.
        raise NotImplementedError

    def outline(self, cs='xyz'):
        '''
        Get the source's (point) position in the requested coordinate system.

        :param cs: coordinate system name (see
            :ref:`coordinate-system-names`).
        '''
        points = num.atleast_2d(num.zeros([1, 3]))

        points[:, 0] += self.north_shift
        points[:, 1] += self.east_shift
        points[:, 2] += self.depth
        if cs == 'xyz':
            return points
        elif cs == 'xy':
            return points[:, :2]
        elif cs in ('latlon', 'lonlat'):
            latlon = ne_to_latlon(
                self.lat, self.lon, points[:, 0], points[:, 1])

            latlon = num.array(latlon).T
            if cs == 'latlon':
                return latlon
            else:
                # 'lonlat': swap the two columns.
                return latlon[:, ::-1]

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        '''
        Create a source object from a :py:class:`pyrocko.model.Event`.

        :raises ConversionError: if the event carries no depth information.
        '''
        if ev.depth is None:
            raise ConversionError(
                'Cannot convert event object to source object: '
                'no depth information available')

        stf = None
        if ev.duration is not None:
            stf = HalfSinusoidSTF(effective_duration=ev.duration)

        d = dict(
            name=ev.name,
            time=ev.time,
            lat=ev.lat,
            lon=ev.lon,
            north_shift=ev.north_shift,
            east_shift=ev.east_shift,
            depth=ev.depth,
            stf=stf)
        d.update(kwargs)
        return cls(**d)

    def get_magnitude(self):
        raise NotImplementedError(
            '%s does not implement get_magnitude()'
            % self.__class__.__name__)

1356 

1357 

class SourceWithMagnitude(Source):
    '''
    Base class for sources containing a moment magnitude.
    '''

    magnitude = Float.T(
        default=6.0,
        help='Moment magnitude Mw as in [Hanks and Kanamori, 1979]')

    def __init__(self, **kwargs):
        # Accept a scalar moment as alternative to a magnitude; an explicit
        # magnitude takes precedence when both are given.
        if 'moment' in kwargs:
            mom = kwargs.pop('moment')
            if 'magnitude' not in kwargs:
                kwargs['magnitude'] = float(pmt.moment_to_magnitude(mom))

        Source.__init__(self, **kwargs)

    @property
    def moment(self):
        '''
        Scalar moment, converted from ``magnitude``.
        '''
        return float(pmt.magnitude_to_moment(self.magnitude))

    @moment.setter
    def moment(self, value):
        self.magnitude = float(pmt.moment_to_magnitude(value))

    def pyrocko_event(self, store=None, target=None, **kwargs):
        '''
        Convert to :py:class:`pyrocko.model.Event`, attaching the magnitude.
        '''
        return Source.pyrocko_event(
            self, store, target,
            magnitude=self.magnitude,
            **kwargs)

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        d = {}
        # Fix: test against None explicitly — a magnitude of exactly 0.0 is
        # a valid value and must not be dropped by truthiness.
        if ev.magnitude is not None:
            d.update(magnitude=ev.magnitude)

        d.update(kwargs)
        return super(SourceWithMagnitude, cls).from_pyrocko_event(ev, **d)

    def get_magnitude(self):
        return self.magnitude

1400 

1401 

class DerivedMagnitudeError(ValidationError):
    '''
    Raised when a magnitude cannot be derived from the given parameters.
    '''
    pass

1404 

1405 

class SourceWithDerivedMagnitude(Source):
    '''
    Base class for sources whose magnitude is derived from other parameters.
    '''

    class __T(Source.T):

        def validate_extra(self, val):
            # Run the standard validation, then the subclass-specific
            # parameter conflict checks.
            Source.T.validate_extra(self, val)
            val.check_conflicts()

    def check_conflicts(self):
        '''
        Check for parameter conflicts.

        To be overloaded in subclasses. Raises :py:exc:`DerivedMagnitudeError`
        on conflicts.
        '''
        pass

    def get_magnitude(self, store=None, target=None):
        raise DerivedMagnitudeError('No magnitude set.')

    def get_moment(self, store=None, target=None):
        '''
        Get scalar moment, derived from :py:meth:`get_magnitude`.
        '''
        return float(pmt.magnitude_to_moment(
            self.get_magnitude(store, target)))

    def pyrocko_moment_tensor(self, store=None, target=None):
        raise NotImplementedError(
            '%s does not implement pyrocko_moment_tensor()'
            % self.__class__.__name__)

    def pyrocko_event(self, store=None, target=None, **kwargs):
        '''
        Convert to :py:class:`pyrocko.model.Event`, attaching moment tensor
        and magnitude when they can be derived.
        '''
        try:
            mt = self.pyrocko_moment_tensor(store, target)
            # Fix: forward store and target, so that magnitudes which can
            # only be derived with earth model information (e.g. from volume
            # change or slip in subclasses) are obtained consistently with
            # the moment tensor above.
            magnitude = self.get_magnitude(store, target)
        except (DerivedMagnitudeError, NotImplementedError):
            mt = None
            magnitude = None

        return Source.pyrocko_event(
            self, store, target,
            moment_tensor=mt,
            magnitude=magnitude,
            **kwargs)

1448 

1449 

class ExplosionSource(SourceWithDerivedMagnitude):
    '''
    An isotropic explosion point source.
    '''

    magnitude = Float.T(
        optional=True,
        help='moment magnitude Mw as in [Hanks and Kanamori, 1979]')

    volume_change = Float.T(
        optional=True,
        help='volume change of the explosion/implosion or '
             'the contracting/extending magmatic source. [m^3]')

    discretized_source_class = meta.DiscretizedExplosionSource

    def __init__(self, **kwargs):
        # Accept a scalar moment as alternative to a magnitude; an explicit
        # magnitude takes precedence when both are given.
        if 'moment' in kwargs:
            mom = kwargs.pop('moment')
            if 'magnitude' not in kwargs:
                kwargs['magnitude'] = float(pmt.moment_to_magnitude(mom))

        SourceWithDerivedMagnitude.__init__(self, **kwargs)

    def base_key(self):
        return SourceWithDerivedMagnitude.base_key(self) + \
            (self.volume_change,)

    def check_conflicts(self):
        # magnitude and volume_change are alternative parameterizations;
        # setting both is ambiguous.
        if self.magnitude is not None and self.volume_change is not None:
            raise DerivedMagnitudeError(
                'Magnitude and volume_change are both defined.')

    def get_magnitude(self, store=None, target=None):
        '''
        Get the magnitude, derived from ``volume_change`` if necessary.
        '''
        self.check_conflicts()

        if self.magnitude is not None:
            return self.magnitude

        elif self.volume_change is not None:
            moment = self.volume_change * \
                self.get_moment_to_volume_change_ratio(store, target)

            return float(pmt.moment_to_magnitude(abs(moment)))
        else:
            # Neither given: fall back to unit moment.
            return float(pmt.moment_to_magnitude(1.0))

    def get_volume_change(self, store=None, target=None):
        '''
        Get the volume change, derived from ``magnitude`` if necessary.
        '''
        self.check_conflicts()

        if self.volume_change is not None:
            return self.volume_change

        elif self.magnitude is not None:
            moment = float(pmt.magnitude_to_moment(self.magnitude))
            return moment / self.get_moment_to_volume_change_ratio(
                store, target)

        else:
            # Fall back to unit moment. Fix: forward ``target`` here too —
            # previously it was dropped, silently forcing 'multilinear'
            # interpolation in contrast to the branches above.
            return 1.0 / self.get_moment_to_volume_change_ratio(
                store, target)

    def get_moment_to_volume_change_ratio(self, store, target=None):
        '''
        Get the elastic conversion factor between moment and volume change.

        Requires the earth model from the GF ``store`` to look up the
        elastic moduli at the source position.
        '''
        if store is None:
            raise DerivedMagnitudeError(
                'Need earth model to convert between volume change and '
                'magnitude.')

        points = num.array(
            [[self.north_shift, self.east_shift, self.depth]], dtype=float)

        # Without a target, fall back to multilinear interpolation.
        interpolation = target.interpolation if target else 'multilinear'
        try:
            shear_moduli = store.config.get_shear_moduli(
                self.lat, self.lon,
                points=points,
                interpolation=interpolation)[0]

            bulk_moduli = store.config.get_bulk_moduli(
                self.lat, self.lon,
                points=points,
                interpolation=interpolation)[0]
        except meta.OutOfBounds:
            raise DerivedMagnitudeError(
                'Could not get shear modulus at source position.')

        return float(2. * shear_moduli + bulk_moduli)

    def get_factor(self):
        return 1.0

    def discretize_basesource(self, store, target=None):
        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)

        amplitudes *= self.get_moment(store, target) * math.sqrt(2. / 3.)

        # An implosion flips the sign of all amplitudes.
        if self.volume_change is not None:
            if self.volume_change < 0.:
                amplitudes *= -1

        return meta.DiscretizedExplosionSource(
            m0s=amplitudes,
            **self._dparams_base_repeated(times))

    def pyrocko_moment_tensor(self, store=None, target=None):
        a = self.get_moment(store, target) * math.sqrt(2. / 3.)
        return pmt.MomentTensor(m=pmt.symmat6(a, a, a, 0., 0., 0.))

1557 

1558 

class RectangularExplosionSource(ExplosionSource):
    '''
    Rectangular or line explosion source.
    '''

    discretized_source_class = meta.DiscretizedExplosionSource

    strike = Float.T(
        default=0.0,
        help='strike direction in [deg], measured clockwise from north')

    dip = Float.T(
        default=90.0,
        help='dip angle in [deg], measured downward from horizontal')

    length = Float.T(
        default=0.,
        help='length of rectangular source area [m]')

    width = Float.T(
        default=0.,
        help='width of rectangular source area [m]')

    # Fix: the help text advertises center_left / center_right and map_anchor
    # defines them, but they were missing from the accepted choices.
    anchor = StringChoice.T(
        choices=['top', 'top_left', 'top_right', 'center', 'center_left',
                 'center_right', 'bottom', 'bottom_left', 'bottom_right'],
        default='center',
        optional=True,
        help='Anchor point for positioning the plane, can be: top, center or'
             'bottom and also top_left, top_right,bottom_left,'
             'bottom_right, center_left and center right')

    nucleation_x = Float.T(
        optional=True,
        help='horizontal position of rupture nucleation in normalized fault '
             'plane coordinates (-1 = left edge, +1 = right edge)')

    nucleation_y = Float.T(
        optional=True,
        help='down-dip position of rupture nucleation in normalized fault '
             'plane coordinates (-1 = upper edge, +1 = lower edge)')

    velocity = Float.T(
        default=3500.,
        help='speed of explosion front [m/s]')

    aggressive_oversampling = Bool.T(
        default=False,
        help='Aggressive oversampling for basesource discretization. '
             "When using 'multilinear' interpolation oversampling has"
             ' practically no effect.')

    def base_key(self):
        return Source.base_key(self) + (self.strike, self.dip, self.length,
                                        self.width, self.nucleation_x,
                                        self.nucleation_y, self.velocity,
                                        self.anchor)

    def discretize_basesource(self, store, target=None):
        '''
        Discretize the rectangle into a cloud of timed point sources.
        '''
        # Convert normalized nucleation coordinates to offsets in [m].
        if self.nucleation_x is not None:
            nucx = self.nucleation_x * 0.5 * self.length
        else:
            nucx = None

        if self.nucleation_y is not None:
            nucy = self.nucleation_y * 0.5 * self.width
        else:
            nucy = None

        stf = self.effective_stf_pre()

        points, times, amplitudes, dl, dw, nl, nw = discretize_rect_source(
            store.config.deltas, store.config.deltat,
            self.time, self.north_shift, self.east_shift, self.depth,
            self.strike, self.dip, self.length, self.width, self.anchor,
            self.velocity, stf=stf, nucleation_x=nucx, nucleation_y=nucy)

        # Normalize, then scale the sub-source amplitudes to the total moment.
        amplitudes /= num.sum(amplitudes)
        amplitudes *= self.get_moment(store, target)

        return meta.DiscretizedExplosionSource(
            lat=self.lat,
            lon=self.lon,
            times=times,
            north_shifts=points[:, 0],
            east_shifts=points[:, 1],
            depths=points[:, 2],
            m0s=amplitudes)

    def outline(self, cs='xyz'):
        '''
        Get the corners of the source rectangle in the requested coordinate
        system (see :ref:`coordinate-system-names`).
        '''
        points = outline_rect_source(self.strike, self.dip, self.length,
                                     self.width, self.anchor)

        points[:, 0] += self.north_shift
        points[:, 1] += self.east_shift
        points[:, 2] += self.depth
        if cs == 'xyz':
            return points
        elif cs == 'xy':
            return points[:, :2]
        elif cs in ('latlon', 'lonlat'):
            latlon = ne_to_latlon(
                self.lat, self.lon, points[:, 0], points[:, 1])

            latlon = num.array(latlon).T
            if cs == 'latlon':
                return latlon
            else:
                # 'lonlat': swap the two columns.
                return latlon[:, ::-1]

    def get_nucleation_abs_coord(self, cs='xy'):
        '''
        Get the absolute position of the nucleation point, or ``(None,
        None)`` when no nucleation point is set.
        '''
        if self.nucleation_x is None:
            return None, None

        coords = from_plane_coords(self.strike, self.dip, self.length,
                                   self.width, self.depth, self.nucleation_x,
                                   self.nucleation_y, lat=self.lat,
                                   lon=self.lon, north_shift=self.north_shift,
                                   east_shift=self.east_shift, cs=cs)
        return coords

1681 

1682 

class DCSource(SourceWithMagnitude):
    '''
    A double-couple point source.
    '''

    strike = Float.T(
        default=0.0,
        help='strike direction in [deg], measured clockwise from north')

    dip = Float.T(
        default=90.0,
        help='dip angle in [deg], measured downward from horizontal')

    rake = Float.T(
        default=0.0,
        help='rake angle in [deg], '
             'measured counter-clockwise from right-horizontal '
             'in on-plane view')

    discretized_source_class = meta.DiscretizedMTSource

    def base_key(self):
        return Source.base_key(self) + (self.strike, self.dip, self.rake)

    def get_factor(self):
        # Scalar moment; stacking runs at unit amplitude and this factor is
        # re-applied during post-processing.
        return float(pmt.magnitude_to_moment(self.magnitude))

    def discretize_basesource(self, store, target=None):
        # Unit-moment tensor from the fault-plane angles; the STF spreads it
        # over one tensor per time sample.
        mot = pmt.MomentTensor(
            strike=self.strike, dip=self.dip, rake=self.rake)

        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)
        return meta.DiscretizedMTSource(
            m6s=mot.m6()[num.newaxis, :] * amplitudes[:, num.newaxis],
            **self._dparams_base_repeated(times))

    def pyrocko_moment_tensor(self, store=None, target=None):
        return pmt.MomentTensor(
            strike=self.strike,
            dip=self.dip,
            rake=self.rake,
            scalar_moment=self.moment)

    def pyrocko_event(self, store=None, target=None, **kwargs):
        '''
        Convert to :py:class:`pyrocko.model.Event`, attaching the moment
        tensor.
        '''
        return SourceWithMagnitude.pyrocko_event(
            self, store, target,
            moment_tensor=self.pyrocko_moment_tensor(store, target),
            **kwargs)

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        d = {}
        mt = ev.moment_tensor
        if mt:
            # Of the two possible fault planes, the first is used.
            (strike, dip, rake), _ = mt.both_strike_dip_rake()
            d.update(
                strike=float(strike),
                dip=float(dip),
                rake=float(rake),
                magnitude=float(mt.moment_magnitude()))

        d.update(kwargs)
        return super(DCSource, cls).from_pyrocko_event(ev, **d)

1747 

1748 

class CLVDSource(SourceWithMagnitude):
    '''
    A pure CLVD point source.
    '''

    discretized_source_class = meta.DiscretizedMTSource

    azimuth = Float.T(
        default=0.0,
        help='azimuth direction of largest dipole, clockwise from north [deg]')

    dip = Float.T(
        default=90.,
        help='dip direction of largest dipole, downward from horizontal [deg]')

    def base_key(self):
        return Source.base_key(self) + (self.azimuth, self.dip)

    def get_factor(self):
        # Scalar moment derived from the magnitude.
        return float(pmt.magnitude_to_moment(self.magnitude))

    @property
    def m6(self):
        '''
        6-component moment tensor of the oriented CLVD.
        '''
        # Unrotated CLVD with largest dipole along the vertical axis; the
        # sqrt(4/3) scaling makes the tensor's scalar moment equal to
        # get_factor().
        a = math.sqrt(4. / 3.) * self.get_factor()
        m = pmt.symmat6(-0.5 * a, -0.5 * a, a, 0., 0., 0.)
        # Rotate the symmetry axis to the requested dip/azimuth.
        rotmat1 = pmt.euler_to_matrix(
            d2r * (self.dip - 90.),
            d2r * (self.azimuth - 90.),
            0.)
        m = num.dot(rotmat1.T, num.dot(m, rotmat1))
        return pmt.to6(m)

    @property
    def m6_astuple(self):
        return tuple(self.m6.tolist())

    def discretize_basesource(self, store, target=None):
        # m6 carries the scalar moment; divide by factor so that stacking
        # runs at unit amplitude (the factor is re-applied in
        # post-processing via get_factor()).
        factor = self.get_factor()
        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)
        return meta.DiscretizedMTSource(
            m6s=self.m6[num.newaxis, :] * amplitudes[:, num.newaxis] / factor,
            **self._dparams_base_repeated(times))

    def pyrocko_moment_tensor(self, store=None, target=None):
        return pmt.MomentTensor(m=pmt.symmat6(*self.m6_astuple))

    def pyrocko_event(self, store=None, target=None, **kwargs):
        '''
        Convert to :py:class:`pyrocko.model.Event`, attaching moment tensor
        and magnitude.
        '''
        # Fix: reuse the computed moment tensor instead of deriving it a
        # second time for the ``moment_tensor`` argument.
        mt = self.pyrocko_moment_tensor(store, target)
        return Source.pyrocko_event(
            self, store, target,
            moment_tensor=mt,
            magnitude=float(mt.moment_magnitude()),
            **kwargs)

1803 

1804 

class VLVDSource(SourceWithDerivedMagnitude):
    '''
    Volumetric linear vector dipole source.

    This source is a parameterization for a restricted moment tensor point
    source, useful to represent dyke or sill like inflation or deflation
    sources. The restriction is such that the moment tensor is rotational
    symmetric. It can be represented by a superposition of a linear vector
    dipole (here we use a CLVD for convenience) and an isotropic component.
    The restricted moment tensor has 4 degrees of freedom: 2 independent
    eigenvalues and 2 rotation angles orienting the symmetry axis.

    In this parameterization, the isotropic component is controlled by
    ``volume_change``. To define the moment tensor, it must be converted to
    the scalar moment of the MT's isotropic component. For the conversion,
    the shear modulus at the source's position must be known. This value is
    extracted from the earth model defined in the GF store in use.

    The CLVD part is controlled by its scalar moment :math:`M_0`:
    ``clvd_moment``. The sign of ``clvd_moment`` is used to switch between a
    positive or negative CLVD (the sign of the largest eigenvalue).
    '''

    discretized_source_class = meta.DiscretizedMTSource

    azimuth = Float.T(
        default=0.0,
        help='azimuth direction of symmetry axis, clockwise from north [deg].')

    dip = Float.T(
        default=90.,
        help='dip direction of symmetry axis, downward from horizontal [deg].')

    volume_change = Float.T(
        default=0.,
        help='volume change of the inflation/deflation [m^3].')

    clvd_moment = Float.T(
        default=0.,
        help='scalar moment :math:`M_0` of the CLVD component [Nm]. The sign '
             'controls the sign of the CLVD (the sign of its largest '
             'eigenvalue).')

    def get_moment_to_volume_change_ratio(self, store, target):
        '''
        Get the elastic conversion factor between moment and volume change.

        Requires both the GF ``store`` (earth model) and ``target``
        (interpolation method).
        '''
        if store is None or target is None:
            raise DerivedMagnitudeError(
                'Need earth model to convert between volume change and '
                'magnitude.')

        points = num.array(
            [[self.north_shift, self.east_shift, self.depth]], dtype=float)

        try:
            shear_moduli = store.config.get_shear_moduli(
                self.lat, self.lon,
                points=points,
                interpolation=target.interpolation)[0]

            bulk_moduli = store.config.get_bulk_moduli(
                self.lat, self.lon,
                points=points,
                interpolation=target.interpolation)[0]
        except meta.OutOfBounds:
            raise DerivedMagnitudeError(
                'Could not get shear modulus at source position.')

        return float(2. * shear_moduli + bulk_moduli)

    def base_key(self):
        return Source.base_key(self) + \
            (self.azimuth, self.dip, self.volume_change, self.clvd_moment)

    def get_magnitude(self, store=None, target=None):
        '''
        Get the magnitude of the combined CLVD + isotropic moment tensor.
        '''
        mt = self.pyrocko_moment_tensor(store, target)
        return float(pmt.moment_to_magnitude(mt.moment))

    def get_m6(self, store, target):
        '''
        Get the 6-component moment tensor: oriented CLVD plus isotropic part.
        '''
        # CLVD with largest dipole along the vertical axis, scaled so its
        # scalar moment equals clvd_moment (sign included).
        a = math.sqrt(4. / 3.) * self.clvd_moment
        m_clvd = pmt.symmat6(-0.5 * a, -0.5 * a, a, 0., 0., 0.)

        # Rotate the symmetry axis to the requested dip/azimuth.
        rotmat1 = pmt.euler_to_matrix(
            d2r * (self.dip - 90.),
            d2r * (self.azimuth - 90.),
            0.)
        m_clvd = num.dot(rotmat1.T, num.dot(m_clvd, rotmat1))

        m_iso = self.volume_change * \
            self.get_moment_to_volume_change_ratio(store, target)

        m_iso = pmt.symmat6(m_iso, m_iso, m_iso, 0.,
                            0., 0.,) * math.sqrt(2. / 3)

        m = pmt.to6(m_clvd) + pmt.to6(m_iso)
        return m

    def get_moment(self, store=None, target=None):
        return float(pmt.magnitude_to_moment(
            self.get_magnitude(store, target)))

    def get_m6_astuple(self, store, target):
        m6 = self.get_m6(store, target)
        return tuple(m6.tolist())

    def discretize_basesource(self, store, target=None):
        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)

        m6 = self.get_m6(store, target)

        # Fix: build one scaled moment tensor per STF sample via an outer
        # product, like DCSource/MTSource do. The previous in-place
        # multiplication ``m6 *= amplitudes / factor`` broadcast a length-6
        # vector against a length-nt amplitude array and broke (or silently
        # misbehaved) whenever the discretized STF had more than one sample.
        m6s = m6[num.newaxis, :] * (
            amplitudes / self.get_factor())[:, num.newaxis]

        return meta.DiscretizedMTSource(
            m6s=m6s,
            **self._dparams_base_repeated(times))

    def pyrocko_moment_tensor(self, store=None, target=None):
        m6_astuple = self.get_m6_astuple(store, target)
        return pmt.MomentTensor(m=pmt.symmat6(*m6_astuple))

1922 

1923 

class MTSource(Source):
    '''
    A moment tensor point source.
    '''

    discretized_source_class = meta.DiscretizedMTSource

    mnn = Float.T(
        default=1.,
        help='north-north component of moment tensor in [Nm]')

    mee = Float.T(
        default=1.,
        help='east-east component of moment tensor in [Nm]')

    mdd = Float.T(
        default=1.,
        help='down-down component of moment tensor in [Nm]')

    mne = Float.T(
        default=0.,
        help='north-east component of moment tensor in [Nm]')

    mnd = Float.T(
        default=0.,
        help='north-down component of moment tensor in [Nm]')

    med = Float.T(
        default=0.,
        help='east-down component of moment tensor in [Nm]')

    def __init__(self, **kwargs):
        # Accept a 6-component vector as shorthand for the individual
        # tensor components.
        if 'm6' in kwargs:
            for (k, v) in zip('mnn mee mdd mne mnd med'.split(),
                              kwargs.pop('m6')):
                kwargs[k] = float(v)

        Source.__init__(self, **kwargs)

    @property
    def m6(self):
        '''
        Moment tensor as 6-component array (NED convention).
        '''
        return num.array(self.m6_astuple)

    @property
    def m6_astuple(self):
        return (self.mnn, self.mee, self.mdd, self.mne, self.mnd, self.med)

    @m6.setter
    def m6(self, value):
        self.mnn, self.mee, self.mdd, self.mne, self.mnd, self.med = value

    def base_key(self):
        return Source.base_key(self) + self.m6_astuple

    def discretize_basesource(self, store, target=None):
        # One scaled copy of the tensor per STF time sample.
        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)
        return meta.DiscretizedMTSource(
            m6s=self.m6[num.newaxis, :] * amplitudes[:, num.newaxis],
            **self._dparams_base_repeated(times))

    def get_magnitude(self, store=None, target=None):
        '''
        Get moment magnitude from the tensor's Frobenius-norm based moment.
        '''
        m6 = self.m6
        # Off-diagonal components appear twice in the symmetric tensor.
        return pmt.moment_to_magnitude(
            math.sqrt(num.sum(m6[0:3]**2) + 2.0 * num.sum(m6[3:6]**2)) /
            math.sqrt(2.))

    def pyrocko_moment_tensor(self, store=None, target=None):
        return pmt.MomentTensor(m=pmt.symmat6(*self.m6_astuple))

    def pyrocko_event(self, store=None, target=None, **kwargs):
        '''
        Convert to :py:class:`pyrocko.model.Event`, attaching moment tensor
        and magnitude.
        '''
        # Fix: reuse the computed moment tensor instead of deriving it a
        # second time for the ``moment_tensor`` argument.
        mt = self.pyrocko_moment_tensor(store, target)
        return Source.pyrocko_event(
            self, store, target,
            moment_tensor=mt,
            magnitude=float(mt.moment_magnitude()),
            **kwargs)

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        d = {}
        mt = ev.moment_tensor
        if mt:
            d.update(m6=tuple(map(float, mt.m6())))
        else:
            # No tensor available: fall back to an isotropic tensor scaled
            # from the event magnitude, if given.
            if ev.magnitude is not None:
                mom = pmt.magnitude_to_moment(ev.magnitude)
                v = math.sqrt(2. / 3.) * mom
                d.update(m6=(v, v, v, 0., 0., 0.))

        d.update(kwargs)
        return super(MTSource, cls).from_pyrocko_event(ev, **d)

2016 

2017 

# Symbolic anchor names mapped to normalized in-plane coordinates (x, y),
# each in [-1, 1] with (0, 0) at the plane's center. Presumably the same
# convention as documented for nucleation_x/nucleation_y (x: -1 = left edge,
# +1 = right edge; y: -1 = upper edge, +1 = lower edge) — confirm.
map_anchor = {
    'center': (0.0, 0.0),
    'center_left': (-1.0, 0.0),
    'center_right': (1.0, 0.0),
    'top': (0.0, -1.0),
    'top_left': (-1.0, -1.0),
    'top_right': (1.0, -1.0),
    'bottom': (0.0, 1.0),
    'bottom_left': (-1.0, 1.0),
    'bottom_right': (1.0, 1.0)}

2028 

2029 

2030class RectangularSource(SourceWithDerivedMagnitude): 

2031 ''' 

2032 Classical Haskell source model modified for bilateral rupture. 

2033 ''' 

2034 

2035 discretized_source_class = meta.DiscretizedMTSource 

2036 

2037 magnitude = Float.T( 

2038 optional=True, 

2039 help='moment magnitude Mw as in [Hanks and Kanamori, 1979]') 

2040 

2041 strike = Float.T( 

2042 default=0.0, 

2043 help='strike direction in [deg], measured clockwise from north') 

2044 

    # --- Fault plane geometry ---

    dip = Float.T(
        default=90.0,
        help='dip angle in [deg], measured downward from horizontal')

    rake = Float.T(
        default=0.0,
        help='rake angle in [deg], '
             'measured counter-clockwise from right-horizontal '
             'in on-plane view')

    length = Float.T(
        default=0.,
        help='length of rectangular source area [m]')

    width = Float.T(
        default=0.,
        help='width of rectangular source area [m]')

    anchor = StringChoice.T(
        choices=['top', 'top_left', 'top_right', 'center', 'bottom',
                 'bottom_left', 'bottom_right'],
        default='center',
        optional=True,
        help='Anchor point for positioning the plane, can be: ``top, center '
             'bottom, top_left, top_right,bottom_left,'
             'bottom_right, center_left, center right``.')

    # --- Rupture kinematics ---

    nucleation_x = Float.T(
        optional=True,
        help='horizontal position of rupture nucleation in normalized fault '
             'plane coordinates (``-1.`` = left edge, ``+1.`` = right edge)')

    nucleation_y = Float.T(
        optional=True,
        help='down-dip position of rupture nucleation in normalized fault '
             'plane coordinates (``-1.`` = upper edge, ``+1.`` = lower edge)')

    velocity = Float.T(
        default=3500.,
        help='speed of rupture front [m/s]')

    # Mutually exclusive with ``magnitude`` (see check_conflicts).
    slip = Float.T(
        optional=True,
        help='Slip on the rectangular source area [m]')

    opening_fraction = Float.T(
        default=0.,
        help='Determines fraction of slip related to opening. '
             '(``-1``: pure tensile closing, '
             '``0``: pure shear, '
             '``1``: pure tensile opening)')

    # --- Discretization control ---

    decimation_factor = Int.T(
        optional=True,
        default=1,
        help='Sub-source decimation factor, a larger decimation will'
             ' make the result inaccurate but shorten the necessary'
             ' computation time (use for testing puposes only).')

    aggressive_oversampling = Bool.T(
        default=False,
        help='Aggressive oversampling for basesource discretization. '
             "When using 'multilinear' interpolation oversampling has"
             ' practically no effect.')

2109 

2110 def __init__(self, **kwargs): 

2111 if 'moment' in kwargs: 

2112 mom = kwargs.pop('moment') 

2113 if 'magnitude' not in kwargs: 

2114 kwargs['magnitude'] = float(pmt.moment_to_magnitude(mom)) 

2115 

2116 SourceWithDerivedMagnitude.__init__(self, **kwargs) 

2117 

2118 def base_key(self): 

2119 return SourceWithDerivedMagnitude.base_key(self) + ( 

2120 self.magnitude, 

2121 self.slip, 

2122 self.strike, 

2123 self.dip, 

2124 self.rake, 

2125 self.length, 

2126 self.width, 

2127 self.nucleation_x, 

2128 self.nucleation_y, 

2129 self.velocity, 

2130 self.decimation_factor, 

2131 self.anchor) 

2132 

2133 def check_conflicts(self): 

2134 if self.magnitude is not None and self.slip is not None: 

2135 raise DerivedMagnitudeError( 

2136 'Magnitude and slip are both defined.') 

2137 

    def get_magnitude(self, store=None, target=None):
        '''
        Get the moment magnitude of this source.

        If :py:attr:`magnitude` is set it is returned directly. If
        :py:attr:`slip` is set instead, the magnitude is derived from the
        discretized amplitudes (requires ``store`` and ``target`` for shear
        modulus extraction). If neither is set, the magnitude of a unit
        moment (1.0 Nm) is returned.
        '''
        self.check_conflicts()
        if self.magnitude is not None:
            return self.magnitude

        elif self.slip is not None:
            if None in (store, target):
                raise DerivedMagnitudeError(
                    'Magnitude for a rectangular source with slip defined '
                    'can only be derived when earth model and target '
                    'interpolation method are available.')

            amplitudes = self._discretize(store, target)[2]
            if amplitudes.ndim == 2:
                # CLVD component has no net moment, leave out
                return float(pmt.moment_to_magnitude(
                    num.sum(num.abs(amplitudes[0:2, :]).sum())))
            else:
                return float(pmt.moment_to_magnitude(num.sum(amplitudes)))

        else:
            return float(pmt.moment_to_magnitude(1.0))

2160 

2161 def get_factor(self): 

2162 return 1.0 

2163 

2164 def get_slip_tensile(self): 

2165 return self.slip * self.opening_fraction 

2166 

2167 def get_slip_shear(self): 

2168 return self.slip - abs(self.get_slip_tensile) 

2169 

    def _discretize(self, store, target):
        '''
        Discretize the rectangular plane into point sources.

        Returns point coordinates, onset times, moment amplitudes (1-d for
        pure shear, 2-d with one row per moment-tensor component set:
        shear [, isotropic, CLVD]), sub-patch dimensions ``dl``/``dw`` and
        patch counts ``nl``/``nw``.
        '''
        # Convert normalized nucleation coordinates (-1..1) to offsets [m].
        if self.nucleation_x is not None:
            nucx = self.nucleation_x * 0.5 * self.length
        else:
            nucx = None

        if self.nucleation_y is not None:
            nucy = self.nucleation_y * 0.5 * self.width
        else:
            nucy = None

        stf = self.effective_stf_pre()

        points, times, amplitudes, dl, dw, nl, nw = discretize_rect_source(
            store.config.deltas, store.config.deltat,
            self.time, self.north_shift, self.east_shift, self.depth,
            self.strike, self.dip, self.length, self.width, self.anchor,
            self.velocity, stf=stf, nucleation_x=nucx, nucleation_y=nucy,
            decimation_factor=self.decimation_factor,
            aggressive_oversampling=self.aggressive_oversampling)

        if self.slip is not None:
            if target is not None:
                interpolation = target.interpolation
            else:
                interpolation = 'nearest_neighbor'
                logger.warning(
                    'no target information available, will use '
                    '"nearest_neighbor" interpolation when extracting shear '
                    'modulus from earth model')

            shear_moduli = store.config.get_shear_moduli(
                self.lat, self.lon,
                points=points,
                interpolation=interpolation)

            tensile_slip = self.get_slip_tensile()
            shear_slip = self.slip - abs(tensile_slip)

            # Moment density mu * slip for the shear part; an opening
            # component adds isotropic and CLVD contributions scaled by the
            # bulk and shear moduli respectively.
            amplitudes_total = [shear_moduli * shear_slip]
            if tensile_slip != 0:
                bulk_moduli = store.config.get_bulk_moduli(
                    self.lat, self.lon,
                    points=points,
                    interpolation=interpolation)

                tensile_iso = bulk_moduli * tensile_slip
                tensile_clvd = (2. / 3.) * shear_moduli * tensile_slip

                amplitudes_total.extend([tensile_iso, tensile_clvd])

            amplitudes_total = num.vstack(amplitudes_total).squeeze() * \
                amplitudes * dl * dw

        else:
            # normalization to retain total moment
            amplitudes_norm = amplitudes / num.sum(amplitudes)
            moment = self.get_moment(store, target)

            amplitudes_total = [
                amplitudes_norm * moment * (1 - abs(self.opening_fraction))]
            if self.opening_fraction != 0.:
                amplitudes_total.append(
                    amplitudes_norm * self.opening_fraction * moment)

            amplitudes_total = num.vstack(amplitudes_total).squeeze()

        return points, times, num.atleast_1d(amplitudes_total), dl, dw, nl, nw

2238 

2239 def discretize_basesource(self, store, target=None): 

2240 

2241 points, times, amplitudes, dl, dw, nl, nw = self._discretize( 

2242 store, target) 

2243 

2244 mot = pmt.MomentTensor( 

2245 strike=self.strike, dip=self.dip, rake=self.rake) 

2246 m6s = num.repeat(mot.m6()[num.newaxis, :], times.size, axis=0) 

2247 

2248 if amplitudes.ndim == 1: 

2249 m6s[:, :] *= amplitudes[:, num.newaxis] 

2250 elif amplitudes.ndim == 2: 

2251 # shear MT components 

2252 rotmat1 = pmt.euler_to_matrix( 

2253 d2r * self.dip, d2r * self.strike, d2r * -self.rake) 

2254 m6s[:, :] *= amplitudes[0, :][:, num.newaxis] 

2255 

2256 if amplitudes.shape[0] == 2: 

2257 # tensile MT components - moment/magnitude input 

2258 tensile = pmt.symmat6(1., 1., 3., 0., 0., 0.) 

2259 rot_tensile = pmt.to6( 

2260 num.dot(rotmat1.T, num.dot(tensile, rotmat1))) 

2261 

2262 m6s_tensile = rot_tensile[ 

2263 num.newaxis, :] * amplitudes[1, :][:, num.newaxis] 

2264 m6s += m6s_tensile 

2265 

2266 elif amplitudes.shape[0] == 3: 

2267 # tensile MT components - slip input 

2268 iso = pmt.symmat6(1., 1., 1., 0., 0., 0.) 

2269 clvd = pmt.symmat6(-1., -1., 2., 0., 0., 0.) 

2270 

2271 rot_iso = pmt.to6( 

2272 num.dot(rotmat1.T, num.dot(iso, rotmat1))) 

2273 rot_clvd = pmt.to6( 

2274 num.dot(rotmat1.T, num.dot(clvd, rotmat1))) 

2275 

2276 m6s_iso = rot_iso[ 

2277 num.newaxis, :] * amplitudes[1, :][:, num.newaxis] 

2278 m6s_clvd = rot_clvd[ 

2279 num.newaxis, :] * amplitudes[2, :][:, num.newaxis] 

2280 m6s += m6s_iso + m6s_clvd 

2281 else: 

2282 raise ValueError('Unknwown amplitudes shape!') 

2283 else: 

2284 raise ValueError( 

2285 'Unexpected dimension of {}'.format(amplitudes.ndim)) 

2286 

2287 ds = meta.DiscretizedMTSource( 

2288 lat=self.lat, 

2289 lon=self.lon, 

2290 times=times, 

2291 north_shifts=points[:, 0], 

2292 east_shifts=points[:, 1], 

2293 depths=points[:, 2], 

2294 m6s=m6s, 

2295 dl=dl, 

2296 dw=dw, 

2297 nl=nl, 

2298 nw=nw) 

2299 

2300 return ds 

2301 

    def xy_to_coord(self, x, y, cs='xyz'):
        '''
        Convert normalized in-plane coordinates to the given coordinate
        system.

        ``x`` and ``y`` are normalized fault-plane coordinates in
        ``[-1., 1.]`` (along strike / down dip). The output format depends
        on ``cs`` (see :ref:`coordinate-system-names`).
        '''
        ln, wd = self.length, self.width
        strike, dip = self.strike, self.dip

        def array_check(variable):
            # Accept scalars and lists as well as arrays.
            if not isinstance(variable, num.ndarray):
                return num.array(variable)
            else:
                return variable

        x, y = array_check(x), array_check(y)

        if x.shape[0] != y.shape[0]:
            raise ValueError('Shapes of x and y mismatch')

        # Scale normalized coordinates to metres on the plane.
        x, y = x * 0.5 * ln, y * 0.5 * wd

        points = num.hstack((
            x.reshape(-1, 1), y.reshape(-1, 1), num.zeros((x.shape[0], 1))))

        # Shift relative to the configured anchor point.
        anch_x, anch_y = map_anchor[self.anchor]
        points[:, 0] -= anch_x * 0.5 * ln
        points[:, 1] -= anch_y * 0.5 * wd

        # Rotate the plane into its strike/dip orientation.
        rotmat = num.asarray(
            pmt.euler_to_matrix(dip * d2r, strike * d2r, 0.0))

        points_rot = num.dot(rotmat.T, points.T).T

        points_rot[:, 0] += self.north_shift
        points_rot[:, 1] += self.east_shift
        points_rot[:, 2] += self.depth

        if cs == 'xyz':
            return points_rot
        elif cs == 'xy':
            return points_rot[:, :2]
        elif cs in ('latlon', 'lonlat', 'latlondepth'):
            latlon = ne_to_latlon(
                self.lat, self.lon, points_rot[:, 0], points_rot[:, 1])
            latlon = num.array(latlon).T
            if cs == 'latlon':
                return latlon
            elif cs == 'lonlat':
                return latlon[:, ::-1]
            else:
                return num.concatenate(
                    (latlon, points_rot[:, 2].reshape((len(points_rot), 1))),
                    axis=1)

2351 

2352 def outline(self, cs='xyz'): 

2353 x = num.array([-1., 1., 1., -1., -1.]) 

2354 y = num.array([-1., -1., 1., 1., -1.]) 

2355 

2356 return self.xy_to_coord(x, y, cs=cs) 

2357 

    def points_on_source(self, cs='xyz', **kwargs):
        '''
        Convert relative plane coordinates to the given coordinate system.

        Additional ``**kwargs`` (e.g. ``points_x``, ``points_y``) are passed
        to :py:func:`points_on_rect_source`. Output format is selected via
        ``cs`` (see :ref:`coordinate-system-names`).
        '''
        points = points_on_rect_source(
            self.strike, self.dip, self.length, self.width,
            self.anchor, **kwargs)

        # Shift from source-local to absolute coordinates.
        points[:, 0] += self.north_shift
        points[:, 1] += self.east_shift
        points[:, 2] += self.depth
        if cs == 'xyz':
            return points
        elif cs == 'xy':
            return points[:, :2]
        elif cs in ('latlon', 'lonlat', 'latlondepth'):
            latlon = ne_to_latlon(
                self.lat, self.lon, points[:, 0], points[:, 1])

            latlon = num.array(latlon).T
            if cs == 'latlon':
                return latlon
            elif cs == 'lonlat':
                return latlon[:, ::-1]
            else:
                return num.concatenate(
                    (latlon, points[:, 2].reshape((len(points), 1))),
                    axis=1)

2384 

    def geometry(self, *args, **kwargs):
        '''
        Build a :py:class:`pyrocko.model.Geometry` mesh for this source.

        Vertices and quad faces are generated on a regular
        ``(nl+1) x (nw+1)`` grid in normalized plane coordinates; arrival
        times and patch moments are attached as properties when available.
        Additional ``*args``/``**kwargs`` are passed to
        :py:meth:`discretize_basesource`.
        '''
        from pyrocko.model import Geometry

        ds = self.discretize_basesource(*args, **kwargs)
        nx, ny = ds.nl, ds.nw

        def patch_outlines_xy(nx, ny):
            # Regular grid of normalized coordinates covering the plane.
            points = num.zeros((nx * ny, 2))
            points[:, 0] = num.tile(num.linspace(-1., 1., nx), ny)
            points[:, 1] = num.repeat(num.linspace(-1., 1., ny), nx)

            return points

        points_ds = patch_outlines_xy(nx + 1, ny + 1)
        npoints = (nx + 1) * (ny + 1)

        vertices = num.hstack((
            num.ones((npoints, 1)) * self.lat,
            num.ones((npoints, 1)) * self.lon,
            self.xy_to_coord(points_ds[:, 0], points_ds[:, 1], cs='xyz')))

        # Closed quad faces (first corner repeated) indexing the grid.
        faces = num.array([[
            iy * (nx + 1) + ix,
            iy * (nx + 1) + ix + 1,
            (iy + 1) * (nx + 1) + ix + 1,
            (iy + 1) * (nx + 1) + ix,
            iy * (nx + 1) + ix]
            for iy in range(ny) for ix in range(nx)])

        xyz = self.outline('xyz')
        latlon = num.ones((5, 2)) * num.array([self.lat, self.lon])
        patchverts = num.hstack((latlon, xyz))

        geom = Geometry()
        geom.setup(vertices, faces)
        geom.set_outlines([patchverts])

        if self.stf:
            geom.times = num.unique(ds.times)

        if self.nucleation_x is not None and self.nucleation_y is not None:
            geom.add_property('t_arrival', ds.times)

        geom.add_property(
            'moment', ds.moments().reshape(ds.nl*ds.nw, -1))

        return geom

2432 

2433 def get_nucleation_abs_coord(self, cs='xy'): 

2434 

2435 if self.nucleation_x is None: 

2436 return None, None 

2437 

2438 coords = from_plane_coords(self.strike, self.dip, self.length, 

2439 self.width, self.depth, self.nucleation_x, 

2440 self.nucleation_y, lat=self.lat, 

2441 lon=self.lon, north_shift=self.north_shift, 

2442 east_shift=self.east_shift, cs=cs) 

2443 return coords 

2444 

2445 def pyrocko_moment_tensor(self, store=None, target=None): 

2446 return pmt.MomentTensor( 

2447 strike=self.strike, 

2448 dip=self.dip, 

2449 rake=self.rake, 

2450 scalar_moment=self.get_moment(store, target)) 

2451 

2452 def pyrocko_event(self, store=None, target=None, **kwargs): 

2453 return SourceWithDerivedMagnitude.pyrocko_event( 

2454 self, store, target, 

2455 **kwargs) 

2456 

2457 @classmethod 

2458 def from_pyrocko_event(cls, ev, **kwargs): 

2459 d = {} 

2460 mt = ev.moment_tensor 

2461 if mt: 

2462 (strike, dip, rake), _ = mt.both_strike_dip_rake() 

2463 d.update( 

2464 strike=float(strike), 

2465 dip=float(dip), 

2466 rake=float(rake), 

2467 magnitude=float(mt.moment_magnitude())) 

2468 

2469 d.update(kwargs) 

2470 return super(RectangularSource, cls).from_pyrocko_event(ev, **d) 

2471 

2472 

class PseudoDynamicRupture(SourceWithDerivedMagnitude):
    '''
    Combined Eikonal and Okada quasi-dynamic rupture model.

    Details are described in :doc:`/topics/pseudo-dynamic-rupture`.
    Note: attribute `stf` is not used so far, but kept for future applications.
    '''

    discretized_source_class = meta.DiscretizedMTSource

    # --- Fault plane geometry ---

    strike = Float.T(
        default=0.0,
        help='Strike direction in [deg], measured clockwise from north.')

    dip = Float.T(
        default=0.0,
        help='Dip angle in [deg], measured downward from horizontal.')

    length = Float.T(
        default=10. * km,
        help='Length of rectangular source area in [m].')

    width = Float.T(
        default=5. * km,
        help='Width of rectangular source area in [m].')

    anchor = StringChoice.T(
        choices=['top', 'top_left', 'top_right', 'center', 'bottom',
                 'bottom_left', 'bottom_right'],
        default='center',
        optional=True,
        help='Anchor point for positioning the plane, can be: ``top, center, '
             'bottom, top_left, top_right, bottom_left, '
             'bottom_right, center_left, center_right``.')

    # --- Rupture nucleation ---
    # Double-underscore attributes are exposed through same-named properties
    # (without the underscores) which normalize scalars/lists to arrays.

    nucleation_x__ = Array.T(
        default=num.array([0.]),
        dtype=num.float64,
        serialize_as='list',
        help='Horizontal position of rupture nucleation in normalized fault '
             'plane coordinates (``-1.`` = left edge, ``+1.`` = right edge).')

    nucleation_y__ = Array.T(
        default=num.array([0.]),
        dtype=num.float64,
        serialize_as='list',
        help='Down-dip position of rupture nucleation in normalized fault '
             'plane coordinates (``-1.`` = upper edge, ``+1.`` = lower edge).')

    nucleation_time__ = Array.T(
        optional=True,
        help='Time in [s] after origin, when nucleation points defined by '
             '``nucleation_x`` and ``nucleation_y`` rupture.',
        dtype=num.float64,
        serialize_as='list')

    gamma = Float.T(
        default=0.8,
        help='Scaling factor between rupture velocity and S-wave velocity: '
             r':math:`v_r = \gamma * v_s`.')

    nx = Int.T(
        default=2,
        help='Number of discrete source patches in x direction (along '
             'strike).')

    ny = Int.T(
        default=2,
        help='Number of discrete source patches in y direction (down dip).')

    # --- Slip / traction parameterization (``rake`` and ``tractions`` are
    # mutually exclusive, see check_conflicts) ---

    slip = Float.T(
        optional=True,
        help='Maximum slip of the rectangular source [m]. '
             'Setting the slip the tractions/stress field '
             'will be normalized to accomodate the desired maximum slip.')

    rake = Float.T(
        optional=True,
        help='Rake angle in [deg], '
             'measured counter-clockwise from right-horizontal '
             'in on-plane view. Rake is translated into homogenous tractions '
             'in strike and up-dip direction. ``rake`` is mutually exclusive '
             'with tractions parameter.')

    patches = List.T(
        OkadaSource.T(),
        optional=True,
        help='List of all boundary elements/sub faults/fault patches.')

    patch_mask__ = Array.T(
        dtype=bool,
        serialize_as='list',
        shape=(None,),
        optional=True,
        help='Mask for all boundary elements/sub faults/fault patches. True '
             'leaves the patch in the calculation, False excludes the patch.')

    tractions = TractionField.T(
        optional=True,
        help='Traction field the rupture plane is exposed to. See the'
             ':py:mod:`pyrocko.gf.tractions` module for more details. '
             'If ``tractions=None`` and ``rake`` is given'
             ' :py:class:`~pyrocko.gf.tractions.DirectedTractions` will'
             ' be used.')

    coef_mat = Array.T(
        optional=True,
        help='Coefficient matrix linking traction and dislocation field.',
        dtype=num.float64,
        shape=(None, None))

    # --- Numerics and discretization control ---

    eikonal_decimation = Int.T(
        optional=True,
        default=1,
        help='Sub-source eikonal factor, a smaller eikonal factor will'
             ' increase the accuracy of rupture front calculation but'
             ' increases also the computation time.')

    decimation_factor = Int.T(
        optional=True,
        default=1,
        help='Sub-source decimation factor, a larger decimation will'
             ' make the result inaccurate but shorten the necessary'
             ' computation time (use for testing puposes only).')

    nthreads = Int.T(
        optional=True,
        default=1,
        help='Number of threads for Okada forward modelling, '
             'matrix inversion and calculation of point subsources. '
             'Note: for small/medium matrices 1 thread is most efficient.')

    pure_shear = Bool.T(
        optional=True,
        default=False,
        help='Calculate only shear tractions and omit tensile tractions.')

    smooth_rupture = Bool.T(
        default=True,
        help='Smooth the tractions by weighting partially ruptured'
             ' fault patches.')

    aggressive_oversampling = Bool.T(
        default=False,
        help='Aggressive oversampling for basesource discretization. '
             "When using 'multilinear' interpolation oversampling has"
             ' practically no effect.')

2620 

2621 def __init__(self, **kwargs): 

2622 SourceWithDerivedMagnitude.__init__(self, **kwargs) 

2623 self._interpolators = {} 

2624 self.check_conflicts() 

2625 

2626 @property 

2627 def nucleation_x(self): 

2628 return self.nucleation_x__ 

2629 

2630 @nucleation_x.setter 

2631 def nucleation_x(self, nucleation_x): 

2632 if isinstance(nucleation_x, list): 

2633 nucleation_x = num.array(nucleation_x) 

2634 

2635 elif not isinstance( 

2636 nucleation_x, num.ndarray) and nucleation_x is not None: 

2637 

2638 nucleation_x = num.array([nucleation_x]) 

2639 self.nucleation_x__ = nucleation_x 

2640 

2641 @property 

2642 def nucleation_y(self): 

2643 return self.nucleation_y__ 

2644 

2645 @nucleation_y.setter 

2646 def nucleation_y(self, nucleation_y): 

2647 if isinstance(nucleation_y, list): 

2648 nucleation_y = num.array(nucleation_y) 

2649 

2650 elif not isinstance(nucleation_y, num.ndarray) \ 

2651 and nucleation_y is not None: 

2652 nucleation_y = num.array([nucleation_y]) 

2653 

2654 self.nucleation_y__ = nucleation_y 

2655 

2656 @property 

2657 def nucleation(self): 

2658 nucl_x, nucl_y = self.nucleation_x, self.nucleation_y 

2659 

2660 if (nucl_x is None) or (nucl_y is None): 

2661 return None 

2662 

2663 assert nucl_x.shape[0] == nucl_y.shape[0] 

2664 

2665 return num.concatenate( 

2666 (nucl_x[:, num.newaxis], nucl_y[:, num.newaxis]), axis=1) 

2667 

2668 @nucleation.setter 

2669 def nucleation(self, nucleation): 

2670 if isinstance(nucleation, list): 

2671 nucleation = num.array(nucleation) 

2672 

2673 assert nucleation.shape[1] == 2 

2674 

2675 self.nucleation_x = nucleation[:, 0] 

2676 self.nucleation_y = nucleation[:, 1] 

2677 

2678 @property 

2679 def nucleation_time(self): 

2680 return self.nucleation_time__ 

2681 

2682 @nucleation_time.setter 

2683 def nucleation_time(self, nucleation_time): 

2684 if not isinstance(nucleation_time, num.ndarray) \ 

2685 and nucleation_time is not None: 

2686 nucleation_time = num.array([nucleation_time]) 

2687 

2688 self.nucleation_time__ = nucleation_time 

2689 

2690 @property 

2691 def patch_mask(self): 

2692 if (self.patch_mask__ is not None and 

2693 self.patch_mask__.shape == (self.nx * self.ny,)): 

2694 

2695 return self.patch_mask__ 

2696 else: 

2697 return num.ones(self.nx * self.ny, dtype=bool) 

2698 

2699 @patch_mask.setter 

2700 def patch_mask(self, patch_mask): 

2701 if isinstance(patch_mask, list): 

2702 patch_mask = num.array(patch_mask) 

2703 

2704 self.patch_mask__ = patch_mask 

2705 

    def get_tractions(self):
        '''
        Get source traction vectors.

        If :py:attr:`rake` is given, unit length directed traction vectors
        (:py:class:`~pyrocko.gf.tractions.DirectedTractions`) are returned,
        else the given :py:attr:`tractions` are used.

        :returns:
            Traction vectors per patch.
        :rtype:
            :py:class:`~numpy.ndarray`: ``(n_patches, 3)``.
        '''

        if self.rake is not None:
            if num.isnan(self.rake):
                raise ValueError('Rake must be a real number, not NaN.')

            logger.warning(
                'Tractions are derived based on the given source rake.')
            # ``rake`` takes precedence; check_conflicts prevents both
            # ``rake`` and ``tractions`` from being set simultaneously.
            tractions = DirectedTractions(rake=self.rake)
        else:
            tractions = self.tractions
        return tractions.get_tractions(self.nx, self.ny, self.patches)

2730 

2731 def base_key(self): 

2732 return SourceWithDerivedMagnitude.base_key(self) + ( 

2733 self.slip, 

2734 self.strike, 

2735 self.dip, 

2736 self.rake, 

2737 self.length, 

2738 self.width, 

2739 float(self.nucleation_x.mean()), 

2740 float(self.nucleation_y.mean()), 

2741 self.decimation_factor, 

2742 self.anchor, 

2743 self.pure_shear, 

2744 self.gamma, 

2745 tuple(self.patch_mask)) 

2746 

2747 def check_conflicts(self): 

2748 if self.tractions and self.rake: 

2749 raise AttributeError( 

2750 'Tractions and rake are mutually exclusive.') 

2751 if self.tractions is None and self.rake is None: 

2752 self.rake = 0. 

2753 

    def get_magnitude(self, store=None, target=None):
        '''
        Get the moment magnitude of this rupture.

        If ``slip`` or ``tractions`` are defined, the magnitude is derived
        by time-integrating the moment rate of the discretized source
        (requires ``store``). Otherwise the magnitude of a unit moment
        (1.0 Nm) is returned.
        '''
        self.check_conflicts()
        if self.slip is not None or self.tractions is not None:
            if store is None:
                raise DerivedMagnitudeError(
                    'Magnitude for a rectangular source with slip or '
                    'tractions defined can only be derived when earth model '
                    'is set.')

            moment_rate, calc_times = self.discretize_basesource(
                store, target=target).get_moment_rate(store.config.deltat)

            # Integrate moment rate over (possibly non-uniform) time steps;
            # the first interval is approximated by the first difference.
            deltat = num.concatenate((
                (num.diff(calc_times)[0],),
                num.diff(calc_times)))

            return float(pmt.moment_to_magnitude(
                num.sum(moment_rate * deltat)))

        else:
            return float(pmt.moment_to_magnitude(1.0))

2775 

2776 def get_factor(self): 

2777 return 1.0 

2778 

    def outline(self, cs='xyz'):
        '''
        Get source outline corner coordinates.

        :param cs:
            :ref:`Output coordinate system <coordinate-system-names>`.
        :type cs:
            optional, str

        :returns:
            Corner points in desired coordinate system.
        :rtype:
            :py:class:`~numpy.ndarray`: ``(5, [2, 3])``.
        '''
        points = outline_rect_source(self.strike, self.dip, self.length,
                                     self.width, self.anchor)

        # Shift from source-local to absolute coordinates.
        points[:, 0] += self.north_shift
        points[:, 1] += self.east_shift
        points[:, 2] += self.depth
        if cs == 'xyz':
            return points
        elif cs == 'xy':
            return points[:, :2]
        elif cs in ('latlon', 'lonlat', 'latlondepth'):
            latlon = ne_to_latlon(
                self.lat, self.lon, points[:, 0], points[:, 1])

            latlon = num.array(latlon).T
            if cs == 'latlon':
                return latlon
            elif cs == 'lonlat':
                return latlon[:, ::-1]
            else:
                return num.concatenate(
                    (latlon, points[:, 2].reshape((len(points), 1))),
                    axis=1)

2816 

    def points_on_source(self, cs='xyz', **kwargs):
        '''
        Convert relative plane coordinates to geographical coordinates.

        Given x and y coordinates (relative source coordinates between -1.
        and 1.) are converted to desired geographical coordinates. Coordinates
        need to be given as :py:class:`~numpy.ndarray` arguments ``points_x``
        and ``points_y``.

        :param cs:
            :ref:`Output coordinate system <coordinate-system-names>`.
        :type cs:
            optional, str

        :returns:
            Point coordinates in desired coordinate system.
        :rtype:
            :py:class:`~numpy.ndarray`: ``(n_points, [2, 3])``.
        '''
        points = points_on_rect_source(
            self.strike, self.dip, self.length, self.width,
            self.anchor, **kwargs)

        # Shift from source-local to absolute coordinates.
        points[:, 0] += self.north_shift
        points[:, 1] += self.east_shift
        points[:, 2] += self.depth
        if cs == 'xyz':
            return points
        elif cs == 'xy':
            return points[:, :2]
        elif cs in ('latlon', 'lonlat', 'latlondepth'):
            latlon = ne_to_latlon(
                self.lat, self.lon, points[:, 0], points[:, 1])

            latlon = num.array(latlon).T
            if cs == 'latlon':
                return latlon
            elif cs == 'lonlat':
                return latlon[:, ::-1]
            else:
                return num.concatenate(
                    (latlon, points[:, 2].reshape((len(points), 1))),
                    axis=1)

2860 

    def pyrocko_moment_tensor(self, store=None, target=None):
        '''
        Return the moment tensor equivalent of this rupture.

        The effective rake is the norm-weighted average of the per-patch
        slip directions (patches discretized when a ``store`` is given),
        or of the traction directions otherwise.
        '''
        if store is not None:
            if not self.patches:
                self.discretize_patches(store)

            data = self.get_slip()
        else:
            data = self.get_tractions()

        # Weight each patch by the vector norm of its slip/traction.
        weights = num.linalg.norm(data, axis=1)
        weights /= weights.sum()

        # NOTE(review): arithmetic averaging of angles may be inaccurate
        # near the +/-180 deg wrap-around -- confirm rake range assumptions.
        rakes = num.arctan2(data[:, 1], data[:, 0]) * r2d
        rake = num.average(rakes, weights=weights)

        return pmt.MomentTensor(
            strike=self.strike,
            dip=self.dip,
            rake=rake,
            scalar_moment=self.get_moment(store, target))

2881 

2882 def pyrocko_event(self, store=None, target=None, **kwargs): 

2883 return SourceWithDerivedMagnitude.pyrocko_event( 

2884 self, store, target, 

2885 **kwargs) 

2886 

2887 @classmethod 

2888 def from_pyrocko_event(cls, ev, **kwargs): 

2889 d = {} 

2890 mt = ev.moment_tensor 

2891 if mt: 

2892 (strike, dip, rake), _ = mt.both_strike_dip_rake() 

2893 d.update( 

2894 strike=float(strike), 

2895 dip=float(dip), 

2896 rake=float(rake)) 

2897 

2898 d.update(kwargs) 

2899 return super(PseudoDynamicRupture, cls).from_pyrocko_event(ev, **d) 

2900 

    def _discretize_points(self, store, *args, **kwargs):
        '''
        Discretize source plane with equal vertical and horizontal spacing.

        Additional ``*args`` and ``**kwargs`` are passed to
        :py:meth:`points_on_source`.

        :param store:
            Green's function database (needs to cover whole region of the
            source).
        :type store:
            :py:class:`~pyrocko.gf.store.Store`

        :returns:
            Number of points in strike and dip direction, distance
            between adjacent points, coordinates (latlondepth) and coordinates
            (xy on fault) for discrete points.
        :rtype:
            (int, int, float, :py:class:`~numpy.ndarray`,
            :py:class:`~numpy.ndarray`).
        '''
        anch_x, anch_y = map_anchor[self.anchor]

        # Sample the down-dip direction roughly every kilometre to probe
        # the minimum S-wave velocity on the plane.
        npoints = int(self.width // km) + 1
        points = num.zeros((npoints, 3))
        points[:, 1] = num.linspace(-1., 1., npoints)
        points[:, 1] = (points[:, 1] - anch_y) * self.width/2

        rotmat = pmt.euler_to_matrix(self.dip*d2r, self.strike*d2r, 0.0)
        points = num.dot(rotmat.T, points.T).T
        points[:, 2] += self.depth

        # Floor the minimum rupture velocity at 0.5 km/s * gamma to avoid
        # an excessively fine discretization for very slow media.
        vs_min = store.config.get_vs(
            self.lat, self.lon, points,
            interpolation='nearest_neighbor')
        vr_min = max(vs_min.min(), .5*km) * self.gamma

        oversampling = 10.
        delta_l = self.length / (self.nx * oversampling)
        delta_w = self.width / (self.ny * oversampling)

        # Grid spacing is limited by the GF sampling, the oversampled
        # patch size, and the rupture travel distance per time step.
        delta = self.eikonal_decimation * num.min([
            store.config.deltat * vr_min / oversampling,
            delta_l, delta_w] + [
            deltas for deltas in store.config.deltas])

        # Snap the spacing so it divides the width evenly.
        delta = delta_w / num.ceil(delta_w / delta)

        nx = int(num.ceil(self.length / delta)) + 1
        ny = int(num.ceil(self.width / delta)) + 1

        # The grid may slightly overshoot the plane length; extend the
        # normalized coordinate limits symmetrically to compensate.
        rem_l = (nx-1)*delta - self.length
        lim_x = rem_l / self.length

        points_xy = num.zeros((nx * ny, 2))
        points_xy[:, 0] = num.repeat(
            num.linspace(-1.-lim_x, 1.+lim_x, nx), ny)
        points_xy[:, 1] = num.tile(
            num.linspace(-1., 1., ny), nx)

        points = self.points_on_source(
            points_x=points_xy[:, 0],
            points_y=points_xy[:, 1],
            **kwargs)

        return nx, ny, delta, points, points_xy

2967 

    def _discretize_rupture_v(self, store, interpolation='nearest_neighbor',
                              points=None):
        '''
        Get rupture velocity for discrete points on source plane.

        :param store:
            Green's function database (needs to cover the whole region of the
            source)
        :type store:
            optional, :py:class:`~pyrocko.gf.store.Store`

        :param interpolation:
            Interpolation method to use (choose between ``'nearest_neighbor'``
            and ``'multilinear'``).
        :type interpolation:
            optional, str

        :param points:
            Coordinates on fault (-1.:1.) of discrete points.
        :type points:
            optional, :py:class:`~numpy.ndarray`: ``(n_points, 2)``

        :returns:
            Rupture velocity assumed as :math:`v_s * \\gamma` for discrete
            points.
        :rtype:
            :py:class:`~numpy.ndarray`: ``(n_points, )``.
        '''

        # Default to the regular discretization of the plane when no
        # explicit point set is given.
        if points is None:
            _, _, _, points, _ = self._discretize_points(store, cs='xyz')

        return store.config.get_vs(
            self.lat, self.lon,
            points=points,
            interpolation=interpolation) * self.gamma

3004 

3005 def discretize_time( 

3006 self, store, interpolation='nearest_neighbor', 

3007 vr=None, times=None, *args, **kwargs): 

3008 ''' 

3009 Get rupture start time for discrete points on source plane. 

3010 

3011 :param store: 

3012 Green's function database (needs to cover whole region of the 

3013 source) 

3014 :type store: 

3015 :py:class:`~pyrocko.gf.store.Store` 

3016 

3017 :param interpolation: 

3018 Interpolation method to use (choose between ``'nearest_neighbor'`` 

3019 and ``'multilinear'``). 

3020 :type interpolation: 

3021 optional, str 

3022 

3023 :param vr: 

3024 Array, containing rupture user defined rupture velocity values. 

3025 :type vr: 

3026 optional, :py:class:`~numpy.ndarray` 

3027 

3028 :param times: 

3029 Array, containing zeros, where rupture is starting, real positive 

3030 numbers at later secondary nucleation points and -1, where time 

3031 will be calculated. If not given, rupture starts at nucleation_x, 

3032 nucleation_y. Times are given for discrete points with equal 

3033 horizontal and vertical spacing. 

3034 :type times: 

3035 optional, :py:class:`~numpy.ndarray` 

3036 

3037 :returns: 

3038 Coordinates (latlondepth), coordinates (xy), rupture velocity, 

3039 rupture propagation time of discrete points. 

3040 :rtype: 

3041 :py:class:`~numpy.ndarray`: ``(n_points, 3)``, 

3042 :py:class:`~numpy.ndarray`: ``(n_points, 2)``, 

3043 :py:class:`~numpy.ndarray`: ``(n_points_dip, n_points_strike)``, 

3044 :py:class:`~numpy.ndarray`: ``(n_points_dip, n_points_strike)``. 

3045 ''' 

3046 nx, ny, delta, points, points_xy = self._discretize_points( 

3047 store, cs='xyz') 

3048 

3049 if vr is None or vr.shape != tuple((nx, ny)): 

3050 if vr: 

3051 logger.warning( 

3052 'Given rupture velocities are not in right shape: ' 

3053 '(%i, %i), but needed is (%i, %i).', *vr.shape + (nx, ny)) 

3054 vr = self._discretize_rupture_v(store, interpolation, points)\ 

3055 .reshape(nx, ny) 

3056 

3057 if vr.shape != tuple((nx, ny)): 

3058 logger.warning( 

3059 'Given rupture velocities are not in right shape. Therefore' 

3060 ' standard rupture velocity array is used.') 

3061 

3062 def initialize_times(): 

3063 nucl_x, nucl_y = self.nucleation_x, self.nucleation_y 

3064 

3065 if nucl_x.shape != nucl_y.shape: 

3066 raise ValueError( 

3067 'Nucleation coordinates have different shape.') 

3068 

3069 dist_points = num.array([ 

3070 num.linalg.norm(points_xy - num.array([x, y]).ravel(), axis=1) 

3071 for x, y in zip(nucl_x, nucl_y)]) 

3072 nucl_indices = num.argmin(dist_points, axis=1) 

3073 

3074 if self.nucleation_time is None: 

3075 nucl_times = num.zeros_like(nucl_indices) 

3076 else: 

3077 if self.nucleation_time.shape == nucl_x.shape: 

3078 nucl_times = self.nucleation_time 

3079 else: 

3080 raise ValueError( 

3081 'Nucleation coordinates and times have different ' 

3082 'shapes') 

3083 

3084 t = num.full(nx * ny, -1.) 

3085 t[nucl_indices] = nucl_times 

3086 return t.reshape(nx, ny) 

3087 

3088 if times is None: 

3089 times = initialize_times() 

3090 elif times.shape != tuple((nx, ny)): 

3091 times = initialize_times() 

3092 logger.warning( 

3093 'Given times are not in right shape. Therefore standard time' 

3094 ' array is used.') 

3095 

3096 eikonal_ext.eikonal_solver_fmm_cartesian( 

3097 speeds=vr, times=times, delta=delta) 

3098 

3099 return points, points_xy, vr, times 

3100 

    def get_vr_time_interpolators(
            self, store, interpolation='nearest_neighbor', force=False,
            **kwargs):
        '''
        Get interpolators for rupture velocity and rupture time.

        Additional ``**kwargs`` are passed to :py:meth:`discretize_time`.

        :param store:
            Green's function database (needs to cover whole region of the
            source).
        :type store:
            :py:class:`~pyrocko.gf.store.Store`

        :param interpolation:
            Interpolation method to use (choose between ``'nearest_neighbor'``
            and ``'multilinear'``).
        :type interpolation:
            optional, str

        :param force:
            Force recalculation of the interpolators (e.g. after change of
            nucleation point locations/times). Default is ``False``.
        :type force:
            optional, bool

        :returns:
            ``(nx, ny, times, vr, time_interpolator, vr_interpolator)`` --
            grid shape, time and velocity grids, and two
            :py:class:`~scipy.interpolate.RegularGridInterpolator` objects
            mapping metric fault-plane coordinates to rupture time and
            rupture velocity.
        :rtype:
            tuple
        '''
        # Translate the public method names into the method names understood
        # by scipy's RegularGridInterpolator.
        interp_map = {'multilinear': 'linear', 'nearest_neighbor': 'nearest'}
        if interpolation not in interp_map:
            raise TypeError(
                'Interpolation method %s not available' % interpolation)

        # Interpolators are cached per interpolation method in
        # self._interpolators; recompute only if missing or forced.
        if not self._interpolators.get(interpolation, False) or force:
            _, points_xy, vr, times = self.discretize_time(
                store, **kwargs)

            if self.length <= 0.:
                raise ValueError(
                    'length must be larger then 0. not %g' % self.length)

            if self.width <= 0.:
                raise ValueError(
                    'width must be larger then 0. not %g' % self.width)

            nx, ny = times.shape
            anch_x, anch_y = map_anchor[self.anchor]

            # Scale fractional fault-plane coordinates ([-1, 1] range) into
            # metric offsets relative to the anchor point.
            points_xy[:, 0] = (points_xy[:, 0] - anch_x) * self.length / 2.
            points_xy[:, 1] = (points_xy[:, 1] - anch_y) * self.width / 2.

            ascont = num.ascontiguousarray

            # The slicing pattern implies points_xy is ordered with the
            # width (y) coordinate varying fastest: unique x values are
            # every ny-th entry, unique y values are the first ny entries.
            self._interpolators[interpolation] = (
                nx, ny, times, vr,
                RegularGridInterpolator(
                    (ascont(points_xy[::ny, 0]), ascont(points_xy[:ny, 1])),
                    times,
                    method=interp_map[interpolation]),
                RegularGridInterpolator(
                    (ascont(points_xy[::ny, 0]), ascont(points_xy[:ny, 1])),
                    vr,
                    method=interp_map[interpolation]))

        return self._interpolators[interpolation]

3164 

    def discretize_patches(
            self, store, interpolation='nearest_neighbor', force=False,
            grid_shape=(),
            **kwargs):
        '''
        Get rupture start time and OkadaSource elements for points on rupture.

        All source elements and their corresponding center points are
        calculated and stored in the :py:attr:`patches` attribute.

        Additional ``**kwargs`` are passed to :py:meth:`discretize_time`.

        :param store:
            Green's function database (needs to cover whole region of the
            source).
        :type store:
            :py:class:`~pyrocko.gf.store.Store`

        :param interpolation:
            Interpolation method to use (choose between ``'nearest_neighbor'``
            and ``'multilinear'``).
        :type interpolation:
            optional, str

        :param force:
            Force recalculation of the vr and time interpolators (e.g. after
            change of nucleation point locations/times). Default is ``False``.
        :type force:
            optional, bool

        :param grid_shape:
            Desired sub fault patch grid size (nlength, nwidth). Either factor
            or grid_shape should be set.
        :type grid_shape:
            optional, tuple of int
        '''
        nx, ny, times, vr, time_interpolator, vr_interpolator = \
            self.get_vr_time_interpolators(
                store,
                interpolation=interpolation, force=force, **kwargs)
        anch_x, anch_y = map_anchor[self.anchor]

        # Half-length/-width extents of the fault plane relative to the
        # anchor point, following the OkadaSource al/aw convention.
        al = self.length / 2.
        aw = self.width / 2.
        al1 = -(al + anch_x * al)
        al2 = al - anch_x * al
        aw1 = -aw + anch_y * aw
        aw2 = aw + anch_y * aw
        assert num.abs([al1, al2]).sum() == self.length
        assert num.abs([aw1, aw2]).sum() == self.width

        def get_lame(*a, **kw):
            # Shear modulus mu and Poisson ratio nu derived from the
            # earth model: lambda = rho * vp**2 - 2 * mu,
            # nu = lambda / (2 * (lambda + mu)).
            shear_mod = store.config.get_shear_moduli(*a, **kw)
            lamb = store.config.get_vp(*a, **kw)**2 \
                * store.config.get_rho(*a, **kw) - 2. * shear_mod
            return shear_mod, lamb / (2. * (lamb + shear_mod))

        # Elastic parameters at the source reference point, used for the
        # parent OkadaSource below.
        shear_mod, poisson = get_lame(
            self.lat, self.lon,
            num.array([[self.north_shift, self.east_shift, self.depth]]),
            interpolation=interpolation)

        okada_src = OkadaSource(
            lat=self.lat, lon=self.lon,
            strike=self.strike, dip=self.dip,
            north_shift=self.north_shift, east_shift=self.east_shift,
            depth=self.depth,
            al1=al1, al2=al2, aw1=aw1, aw2=aw2,
            poisson=poisson.mean(),
            shearmod=shear_mod.mean(),
            opening=kwargs.get('opening', 0.))

        # Patch grid size: explicit attributes win, then grid_shape, then
        # the native eikonal grid resolution.
        if not (self.nx and self.ny):
            if grid_shape:
                self.nx, self.ny = grid_shape
            else:
                self.nx = nx
                self.ny = ny

        source_disc, source_points = okada_src.discretize(self.nx, self.ny)

        # NOTE(review): the per-patch elastic parameters computed here are
        # not used further in this method -- confirm whether this call is
        # vestigial or kept for its side effects.
        shear_mod, poisson = get_lame(
            self.lat, self.lon,
            num.array([src.source_patch()[:3] for src in source_disc]),
            interpolation=interpolation)

        # Interpolate rupture time/velocity onto the patch centers, unless
        # the patch grid coincides with the eikonal grid, in which case the
        # grids can be used directly (transposed to match ordering).
        if (self.nx, self.ny) != (nx, ny):
            times_interp = time_interpolator(
                num.ascontiguousarray(source_points[:, :2]))
            vr_interp = vr_interpolator(
                num.ascontiguousarray(source_points[:, :2]))
        else:
            times_interp = times.T.ravel()
            vr_interp = vr.T.ravel()

        for isrc, src in enumerate(source_disc):
            src.vr = vr_interp[isrc]
            src.time = times_interp[isrc] + self.time

        self.patches = source_disc

3265 

    def discretize_basesource(self, store, target=None):
        '''
        Prepare source for synthetic waveform calculation.

        The rupture is converted into a dense cloud of point moment tensor
        sources with onset times, suitable for Green's function summation.

        :param store:
            Green's function database (needs to cover whole region of the
            source).
        :type store:
            :py:class:`~pyrocko.gf.store.Store`

        :param target:
            Target information.
        :type target:
            optional, :py:class:`~pyrocko.gf.targets.Target`

        :returns:
            Source discretized by a set of moment tensors and times.
        :rtype:
            :py:class:`~pyrocko.gf.meta.DiscretizedMTSource`
        '''
        if not target:
            interpolation = 'nearest_neighbor'
        else:
            interpolation = target.interpolation

        # Make sure patches, the coefficient matrix and the slip history
        # are available before sampling.
        if not self.patches:
            self.discretize_patches(store, interpolation)

        if self.coef_mat is None:
            self.calc_coef_mat()

        delta_slip, slip_times = self.get_delta_slip(store)
        npatches = self.nx * self.ny
        ntimes = slip_times.size

        anch_x, anch_y = map_anchor[self.anchor]

        # Patch length and width.
        pln = self.length / self.nx
        pwd = self.width / self.ny

        patch_coords = num.array([
            (p.ix, p.iy)
            for p in self.patches]).reshape(self.nx, self.ny, 2)

        # boundary condition is zero-slip
        # is not valid to avoid unwished interpolation effects
        # Pad the slip grid by one cell on each side, replicating edge and
        # corner values, so interpolation near the fault edges stays flat.
        slip_grid = num.zeros((self.nx + 2, self.ny + 2, ntimes, 3))
        slip_grid[1:-1, 1:-1, :, :] = \
            delta_slip.reshape(self.nx, self.ny, ntimes, 3)

        slip_grid[0, 0, :, :] = slip_grid[1, 1, :, :]
        slip_grid[0, -1, :, :] = slip_grid[1, -2, :, :]
        slip_grid[-1, 0, :, :] = slip_grid[-2, 1, :, :]
        slip_grid[-1, -1, :, :] = slip_grid[-2, -2, :, :]

        slip_grid[1:-1, 0, :, :] = slip_grid[1:-1, 1, :, :]
        slip_grid[1:-1, -1, :, :] = slip_grid[1:-1, -2, :, :]
        slip_grid[0, 1:-1, :, :] = slip_grid[1, 1:-1, :, :]
        slip_grid[-1, 1:-1, :, :] = slip_grid[-2, 1:-1, :, :]

        def make_grid(patch_parameter):
            # Same edge-replicating padding as for the slip grid, for a
            # scalar per-patch parameter.
            grid = num.zeros((self.nx + 2, self.ny + 2))
            grid[1:-1, 1:-1] = patch_parameter.reshape(self.nx, self.ny)

            grid[0, 0] = grid[1, 1]
            grid[0, -1] = grid[1, -2]
            grid[-1, 0] = grid[-2, 1]
            grid[-1, -1] = grid[-2, -2]

            grid[1:-1, 0] = grid[1:-1, 1]
            grid[1:-1, -1] = grid[1:-1, -2]
            grid[0, 1:-1] = grid[1, 1:-1]
            grid[-1, 1:-1] = grid[-2, 1:-1]

            return grid

        lamb = self.get_patch_attribute('lamb')
        mu = self.get_patch_attribute('shearmod')

        lamb_grid = make_grid(lamb)
        mu_grid = make_grid(mu)

        # Grid axes for the padded grids: patch centers plus one ghost
        # coordinate half a patch beyond each edge.
        coords_x = num.zeros(self.nx + 2)
        coords_x[1:-1] = patch_coords[:, 0, 0]
        coords_x[0] = coords_x[1] - pln / 2
        coords_x[-1] = coords_x[-2] + pln / 2

        coords_y = num.zeros(self.ny + 2)
        coords_y[1:-1] = patch_coords[0, :, 1]
        coords_y[0] = coords_y[1] - pwd / 2
        coords_y[-1] = coords_y[-2] + pwd / 2

        slip_interp = RegularGridInterpolator(
            (coords_x, coords_y, slip_times),
            slip_grid, method='nearest')

        lamb_interp = RegularGridInterpolator(
            (coords_x, coords_y),
            lamb_grid, method='nearest')

        mu_interp = RegularGridInterpolator(
            (coords_x, coords_y),
            mu_grid, method='nearest')

        # discretize basesources
        # Subsampling per patch: fine enough for the GF store spacing,
        # coarsened by the decimation factor.
        mindeltagf = min(tuple(
            (self.length / self.nx, self.width / self.ny) +
            tuple(store.config.deltas)))

        nl = int((1. / self.decimation_factor) *
                 num.ceil(pln / mindeltagf)) + 1
        nw = int((1. / self.decimation_factor) *
                 num.ceil(pwd / mindeltagf)) + 1
        nsrc_patch = int(nl * nw)
        dl = pln / nl
        dw = pwd / nw

        patch_area = dl * dw

        xl = num.linspace(-0.5 * (pln - dl), 0.5 * (pln - dl), nl)
        xw = num.linspace(-0.5 * (pwd - dw), 0.5 * (pwd - dw), nw)

        # Per-patch local sub-source offsets, repeated for every patch.
        base_coords = num.zeros((nsrc_patch, 3))
        base_coords[:, 0] = num.tile(xl, nw)
        base_coords[:, 1] = num.repeat(xw, nl)
        base_coords = num.tile(base_coords, (npatches, 1))

        center_coords = num.zeros((npatches, 3))
        center_coords[:, 0] = num.repeat(
            num.arange(self.nx) * pln + pln / 2, self.ny) - self.length / 2
        center_coords[:, 1] = num.tile(
            num.arange(self.ny) * pwd + pwd / 2, self.nx) - self.width / 2

        base_coords -= center_coords.repeat(nsrc_patch, axis=0)
        nbaselocs = base_coords.shape[0]

        # Expand each sub-source location over all slip time steps; the
        # third column temporarily carries the sample time for the 3-D
        # slip interpolator.
        base_interp = base_coords.repeat(ntimes, axis=0)

        base_times = num.tile(slip_times, nbaselocs)
        base_interp[:, 0] -= anch_x * self.length / 2
        base_interp[:, 1] -= anch_y * self.width / 2
        base_interp[:, 2] = base_times

        _, _, _, _, time_interpolator, _ = self.get_vr_time_interpolators(
            store, interpolation=interpolation)

        time_eikonal_max = time_interpolator.values.max()

        nbasesrcs = base_interp.shape[0]
        delta_slip = slip_interp(base_interp).reshape(nbaselocs, ntimes, 3)
        lamb = lamb_interp(base_interp[:, :2]).ravel()
        mu = mu_interp(base_interp[:, :2]).ravel()

        # Disabled debug visualisation of the sampled slip distribution.
        if False:
            try:
                import matplotlib.pyplot as plt
                coords = base_coords.copy()
                norm = num.sum(num.linalg.norm(delta_slip, axis=2), axis=1)
                plt.scatter(coords[:, 0], coords[:, 1], c=norm)
                plt.show()
            except AttributeError:
                pass

        # Rotate the fault-plane coordinates into the NED frame and shift
        # to the absolute source position. The time column is zeroed first
        # so it acts as the (in-plane zero) depth coordinate.
        base_interp[:, 2] = 0.
        rotmat = pmt.euler_to_matrix(self.dip * d2r, self.strike * d2r, 0.0)
        base_interp = num.dot(rotmat.T, base_interp.T).T
        base_interp[:, 0] += self.north_shift
        base_interp[:, 1] += self.east_shift
        base_interp[:, 2] += self.depth

        slip_strike = delta_slip[:, :, 0].ravel()
        slip_dip = delta_slip[:, :, 1].ravel()
        slip_norm = delta_slip[:, :, 2].ravel()

        slip_shear = num.linalg.norm([slip_strike, slip_dip], axis=0)
        slip_rake = r2d * num.arctan2(slip_dip, slip_strike)

        # Convert dislocations to moment tensor (m6) components.
        m6s = okada_ext.patch2m6(
            strikes=num.full(nbasesrcs, self.strike, dtype=float),
            dips=num.full(nbasesrcs, self.dip, dtype=float),
            rakes=slip_rake,
            disl_shear=slip_shear,
            disl_norm=slip_norm,
            lamb=lamb,
            mu=mu,
            nthreads=self.nthreads)

        m6s *= patch_area

        dl = -self.patches[0].al1 + self.patches[0].al2
        dw = -self.patches[0].aw1 + self.patches[0].aw2

        # Clip onset times to the maximum eikonal rupture time.
        base_times[base_times > time_eikonal_max] = time_eikonal_max

        ds = meta.DiscretizedMTSource(
            lat=self.lat,
            lon=self.lon,
            times=base_times + self.time,
            north_shifts=base_interp[:, 0],
            east_shifts=base_interp[:, 1],
            depths=base_interp[:, 2],
            m6s=m6s,
            dl=dl,
            dw=dw,
            nl=self.nx,
            nw=self.ny)

        return ds

3474 

3475 def calc_coef_mat(self): 

3476 ''' 

3477 Calculate coefficients connecting tractions and dislocations. 

3478 ''' 

3479 if not self.patches: 

3480 raise ValueError( 

3481 'Patches are needed. Please calculate them first.') 

3482 

3483 self.coef_mat = make_okada_coefficient_matrix( 

3484 self.patches, nthreads=self.nthreads, pure_shear=self.pure_shear) 

3485 

3486 def get_patch_attribute(self, attr): 

3487 ''' 

3488 Get patch attributes. 

3489 

3490 :param attr: 

3491 Name of selected attribute (see 

3492 :py:class`pyrocko.modelling.okada.OkadaSource`). 

3493 :type attr: 

3494 str 

3495 

3496 :returns: 

3497 Array with attribute value for each fault patch. 

3498 :rtype: 

3499 :py:class:`~numpy.ndarray` 

3500 

3501 ''' 

3502 if not self.patches: 

3503 raise ValueError( 

3504 'Patches are needed. Please calculate them first.') 

3505 return num.array([getattr(p, attr) for p in self.patches]) 

3506 

3507 def get_slip( 

3508 self, 

3509 time=None, 

3510 scale_slip=True, 

3511 interpolation='nearest_neighbor', 

3512 **kwargs): 

3513 ''' 

3514 Get slip per subfault patch for given time after rupture start. 

3515 

3516 :param time: 

3517 Time after origin [s], for which slip is computed. If not 

3518 given, final static slip is returned. 

3519 :type time: 

3520 optional, float > 0. 

3521 

3522 :param scale_slip: 

3523 If ``True`` and :py:attr:`slip` given, all slip values are scaled 

3524 to fit the given maximum slip. 

3525 :type scale_slip: 

3526 optional, bool 

3527 

3528 :param interpolation: 

3529 Interpolation method to use (choose between ``'nearest_neighbor'`` 

3530 and ``'multilinear'``). 

3531 :type interpolation: 

3532 optional, str 

3533 

3534 :returns: 

3535 Inverted dislocations (:math:`u_{strike}, u_{dip}, u_{tensile}`) 

3536 for each source patch. 

3537 :rtype: 

3538 :py:class:`~numpy.ndarray`: ``(n_sources, 3)`` 

3539 ''' 

3540 

3541 if self.patches is None: 

3542 raise ValueError( 

3543 'Please discretize the source first (discretize_patches())') 

3544 npatches = len(self.patches) 

3545 tractions = self.get_tractions() 

3546 time_patch_max = self.get_patch_attribute('time').max() - self.time 

3547 

3548 time_patch = time 

3549 if time is None: 

3550 time_patch = time_patch_max 

3551 

3552 if self.coef_mat is None: 

3553 self.calc_coef_mat() 

3554 

3555 if tractions.shape != (npatches, 3): 

3556 raise AttributeError( 

3557 'The traction vector is of invalid shape.' 

3558 ' Required shape is (npatches, 3)') 

3559 

3560 patch_mask = num.ones(npatches, dtype=bool) 

3561 if self.patch_mask is not None: 

3562 patch_mask = self.patch_mask 

3563 

3564 times = self.get_patch_attribute('time') - self.time 

3565 times[~patch_mask] = time_patch + 1. # exlcude unmasked patches 

3566 relevant_sources = num.nonzero(times <= time_patch)[0] 

3567 disloc_est = num.zeros_like(tractions) 

3568 

3569 if self.smooth_rupture: 

3570 patch_activation = num.zeros(npatches) 

3571 

3572 nx, ny, times, vr, time_interpolator, vr_interpolator = \ 

3573 self.get_vr_time_interpolators( 

3574 store, interpolation=interpolation) 

3575 

3576 # Getting the native Eikonal grid, bit hackish 

3577 points_x = num.round(time_interpolator.grid[0], decimals=2) 

3578 points_y = num.round(time_interpolator.grid[1], decimals=2) 

3579 times_eikonal = time_interpolator.values 

3580 

3581 time_max = time 

3582 if time is None: 

3583 time_max = times_eikonal.max() 

3584 

3585 for ip, p in enumerate(self.patches): 

3586 ul = num.round((p.ix + p.al1, p.iy + p.aw1), decimals=2) 

3587 lr = num.round((p.ix + p.al2, p.iy + p.aw2), decimals=2) 

3588 

3589 idx_length = num.logical_and( 

3590 points_x >= ul[0], points_x <= lr[0]) 

3591 idx_width = num.logical_and( 

3592 points_y >= ul[1], points_y <= lr[1]) 

3593 

3594 times_patch = times_eikonal[num.ix_(idx_length, idx_width)] 

3595 if times_patch.size == 0: 

3596 raise AttributeError('could not use smooth_rupture') 

3597 

3598 patch_activation[ip] = \ 

3599 (times_patch <= time_max).sum() / times_patch.size 

3600 

3601 if time_patch == 0 and time_patch != time_patch_max: 

3602 patch_activation[ip] = 0. 

3603 

3604 patch_activation[~patch_mask] = 0. # exlcude unmasked patches 

3605 

3606 relevant_sources = num.nonzero(patch_activation > 0.)[0] 

3607 

3608 if relevant_sources.size == 0: 

3609 return disloc_est 

3610 

3611 indices_disl = num.repeat(relevant_sources * 3, 3) 

3612 indices_disl[1::3] += 1 

3613 indices_disl[2::3] += 2 

3614 

3615 disloc_est[relevant_sources] = invert_fault_dislocations_bem( 

3616 stress_field=tractions[relevant_sources, :].ravel(), 

3617 coef_mat=self.coef_mat[indices_disl, :][:, indices_disl], 

3618 pure_shear=self.pure_shear, nthreads=self.nthreads, 

3619 epsilon=None, 

3620 **kwargs) 

3621 

3622 if self.smooth_rupture: 

3623 disloc_est *= patch_activation[:, num.newaxis] 

3624 

3625 if scale_slip and self.slip is not None: 

3626 disloc_tmax = num.zeros(npatches) 

3627 

3628 indices_disl = num.repeat(num.nonzero(patch_mask)[0] * 3, 3) 

3629 indices_disl[1::3] += 1 

3630 indices_disl[2::3] += 2 

3631 

3632 disloc_tmax[patch_mask] = num.linalg.norm( 

3633 invert_fault_dislocations_bem( 

3634 stress_field=tractions[patch_mask, :].ravel(), 

3635 coef_mat=self.coef_mat[indices_disl, :][:, indices_disl], 

3636 pure_shear=self.pure_shear, nthreads=self.nthreads, 

3637 epsilon=None, 

3638 **kwargs), axis=1) 

3639 

3640 disloc_tmax_max = disloc_tmax.max() 

3641 if disloc_tmax_max == 0.: 

3642 logger.warning( 

3643 'slip scaling not performed. Maximum slip is 0.') 

3644 

3645 disloc_est *= self.slip / disloc_tmax_max 

3646 

3647 return disloc_est 

3648 

3649 def get_delta_slip( 

3650 self, 

3651 store=None, 

3652 deltat=None, 

3653 delta=True, 

3654 interpolation='nearest_neighbor', 

3655 **kwargs): 

3656 ''' 

3657 Get slip change snapshots. 

3658 

3659 The time interval, within which the slip changes are computed is 

3660 determined by the sampling rate of the Green's function database or 

3661 ``deltat``. Additional ``**kwargs`` are passed to :py:meth:`get_slip`. 

3662 

3663 :param store: 

3664 Green's function database (needs to cover whole region of of the 

3665 source). Its sampling interval is used as time increment for slip 

3666 difference calculation. Either ``deltat`` or ``store`` should be 

3667 given. 

3668 :type store: 

3669 optional, :py:class:`~pyrocko.gf.store.Store` 

3670 

3671 :param deltat: 

3672 Time interval for slip difference calculation [s]. Either 

3673 ``deltat`` or ``store`` should be given. 

3674 :type deltat: 

3675 optional, float 

3676 

3677 :param delta: 

3678 If ``True``, slip differences between two time steps are given. If 

3679 ``False``, cumulative slip for all time steps. 

3680 :type delta: 

3681 optional, bool 

3682 

3683 :param interpolation: 

3684 Interpolation method to use (choose between ``'nearest_neighbor'`` 

3685 and ``'multilinear'``). 

3686 :type interpolation: 

3687 optional, str 

3688 

3689 :returns: 

3690 Displacement changes(:math:`\\Delta u_{strike}, 

3691 \\Delta u_{dip} , \\Delta u_{tensile}`) for each source patch and 

3692 time; corner times, for which delta slip is computed. The order of 

3693 displacement changes array is: 

3694 

3695 .. math:: 

3696 

3697 &[[\\\\ 

3698 &[\\Delta u_{strike, patch1, t1}, 

3699 \\Delta u_{dip, patch1, t1}, 

3700 \\Delta u_{tensile, patch1, t1}],\\\\ 

3701 &[\\Delta u_{strike, patch1, t2}, 

3702 \\Delta u_{dip, patch1, t2}, 

3703 \\Delta u_{tensile, patch1, t2}]\\\\ 

3704 &], [\\\\ 

3705 &[\\Delta u_{strike, patch2, t1}, ...],\\\\ 

3706 &[\\Delta u_{strike, patch2, t2}, ...]]]\\\\ 

3707 

3708 :rtype: :py:class:`~numpy.ndarray`: ``(n_sources, n_times, 3)``, 

3709 :py:class:`~numpy.ndarray`: ``(n_times, )`` 

3710 ''' 

3711 if store and deltat: 

3712 raise AttributeError( 

3713 'Argument collision. ' 

3714 'Please define only the store or the deltat argument.') 

3715 

3716 if store: 

3717 deltat = store.config.deltat 

3718 

3719 if not deltat: 

3720 raise AttributeError('Please give a GF store or set deltat.') 

3721 

3722 npatches = len(self.patches) 

3723 

3724 _, _, _, _, time_interpolator, _ = self.get_vr_time_interpolators( 

3725 store, interpolation=interpolation) 

3726 tmax = time_interpolator.values.max() 

3727 

3728 calc_times = num.arange(0., tmax + deltat, deltat) 

3729 calc_times[calc_times > tmax] = tmax 

3730 

3731 disloc_est = num.zeros((npatches, calc_times.size, 3)) 

3732 

3733 for itime, t in enumerate(calc_times): 

3734 disloc_est[:, itime, :] = self.get_slip( 

3735 time=t, scale_slip=False, **kwargs) 

3736 

3737 if self.slip: 

3738 disloc_tmax = num.linalg.norm( 

3739 self.get_slip(scale_slip=False, time=tmax), 

3740 axis=1) 

3741 

3742 disloc_tmax_max = disloc_tmax.max() 

3743 if disloc_tmax_max == 0.: 

3744 logger.warning( 

3745 'Slip scaling not performed. Maximum slip is 0.') 

3746 else: 

3747 disloc_est *= self.slip / disloc_tmax_max 

3748 

3749 if not delta: 

3750 return disloc_est, calc_times 

3751 

3752 # if we have only one timestep there is no gradient 

3753 if calc_times.size > 1: 

3754 disloc_init = disloc_est[:, 0, :] 

3755 disloc_est = num.diff(disloc_est, axis=1) 

3756 disloc_est = num.concatenate(( 

3757 disloc_init[:, num.newaxis, :], disloc_est), axis=1) 

3758 

3759 calc_times = calc_times 

3760 

3761 return disloc_est, calc_times 

3762 

3763 def get_slip_rate(self, *args, **kwargs): 

3764 ''' 

3765 Get slip rate inverted from patches. 

3766 

3767 The time interval, within which the slip rates are computed is 

3768 determined by the sampling rate of the Green's function database or 

3769 ``deltat``. Additional ``*args`` and ``**kwargs`` are passed to 

3770 :py:meth:`get_delta_slip`. 

3771 

3772 :returns: 

3773 Slip rates (:math:`\\Delta u_{strike}/\\Delta t`, 

3774 :math:`\\Delta u_{dip}/\\Delta t, \\Delta u_{tensile}/\\Delta t`) 

3775 for each source patch and time; corner times, for which slip rate 

3776 is computed. The order of sliprate array is: 

3777 

3778 .. math:: 

3779 

3780 &[[\\\\ 

3781 &[\\Delta u_{strike, patch1, t1}/\\Delta t, 

3782 \\Delta u_{dip, patch1, t1}/\\Delta t, 

3783 \\Delta u_{tensile, patch1, t1}/\\Delta t],\\\\ 

3784 &[\\Delta u_{strike, patch1, t2}/\\Delta t, 

3785 \\Delta u_{dip, patch1, t2}/\\Delta t, 

3786 \\Delta u_{tensile, patch1, t2}/\\Delta t]], [\\\\ 

3787 &[\\Delta u_{strike, patch2, t1}/\\Delta t, ...],\\\\ 

3788 &[\\Delta u_{strike, patch2, t2}/\\Delta t, ...]]]\\\\ 

3789 

3790 :rtype: :py:class:`~numpy.ndarray`: ``(n_sources, n_times, 3)``, 

3791 :py:class:`~numpy.ndarray`: ``(n_times, )`` 

3792 ''' 

3793 ddisloc_est, calc_times = self.get_delta_slip( 

3794 *args, delta=True, **kwargs) 

3795 

3796 dt = num.concatenate( 

3797 [(num.diff(calc_times)[0], ), num.diff(calc_times)]) 

3798 slip_rate = num.linalg.norm(ddisloc_est, axis=2) / dt 

3799 

3800 return slip_rate, calc_times 

3801 

3802 def get_moment_rate_patches(self, *args, **kwargs): 

3803 ''' 

3804 Get scalar seismic moment rate for each patch individually. 

3805 

3806 Additional ``*args`` and ``**kwargs`` are passed to 

3807 :py:meth:`get_slip_rate`. 

3808 

3809 :returns: 

3810 Seismic moment rate for each source patch and time; corner times, 

3811 for which patch moment rate is computed based on slip rate. The 

3812 order of the moment rate array is: 

3813 

3814 .. math:: 

3815 

3816 &[\\\\ 

3817 &[(\\Delta M / \\Delta t)_{patch1, t1}, 

3818 (\\Delta M / \\Delta t)_{patch1, t2}, ...],\\\\ 

3819 &[(\\Delta M / \\Delta t)_{patch2, t1}, 

3820 (\\Delta M / \\Delta t)_{patch, t2}, ...],\\\\ 

3821 &[...]]\\\\ 

3822 

3823 :rtype: :py:class:`~numpy.ndarray`: ``(n_sources, n_times)``, 

3824 :py:class:`~numpy.ndarray`: ``(n_times, )`` 

3825 ''' 

3826 slip_rate, calc_times = self.get_slip_rate(*args, **kwargs) 

3827 

3828 shear_mod = self.get_patch_attribute('shearmod') 

3829 p_length = self.get_patch_attribute('length') 

3830 p_width = self.get_patch_attribute('width') 

3831 

3832 dA = p_length * p_width 

3833 

3834 mom_rate = shear_mod[:, num.newaxis] * slip_rate * dA[:, num.newaxis] 

3835 

3836 return mom_rate, calc_times 

3837 

3838 def get_moment_rate(self, store, target=None, deltat=None): 

3839 ''' 

3840 Get seismic source moment rate for the total source (STF). 

3841 

3842 :param store: 

3843 Green's function database (needs to cover whole region of of the 

3844 source). Its ``deltat`` [s] is used as time increment for slip 

3845 difference calculation. Either ``deltat`` or ``store`` should be 

3846 given. 

3847 :type store: 

3848 :py:class:`~pyrocko.gf.store.Store` 

3849 

3850 :param target: 

3851 Target information, needed for interpolation method. 

3852 :type target: 

3853 optional, :py:class:`~pyrocko.gf.targets.Target` 

3854 

3855 :param deltat: 

3856 Time increment for slip difference calculation [s]. If not given 

3857 ``store.deltat`` is used. 

3858 :type deltat: 

3859 optional, float 

3860 

3861 :return: 

3862 Seismic moment rate [Nm/s] for each time; corner times, for which 

3863 moment rate is computed. The order of the moment rate array is: 

3864 

3865 .. math:: 

3866 

3867 &[\\\\ 

3868 &(\\Delta M / \\Delta t)_{t1},\\\\ 

3869 &(\\Delta M / \\Delta t)_{t2},\\\\ 

3870 &...]\\\\ 

3871 

3872 :rtype: 

3873 :py:class:`~numpy.ndarray`: ``(n_times, )``, 

3874 :py:class:`~numpy.ndarray`: ``(n_times, )`` 

3875 ''' 

3876 if not deltat: 

3877 deltat = store.config.deltat 

3878 return self.discretize_basesource( 

3879 store, target=target).get_moment_rate(deltat) 

3880 

3881 def get_moment(self, *args, **kwargs): 

3882 ''' 

3883 Get seismic cumulative moment. 

3884 

3885 Additional ``*args`` and ``**kwargs`` are passed to 

3886 :py:meth:`get_magnitude`. 

3887 

3888 :returns: 

3889 Cumulative seismic moment in [Nm]. 

3890 :rtype: 

3891 float 

3892 ''' 

3893 return float(pmt.magnitude_to_moment(self.get_magnitude( 

3894 *args, **kwargs))) 

3895 

3896 def rescale_slip(self, magnitude=None, moment=None, **kwargs): 

3897 ''' 

3898 Rescale source slip based on given target magnitude or seismic moment. 

3899 

3900 Rescale the maximum source slip to fit the source moment magnitude or 

3901 seismic moment to the given target values. Either ``magnitude`` or 

3902 ``moment`` need to be given. Additional ``**kwargs`` are passed to 

3903 :py:meth:`get_moment`. 

3904 

3905 :param magnitude: 

3906 Target moment magnitude :math:`M_\\mathrm{w}` as in 

3907 [Hanks and Kanamori, 1979] 

3908 :type magnitude: 

3909 optional, float 

3910 

3911 :param moment: 

3912 Target seismic moment :math:`M_0` [Nm]. 

3913 :type moment: 

3914 optional, float 

3915 ''' 

3916 if self.slip is None: 

3917 self.slip = 1. 

3918 logger.warning('No slip found for rescaling. ' 

3919 'An initial slip of 1 m is assumed.') 

3920 

3921 if magnitude is None and moment is None: 

3922 raise ValueError( 

3923 'Either target magnitude or moment need to be given.') 

3924 

3925 moment_init = self.get_moment(**kwargs) 

3926 

3927 if magnitude is not None: 

3928 moment = pmt.magnitude_to_moment(magnitude) 

3929 

3930 self.slip *= moment / moment_init 

3931 

3932 def get_centroid(self, store, *args, **kwargs): 

3933 ''' 

3934 Centroid of the pseudo dynamic rupture model. 

3935 

3936 The centroid location and time are derived from the locations and times 

3937 of the individual patches weighted with their moment contribution. 

3938 Additional ``**kwargs`` are passed to :py:meth:`pyrocko_moment_tensor`. 

3939 

3940 :param store: 

3941 Green's function database (needs to cover whole region of of the 

3942 source). Its ``deltat`` [s] is used as time increment for slip 

3943 difference calculation. Either ``deltat`` or ``store`` should be 

3944 given. 

3945 :type store: 

3946 :py:class:`~pyrocko.gf.store.Store` 

3947 

3948 :returns: 

3949 The centroid location and associated moment tensor. 

3950 :rtype: 

3951 :py:class:`pyrocko.model.Event` 

3952 ''' 

3953 _, _, _, _, time, _ = self.get_vr_time_interpolators(store) 

3954 t_max = time.values.max() 

3955 

3956 moment_rate, times = self.get_moment_rate_patches(deltat=t_max) 

3957 

3958 moment = num.sum(moment_rate * times, axis=1) 

3959 weights = moment / moment.sum() 

3960 

3961 norths = self.get_patch_attribute('north_shift') 

3962 easts = self.get_patch_attribute('east_shift') 

3963 depths = self.get_patch_attribute('depth') 

3964 

3965 centroid_n = num.sum(weights * norths) 

3966 centroid_e = num.sum(weights * easts) 

3967 centroid_d = num.sum(weights * depths) 

3968 

3969 centroid_lat, centroid_lon = ne_to_latlon( 

3970 self.lat, self.lon, centroid_n, centroid_e) 

3971 

3972 moment_rate_, times = self.get_moment_rate(store) 

3973 delta_times = num.concatenate(( 

3974 [times[1] - times[0]], 

3975 num.diff(times))) 

3976 moment_src = delta_times * moment_rate 

3977 

3978 centroid_t = num.sum( 

3979 moment_src / num.sum(moment_src) * times) + self.time 

3980 

3981 mt = self.pyrocko_moment_tensor(store, *args, **kwargs) 

3982 

3983 return model.Event( 

3984 lat=centroid_lat, 

3985 lon=centroid_lon, 

3986 depth=centroid_d, 

3987 time=centroid_t, 

3988 moment_tensor=mt, 

3989 magnitude=mt.magnitude, 

3990 duration=t_max) 

3991 

3992 

class DoubleDCSource(SourceWithMagnitude):
    '''
    Two double-couple point sources separated in space and time.

    Moment share between the sub-sources is controlled by the
    parameter mix.
    The position of the subsources is dependent on the moment
    distribution between the two sources. Depth, east and north
    shift are given for the centroid between the two double-couples.
    The subsources will positioned according to their moment shares
    around this centroid position.
    This is done according to their delta parameters, which are
    therefore in relation to that centroid.
    Note that depth of the subsources therefore can be
    depth+/-delta_depth. For shallow earthquakes therefore
    the depth has to be chosen deeper to avoid sampling
    above surface.
    '''

    strike1 = Float.T(
        default=0.0,
        help='strike direction in [deg], measured clockwise from north')

    dip1 = Float.T(
        default=90.0,
        help='dip angle in [deg], measured downward from horizontal')

    azimuth = Float.T(
        default=0.0,
        help='azimuth to second double-couple [deg], '
             'measured at first, clockwise from north')

    rake1 = Float.T(
        default=0.0,
        help='rake angle in [deg], '
             'measured counter-clockwise from right-horizontal '
             'in on-plane view')

    strike2 = Float.T(
        default=0.0,
        help='strike direction in [deg], measured clockwise from north')

    dip2 = Float.T(
        default=90.0,
        help='dip angle in [deg], measured downward from horizontal')

    rake2 = Float.T(
        default=0.0,
        help='rake angle in [deg], '
             'measured counter-clockwise from right-horizontal '
             'in on-plane view')

    delta_time = Float.T(
        default=0.0,
        help='separation of double-couples in time (t2-t1) [s]')

    delta_depth = Float.T(
        default=0.0,
        help='difference in depth (z2-z1) [m]')

    distance = Float.T(
        default=0.0,
        help='distance between the two double-couples [m]')

    mix = Float.T(
        default=0.5,
        help='how to distribute the moment to the two doublecouples '
             'mix=0 -> m1=1 and m2=0; mix=1 -> m1=0, m2=1')

    stf1 = STF.T(
        optional=True,
        help='Source time function of subsource 1 '
             '(if given, overrides STF from attribute :py:gattr:`Source.stf`)')

    stf2 = STF.T(
        optional=True,
        help='Source time function of subsource 2 '
             '(if given, overrides STF from attribute :py:gattr:`Source.stf`)')

    discretized_source_class = meta.DiscretizedMTSource

    def base_key(self):
        # Cache key for GF computations: all geometry, mechanism, timing
        # and STF parameters which influence the discretized source.
        return (
            self.time, self.depth, self.lat, self.north_shift,
            self.lon, self.east_shift, type(self).__name__) + \
            self.effective_stf1_pre().base_key() + \
            self.effective_stf2_pre().base_key() + (
            self.strike1, self.dip1, self.rake1,
            self.strike2, self.dip2, self.rake2,
            self.delta_time, self.delta_depth,
            self.azimuth, self.distance, self.mix)

    def get_factor(self):
        # Overall amplitude scaling applied to the unit-moment
        # discretized source.
        return self.moment

    def effective_stf1_pre(self):
        # STF for subsource 1: per-subsource STF wins over the common
        # one; fall back to a unit pulse if neither is set.
        return self.stf1 or self.stf or g_unit_pulse

    def effective_stf2_pre(self):
        # STF for subsource 2, same precedence as effective_stf1_pre.
        return self.stf2 or self.stf or g_unit_pulse

    def effective_stf_post(self):
        # STFs are applied pre-stacking for this source type, so
        # post-processing uses a unit pulse.
        return g_unit_pulse

    def split(self):
        '''
        Split into the two equivalent :py:class:`DCSource` subsources.

        Positions and times are offset from the centroid according to
        the moment shares (``mix``): the sub-event carrying more moment
        stays closer to the centroid.
        '''
        a1 = 1.0 - self.mix
        a2 = self.mix
        delta_north = math.cos(self.azimuth * d2r) * self.distance
        delta_east = math.sin(self.azimuth * d2r) * self.distance

        dc1 = DCSource(
            lat=self.lat,
            lon=self.lon,
            time=self.time - self.delta_time * a2,
            north_shift=self.north_shift - delta_north * a2,
            east_shift=self.east_shift - delta_east * a2,
            depth=self.depth - self.delta_depth * a2,
            moment=self.moment * a1,
            strike=self.strike1,
            dip=self.dip1,
            rake=self.rake1,
            stf=self.stf1 or self.stf)

        dc2 = DCSource(
            lat=self.lat,
            lon=self.lon,
            time=self.time + self.delta_time * a1,
            north_shift=self.north_shift + delta_north * a1,
            east_shift=self.east_shift + delta_east * a1,
            depth=self.depth + self.delta_depth * a1,
            moment=self.moment * a2,
            strike=self.strike2,
            dip=self.dip2,
            rake=self.rake2,
            stf=self.stf2 or self.stf)

        return [dc1, dc2]

    def discretize_basesource(self, store, target=None):
        '''
        Discretize both subsources in time and combine them into a
        single :py:class:`~pyrocko.gf.meta.DiscretizedMTSource`.

        The scalar moments used here are the mix shares (a1, a2); the
        total moment enters later via :py:meth:`get_factor`.
        '''
        a1 = 1.0 - self.mix
        a2 = self.mix
        mot1 = pmt.MomentTensor(strike=self.strike1, dip=self.dip1,
                                rake=self.rake1, scalar_moment=a1)
        mot2 = pmt.MomentTensor(strike=self.strike2, dip=self.dip2,
                                rake=self.rake2, scalar_moment=a2)

        delta_north = math.cos(self.azimuth * d2r) * self.distance
        delta_east = math.sin(self.azimuth * d2r) * self.distance

        times1, amplitudes1 = self.effective_stf1_pre().discretize_t(
            store.config.deltat, self.time - self.delta_time * a2)

        times2, amplitudes2 = self.effective_stf2_pre().discretize_t(
            store.config.deltat, self.time + self.delta_time * a1)

        nt1 = times1.size
        nt2 = times2.size

        # Each subsource contributes one point per STF sample; moment
        # tensors are scaled by the respective STF amplitudes.
        ds = meta.DiscretizedMTSource(
            lat=self.lat,
            lon=self.lon,
            times=num.concatenate((times1, times2)),
            north_shifts=num.concatenate((
                num.repeat(self.north_shift - delta_north * a2, nt1),
                num.repeat(self.north_shift + delta_north * a1, nt2))),
            east_shifts=num.concatenate((
                num.repeat(self.east_shift - delta_east * a2, nt1),
                num.repeat(self.east_shift + delta_east * a1, nt2))),
            depths=num.concatenate((
                num.repeat(self.depth - self.delta_depth * a2, nt1),
                num.repeat(self.depth + self.delta_depth * a1, nt2))),
            m6s=num.vstack((
                mot1.m6()[num.newaxis, :] * amplitudes1[:, num.newaxis],
                mot2.m6()[num.newaxis, :] * amplitudes2[:, num.newaxis])))

        return ds

    def pyrocko_moment_tensor(self, store=None, target=None):
        '''
        Combined moment tensor: moment-weighted sum of the two
        sub-mechanisms.
        '''
        a1 = 1.0 - self.mix
        a2 = self.mix
        mot1 = pmt.MomentTensor(strike=self.strike1, dip=self.dip1,
                                rake=self.rake1,
                                scalar_moment=a1 * self.moment)
        mot2 = pmt.MomentTensor(strike=self.strike2, dip=self.dip2,
                                rake=self.rake2,
                                scalar_moment=a2 * self.moment)
        return pmt.MomentTensor(m=mot1.m() + mot2.m())

    def pyrocko_event(self, store=None, target=None, **kwargs):
        return SourceWithMagnitude.pyrocko_event(
            self, store, target,
            moment_tensor=self.pyrocko_moment_tensor(store, target),
            **kwargs)

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        '''
        Build a :py:class:`DoubleDCSource` from an event.

        Both subsources get the same mechanism from the event's moment
        tensor; ``mix=0.0`` puts all moment into subsource 1.
        '''
        d = {}
        mt = ev.moment_tensor
        if mt:
            (strike, dip, rake), _ = mt.both_strike_dip_rake()
            d.update(
                strike1=float(strike),
                dip1=float(dip),
                rake1=float(rake),
                strike2=float(strike),
                dip2=float(dip),
                rake2=float(rake),
                mix=0.0,
                magnitude=float(mt.moment_magnitude()))

        d.update(kwargs)
        source = super(DoubleDCSource, cls).from_pyrocko_event(ev, **d)
        # Subsource 2 carries no moment (mix=0.0); give it a zero-length
        # STF and move the common STF to subsource 1.
        source.stf1 = source.stf
        source.stf2 = HalfSinusoidSTF(effective_duration=0.)
        source.stf = None
        return source

4208 

4209 

class RingfaultSource(SourceWithMagnitude):
    '''
    A ring fault with vertical doublecouples.

    The ring is discretized into ``npointsources`` moment tensor point
    sources arranged on a circle of the given ``diameter``; the ring
    plane can be tilted via ``strike`` and ``dip``.
    '''

    diameter = Float.T(
        default=1.0,
        help='diameter of the ring in [m]')

    sign = Float.T(
        default=1.0,
        help='inside of the ring moves up (+1) or down (-1)')

    strike = Float.T(
        default=0.0,
        help='strike direction of the ring plane, clockwise from north,'
             ' in [deg]')

    dip = Float.T(
        default=0.0,
        help='dip angle of the ring plane from horizontal in [deg]')

    npointsources = Int.T(
        default=360,
        help='number of point sources to use')

    discretized_source_class = meta.DiscretizedMTSource

    def base_key(self):
        # Geometry and discretization parameters enter the GF cache key.
        return Source.base_key(self) + (
            self.strike, self.dip, self.diameter, self.npointsources)

    def get_factor(self):
        # Sign selects uplift vs. subsidence of the ring interior.
        return self.sign * self.moment

    def discretize_basesource(self, store=None, target=None):
        '''
        Discretize the ring into a cloud of moment tensor point sources.

        NOTE(review): despite the ``None`` default, *store* must be
        provided — ``store.config.deltat`` is read below.
        '''
        n = self.npointsources
        phi = num.linspace(0, 2.0 * num.pi, n, endpoint=False)

        # Points on a circle in the (north, east) plane, centered at the
        # origin.
        points = num.zeros((n, 3))
        points[:, 0] = num.cos(phi) * 0.5 * self.diameter
        points[:, 1] = num.sin(phi) * 0.5 * self.diameter

        # Tilt the ring plane according to dip and strike.
        rotmat = pmt.euler_to_matrix(self.dip * d2r, self.strike * d2r, 0.0)
        points = num.dot(rotmat.T, points.T).T  # !!! ?

        points[:, 0] += self.north_shift
        points[:, 1] += self.east_shift
        points[:, 2] += self.depth

        # Base mechanism; total moment of 1.0 is split evenly among the
        # point sources.
        m = num.array(pmt.MomentTensor(strike=90., dip=90., rake=-90.,
                                       scalar_moment=1.0 / n).m())

        # One rotation matrix per point: rotation about the vertical
        # axis by the ring angle phi.
        rotmats = num.transpose(
            [[num.cos(phi), num.sin(phi), num.zeros(n)],
             [-num.sin(phi), num.cos(phi), num.zeros(n)],
             [num.zeros(n), num.zeros(n), num.ones(n)]], (2, 0, 1))

        # Rotate the base mechanism to each ring position, then into the
        # tilted ring plane.
        ms = num.zeros((n, 3, 3))
        for i in range(n):
            mtemp = num.dot(rotmats[i].T, num.dot(m, rotmats[i]))
            ms[i, :, :] = num.dot(rotmat.T, num.dot(mtemp, rotmat))

        # Pack symmetric 3x3 tensors into 6-component form.
        m6s = num.vstack((ms[:, 0, 0], ms[:, 1, 1], ms[:, 2, 2],
                          ms[:, 0, 1], ms[:, 0, 2], ms[:, 1, 2])).T

        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)

        nt = times.size

        # Every ring point is repeated for each STF sample.
        return meta.DiscretizedMTSource(
            times=num.tile(times, n),
            lat=self.lat,
            lon=self.lon,
            north_shifts=num.repeat(points[:, 0], nt),
            east_shifts=num.repeat(points[:, 1], nt),
            depths=num.repeat(points[:, 2], nt),
            m6s=num.repeat(m6s, nt, axis=0) * num.tile(
                amplitudes, n)[:, num.newaxis])

4290 

4291 

class CombiSource(Source):
    '''
    Composite source model.

    Combines several sub-sources into one source object. All
    sub-sources are transformed to the origin of the first sub-source;
    the mean of the sub-source positions and times is used as the
    nominal position and time of the combined source.
    '''

    discretized_source_class = meta.DiscretizedMTSource

    subsources = List.T(Source.T())

    def __init__(self, subsources=None, **kwargs):
        '''
        :param subsources: sources to combine (at least one required)
        :raises BadRequest: if no sub-source is given
        '''
        if not subsources:
            raise BadRequest(
                'Need at least one sub-source to create a CombiSource object.')

        lats = num.array(
            [subsource.lat for subsource in subsources], dtype=float)
        lons = num.array(
            [subsource.lon for subsource in subsources], dtype=float)

        lat, lon = lats[0], lons[0]

        # If the sub-sources do not all share a common origin, clone
        # them and shift them to the origin of the first sub-source.
        # Bug fix: the condition previously read
        # `not num.all(lats == lat) and num.all(lons == lon)` which, due
        # to operator precedence, skipped the re-origin step whenever
        # the longitudes differed.
        if not (num.all(lats == lat) and num.all(lons == lon)):
            subsources = [s.clone() for s in subsources]
            for subsource in subsources[1:]:
                subsource.set_origin(lat, lon)

        depth = float(num.mean([p.depth for p in subsources]))
        time = float(num.mean([p.time for p in subsources]))
        north_shift = float(num.mean([p.north_shift for p in subsources]))
        east_shift = float(num.mean([p.east_shift for p in subsources]))
        kwargs.update(
            time=time,
            lat=float(lat),
            lon=float(lon),
            north_shift=north_shift,
            east_shift=east_shift,
            depth=depth)

        Source.__init__(self, subsources=subsources, **kwargs)

    def get_factor(self):
        # Amplitudes are baked into the discretized sub-sources (see
        # discretize_basesource), so no global factor is applied.
        return 1.0

    def discretize_basesource(self, store, target=None):
        '''
        Discretize all sub-sources, apply their individual factors and
        combine them into a single discretized source.
        '''
        dsources = []
        for sf in self.subsources:
            ds = sf.discretize_basesource(store, target)
            ds.m6s *= sf.get_factor()
            dsources.append(ds)

        return meta.DiscretizedMTSource.combine(dsources)

4342 

4343 

class SFSource(Source):
    '''
    A single force point source.

    Supported GF schemes: `'elastic5'`.
    '''

    discretized_source_class = meta.DiscretizedSFSource

    fn = Float.T(
        default=0.,
        help='northward component of single force [N]')

    fe = Float.T(
        default=0.,
        help='eastward component of single force [N]')

    fd = Float.T(
        default=0.,
        help='downward component of single force [N]')

    def __init__(self, **kwargs):
        # No extra initialization; present for symmetry with the other
        # source classes.
        Source.__init__(self, **kwargs)

    def base_key(self):
        # Force components take part in the GF cache key.
        return Source.base_key(self) + (self.fn, self.fe, self.fd)

    def get_factor(self):
        # Force amplitudes are contained in the discretized source.
        return 1.0

    def discretize_basesource(self, store, target=None):
        '''
        Discretize the source in time using its effective STF; one
        force vector per STF sample, scaled by the STF amplitude.
        '''
        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)
        forces = amplitudes[:, num.newaxis] * num.array(
            [[self.fn, self.fe, self.fd]], dtype=float)

        return meta.DiscretizedSFSource(
            forces=forces,
            **self._dparams_base_repeated(times))

    def pyrocko_event(self, store=None, target=None, **kwargs):
        return Source.pyrocko_event(
            self, store, target,
            **kwargs)

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        # No event attribute maps onto the force components; only the
        # base-class attributes and explicit kwargs are used.
        d = {}
        d.update(kwargs)
        return super(SFSource, cls).from_pyrocko_event(ev, **d)

4393 

4394 

class PorePressurePointSource(Source):
    '''
    Excess pore pressure point source.

    For poro-elastic initial value problem where an excess pore pressure is
    brought into a small source volume.
    '''

    discretized_source_class = meta.DiscretizedPorePressureSource

    pp = Float.T(
        default=1.0,
        help='initial excess pore pressure in [Pa]')

    def base_key(self):
        # `pp` is a pure amplitude (see get_factor) and therefore not
        # part of the cache key.
        return Source.base_key(self)

    def get_factor(self):
        return self.pp

    def discretize_basesource(self, store, target=None):
        # Unit pressure here; the actual amplitude is applied via
        # get_factor. `arr` is a helper defined elsewhere in this module
        # (presumably wraps the scalar into an array — see its
        # definition).
        return meta.DiscretizedPorePressureSource(pp=arr(1.0),
                                                  **self._dparams_base())

4418 

4419 

class PorePressureLineSource(Source):
    '''
    Excess pore pressure line source.

    The line source is centered at (north_shift, east_shift, depth).
    '''

    discretized_source_class = meta.DiscretizedPorePressureSource

    pp = Float.T(
        default=1.0,
        help='initial excess pore pressure in [Pa]')

    length = Float.T(
        default=0.0,
        help='length of the line source [m]')

    azimuth = Float.T(
        default=0.0,
        help='azimuth direction, clockwise from north [deg]')

    dip = Float.T(
        default=90.,
        help='dip direction, downward from horizontal [deg]')

    def base_key(self):
        # Geometry parameters enter the GF cache key; `pp` is a pure
        # amplitude (see get_factor).
        return Source.base_key(self) + (self.azimuth, self.dip, self.length)

    def get_factor(self):
        return self.pp

    def discretize_basesource(self, store, target=None):
        '''
        Discretize the line into equally spaced point sources.
        '''

        # Odd number of points such that the spacing is at most half
        # the finest store grid spacing.
        n = 2 * int(math.ceil(self.length / num.min(store.config.deltas))) + 1

        # Positions along the line, centered on zero.
        a = num.linspace(-0.5 * self.length, 0.5 * self.length, n)

        sa = math.sin(self.azimuth * d2r)
        ca = math.cos(self.azimuth * d2r)
        sd = math.sin(self.dip * d2r)
        cd = math.cos(self.dip * d2r)

        # Project line coordinate onto north/east/depth axes.
        points = num.zeros((n, 3))
        points[:, 0] = self.north_shift + a * ca * cd
        points[:, 1] = self.east_shift + a * sa * cd
        points[:, 2] = self.depth + a * sd

        # Unit total pressure, evenly distributed; amplitude applied via
        # get_factor.
        return meta.DiscretizedPorePressureSource(
            times=util.num_full(n, self.time),
            lat=self.lat,
            lon=self.lon,
            north_shifts=points[:, 0],
            east_shifts=points[:, 1],
            depths=points[:, 2],
            pp=num.ones(n) / n)

4475 

4476 

class Request(Object):
    '''
    Synthetic seismogram computation request.

    ::

        Request(**kwargs)
        Request(sources, targets, **kwargs)
    '''

    sources = List.T(
        Source.T(),
        help='list of sources for which to produce synthetics.')

    targets = List.T(
        Target.T(),
        help='list of targets for which to produce synthetics.')

    @classmethod
    def args2kwargs(cls, args):
        '''
        Translate positional constructor arguments into keyword
        arguments.

        Only the two-argument form ``(sources, targets)`` is mapped.
        Three positional arguments are tolerated but ignored here
        (presumably for backward compatibility — handled by the caller).

        :raises BadRequest: for any other number of positional arguments
        '''
        if len(args) not in (0, 2, 3):
            raise BadRequest('Invalid arguments.')

        if len(args) == 2:
            return dict(sources=args[0], targets=args[1])
        else:
            return {}

    def __init__(self, *args, **kwargs):
        kwargs.update(self.args2kwargs(args))
        sources = kwargs.pop('sources', [])
        targets = kwargs.pop('targets', [])

        # Accept single source/target objects as a convenience.
        if isinstance(sources, Source):
            sources = [sources]

        if isinstance(targets, (Target, StaticTarget)):
            targets = [targets]

        Object.__init__(self, sources=sources, targets=targets, **kwargs)

    @property
    def targets_dynamic(self):
        '''
        Subset of targets which are dynamic (waveform) targets.
        '''
        return [t for t in self.targets if isinstance(t, Target)]

    @property
    def targets_static(self):
        '''
        Subset of targets which are static targets.
        '''
        return [t for t in self.targets if isinstance(t, StaticTarget)]

    @property
    def has_dynamic(self):
        '''
        ``True`` if any dynamic target is present.
        '''
        return bool(self.targets_dynamic)

    @property
    def has_statics(self):
        '''
        ``True`` if any static target is present.
        '''
        return bool(self.targets_static)

    def subsources_map(self):
        '''
        Group sources by their ``base_key()``.
        '''
        m = defaultdict(list)
        for source in self.sources:
            m[source.base_key()].append(source)

        return m

    def subtargets_map(self):
        '''
        Group targets by their ``base_key()``.
        '''
        m = defaultdict(list)
        for target in self.targets:
            m[target.base_key()].append(target)

        return m

    def subrequest_map(self):
        '''
        Cartesian product of source groups and target groups, keyed by
        ``(source_key, target_key)`` pairs.
        '''
        ms = self.subsources_map()
        mt = self.subtargets_map()
        m = {}
        for (ks, ls) in ms.items():
            for (kt, lt) in mt.items():
                m[ks, kt] = (ls, lt)

        return m

4557 

4558 

class ProcessingStats(Object):
    '''
    Timing and counting statistics of a seismogram processing run.

    The ``t_perc_*`` attributes hold per-stage shares of the processing
    time (percentages, judging by the naming; values are filled in by
    the engine elsewhere).
    '''
    t_perc_get_store_and_receiver = Float.T(default=0.)
    t_perc_discretize_source = Float.T(default=0.)
    t_perc_make_base_seismogram = Float.T(default=0.)
    t_perc_make_same_span = Float.T(default=0.)
    t_perc_post_process = Float.T(default=0.)
    t_perc_optimize = Float.T(default=0.)
    t_perc_stack = Float.T(default=0.)
    t_perc_static_get_store = Float.T(default=0.)
    t_perc_static_discretize_basesource = Float.T(default=0.)
    t_perc_static_sum_statics = Float.T(default=0.)
    t_perc_static_post_process = Float.T(default=0.)
    # Absolute timings [s].
    t_wallclock = Float.T(default=0.)
    t_cpu = Float.T(default=0.)
    # Counters.
    n_read_blocks = Int.T(default=0)
    n_results = Int.T(default=0)
    n_subrequests = Int.T(default=0)
    n_stores = Int.T(default=0)
    n_records_stacked = Int.T(default=0)

4578 

4579 

class Response(Object):
    '''
    Response object to a synthetic seismogram computation request.
    '''

    request = Request.T()
    results_list = List.T(List.T(meta.SeismosizerResult.T()))
    stats = ProcessingStats.T()

    def pyrocko_traces(self):
        '''
        Return a list of requested
        :class:`~pyrocko.trace.Trace` instances.
        '''

        return [
            result.trace.pyrocko_trace()
            for results in self.results_list
            for result in results
            if isinstance(result, meta.Result)]

    def kite_scenes(self):
        '''
        Return a list of requested
        :class:`~kite.scenes` instances.
        '''
        return [
            result.get_scene()
            for results in self.results_list
            for result in results
            if isinstance(result, meta.KiteSceneResult)]

    def static_results(self):
        '''
        Return a list of requested
        :class:`~pyrocko.gf.meta.StaticResult` instances.
        '''
        return [
            result
            for results in self.results_list
            for result in results
            if isinstance(result, meta.StaticResult)]

    def iter_results(self, get='pyrocko_traces'):
        '''
        Generator function to iterate over results of request.

        Yields associated :py:class:`Source`,
        :class:`~pyrocko.gf.targets.Target`,
        :class:`~pyrocko.trace.Trace` instances in each iteration.
        '''

        for isrc, src in enumerate(self.request.sources):
            for itar, tar in enumerate(self.request.targets):
                res = self.results_list[isrc][itar]
                if get == 'pyrocko_traces':
                    yield src, tar, res.trace.pyrocko_trace()
                elif get == 'results':
                    yield src, tar, res

    def snuffle(self, **kwargs):
        '''
        Open *snuffler* with requested traces.
        '''

        trace.snuffle(self.pyrocko_traces(), **kwargs)

4655 

4656 

class Engine(Object):
    '''
    Base class for synthetic seismogram calculators.
    '''

    def get_store_ids(self):
        '''
        Get list of available GF store IDs

        The base implementation knows no stores; subclasses (e.g.
        :py:class:`LocalEngine`) override this.
        '''

        return []

4668 

4669 

class Rule(object):
    '''
    Base class for rules mapping GF components to an output quantity.

    Subclasses provide ``required_components(target)`` and
    ``apply_(target, base_seismogram)``.
    '''

4672 

4673 

class VectorRule(Rule):
    '''
    Rule composing a three-component (north, east, down) vector
    quantity from the GF components, optionally differentiated.
    '''

    def __init__(self, quantity, differentiate=0, integrate=0):
        self.components = [quantity + '.' + c for c in 'ned']
        self.differentiate = differentiate
        self.integrate = integrate

    def required_components(self, target):
        '''
        Components with a non-vanishing projection factor for *target*.
        '''
        sa, ca, sd, cd = target.get_sin_cos_factors()
        factors = (ca * cd, sa * cd, sd)
        return tuple(
            comp for comp, factor in zip(self.components, factors)
            if nonzero(factor))

    def apply_(self, target, base_seismogram):
        '''
        Project the GF components onto the target's measurement
        direction and apply differentiation if requested.
        '''
        comp_n, comp_e, comp_d = self.components
        sa, ca, sd, cd = target.get_sin_cos_factors()
        fac_n = ca * cd
        fac_e = sa * cd
        fac_d = sd

        if nonzero(fac_n):
            data = base_seismogram[comp_n].data * fac_n
            deltat = base_seismogram[comp_n].deltat
        else:
            data = 0.0

        if nonzero(fac_e):
            data = data + base_seismogram[comp_e].data * fac_e
            deltat = base_seismogram[comp_e].deltat

        if nonzero(fac_d):
            data = data + base_seismogram[comp_d].data * fac_d
            deltat = base_seismogram[comp_d].deltat

        if self.differentiate:
            data = util.diff_fd(self.differentiate, 4, deltat, data)

        if self.integrate:
            raise NotImplementedError('Integration is not implemented yet.')

        return data

4722 

4723 

class HorizontalVectorRule(Rule):
    '''
    Rule composing a horizontal (north, east) vector quantity from the
    GF components, optionally differentiated.
    '''

    def __init__(self, quantity, differentiate=0, integrate=0):
        self.components = [quantity + '.' + c for c in 'ne']
        self.differentiate = differentiate
        self.integrate = integrate

    def required_components(self, target):
        '''
        Components with a non-vanishing projection factor for *target*.
        '''
        sa, ca, _, _ = target.get_sin_cos_factors()
        return tuple(
            comp for comp, factor in zip(self.components, (ca, sa))
            if nonzero(factor))

    def apply_(self, target, base_seismogram):
        '''
        Project the GF components onto the target's azimuth and apply
        differentiation if requested.
        '''
        comp_n, comp_e = self.components
        sa, ca, _, _ = target.get_sin_cos_factors()

        data = 0.0
        if nonzero(ca):
            data = base_seismogram[comp_n].data * ca

        if nonzero(sa):
            data = data + base_seismogram[comp_e].data * sa

        if self.differentiate:
            # NOTE(review): deltat is always taken from the east
            # component here, even if only the north component
            # contributed — same as the original behavior.
            deltat = base_seismogram[comp_e].deltat
            data = util.diff_fd(self.differentiate, 4, deltat, data)

        if self.integrate:
            raise NotImplementedError('Integration is not implemented yet.')

        return data

4764 

4765 

class ScalarRule(Rule):
    '''
    Rule passing through a scalar GF component, optionally
    differentiated.
    '''

    def __init__(self, quantity, differentiate=0):
        self.c = quantity
        # Bug fix: `differentiate` was accepted but never stored, so
        # `apply_` crashed with AttributeError when reading
        # `self.differentiate`.
        self.differentiate = differentiate

    def required_components(self, target):
        return (self.c, )

    def apply_(self, target, base_seismogram):
        '''
        Return a copy of the scalar component's data, differentiated if
        requested.
        '''
        data = base_seismogram[self.c].data.copy()
        deltat = base_seismogram[self.c].deltat
        if self.differentiate:
            data = util.diff_fd(self.differentiate, 4, deltat, data)

        return data

4781 

4782 

class StaticDisplacement(Rule):
    '''
    Rule for static displacement results.

    For satellite targets, additionally computes the line-of-sight
    projection from the three displacement components.
    '''

    def required_components(self, target):
        return tuple('displacement.' + c for c in 'ned')

    def apply_(self, target, base_statics):
        if isinstance(target, SatelliteTarget):
            los_factors = target.get_los_factors()
            base_statics['displacement.los'] = (
                los_factors[:, 0] * -base_statics['displacement.d']
                + los_factors[:, 1] * base_statics['displacement.e']
                + los_factors[:, 2] * base_statics['displacement.n'])
        return base_statics

4796 

4797 

# Rules to synthesize each observable quantity from the GF components
# a store provides. Where several rules are listed, they appear to be
# alternatives (e.g. 'velocity' directly, or displacement differentiated
# once) — selection happens in the engine elsewhere.
channel_rules = {
    'displacement': [VectorRule('displacement')],
    'rotation': [VectorRule('rotation')],
    'velocity': [
        VectorRule('velocity'),
        VectorRule('displacement', differentiate=1)],
    'acceleration': [
        VectorRule('acceleration'),
        VectorRule('velocity', differentiate=1),
        VectorRule('displacement', differentiate=2)],
    'pore_pressure': [ScalarRule('pore_pressure')],
    'vertical_tilt': [HorizontalVectorRule('vertical_tilt')],
    'darcy_velocity': [VectorRule('darcy_velocity')],
}

# Rules for static (non-transient) quantities.
static_rules = {
    'displacement': [StaticDisplacement()]
}

4816 

4817 

class OutOfBoundsContext(Object):
    '''
    Context information attached to out-of-bounds errors for reporting
    which source/target combination failed.
    '''
    source = Source.T()
    target = Target.T()
    distance = Float.T()
    components = List.T(String.T())

4823 

4824 

def process_dynamic_timeseries(work, psources, ptargets, engine, nthreads=0):
    '''
    Compute dynamic seismograms for the source/target combinations in
    *work*, batching targets by store, sample rate and interpolation.

    Yields ``((isource, itarget, result), tcounters)`` tuples, where
    *result* is either a processed seismogram or a
    :py:class:`SeismosizerError`.
    '''
    dsource_cache = {}
    # Placeholder counters; detailed per-stage timing is not collected
    # in this code path.
    tcounters = list(range(6))

    # Assign stable indices to the targets so results can be attributed
    # after targets have been regrouped below.
    for itarget, target in enumerate(ptargets):
        target._id = itarget

    # Collect the set of targets referenced by any work unit.
    targets = set()
    for w in work:
        _, _, isources, itargets = w
        targets.update([ptargets[itarget] for itarget in itargets])

    store_ids = set(t.store_id for t in targets)

    for isource, source in enumerate(psources):

        # Union of GF components needed by any target for this source.
        components = set()
        for target in targets:
            rule = engine.get_rule(source, target)
            components.update(rule.required_components(target))

        for store_id in store_ids:
            store_targets = [t for t in targets if t.store_id == store_id]

            sample_rates = set([t.sample_rate for t in store_targets])
            interpolations = set([t.interpolation for t in store_targets])

            base_seismograms = []
            store_targets_out = []

            # Batch targets with identical sample rate and interpolation
            # so they can be computed together. `base_seismograms` and
            # `store_targets_out` stay aligned index-by-index.
            for samp_rate in sample_rates:
                for interp in interpolations:
                    engine_targets = [
                        t for t in store_targets if t.sample_rate == samp_rate
                        and t.interpolation == interp]

                    if not engine_targets:
                        continue

                    store_targets_out += engine_targets

                    base_seismograms += engine.base_seismograms(
                        source,
                        engine_targets,
                        components,
                        dsource_cache,
                        nthreads)

            for iseis, seismogram in enumerate(base_seismograms):
                for tr in seismogram.values():
                    if tr.err != store.SeismosizerErrorEnum.SUCCESS:
                        # Bug fix: `base_seismograms` is ordered like
                        # `store_targets_out` (regrouped by sample rate
                        # and interpolation), not like `store_targets`,
                        # so the error context must index the former.
                        e = SeismosizerError(
                            'Seismosizer failed with return code %i\n%s' % (
                                tr.err, str(
                                    OutOfBoundsContext(
                                        source=source,
                                        target=store_targets_out[iseis],
                                        distance=source.distance_to(
                                            store_targets_out[iseis]),
                                        components=components))))
                        raise e

            for seismogram, target in zip(base_seismograms, store_targets_out):

                try:
                    result = engine._post_process_dynamic(
                        seismogram, source, target)
                except SeismosizerError as e:
                    # Report per-target failures as results instead of
                    # aborting the whole batch.
                    result = e

                yield (isource, target._id, result), tcounters

4901 

4902 

def process_dynamic(work, psources, ptargets, engine, nthreads=0):
    '''
    Compute dynamic seismograms one source/target pair at a time.

    Yields ``((isource, itarget, result), tcounters)`` tuples, where
    *result* is either a processed seismogram or a
    :py:class:`SeismosizerError`, and *tcounters* are the timing
    counters returned by ``engine.base_seismogram`` with a final
    timestamp appended.
    '''
    # Cache of discretized sources, shared across targets.
    dsource_cache = {}

    for w in work:
        _, _, isources, itargets = w

        sources = [psources[isource] for isource in isources]
        targets = [ptargets[itarget] for itarget in itargets]

        # Union of GF components needed by any target in this work
        # unit. NOTE(review): the rule is looked up with sources[0]
        # only — presumably all sources in a work unit share the same
        # rule.
        components = set()
        for target in targets:
            rule = engine.get_rule(sources[0], target)
            components.update(rule.required_components(target))

        for isource, source in zip(isources, sources):
            for itarget, target in zip(itargets, targets):

                try:
                    base_seismogram, tcounters = engine.base_seismogram(
                        source, target, components, dsource_cache, nthreads)
                except meta.OutOfBounds as e:
                    # Attach reporting context before propagating.
                    e.context = OutOfBoundsContext(
                        source=sources[0],
                        target=targets[0],
                        distance=sources[0].distance_to(targets[0]),
                        components=components)
                    raise

                # Accumulate stacking statistics over all components.
                n_records_stacked = 0
                t_optimize = 0.0
                t_stack = 0.0

                for _, tr in base_seismogram.items():
                    n_records_stacked += tr.n_records_stacked
                    t_optimize += tr.t_optimize
                    t_stack += tr.t_stack

                try:
                    result = engine._post_process_dynamic(
                        base_seismogram, source, target)
                    result.n_records_stacked = n_records_stacked
                    result.n_shared_stacking = len(sources) *\
                        len(targets)
                    result.t_optimize = t_optimize
                    result.t_stack = t_stack
                except SeismosizerError as e:
                    # Per-pair failures are reported as results instead
                    # of aborting the whole work unit.
                    result = e

                tcounters.append(xtime())
                yield (isource, itarget, result), tcounters

4953 

4954 

def process_static(work, psources, ptargets, engine, nthreads=0):
    '''
    Compute static displacements for the source/target combinations in
    *work*.

    Yields ``((isource, itarget, result), tcounters)`` tuples, where
    *tcounters* are the timing counters returned by
    ``engine.base_statics`` with a final timestamp appended.
    '''
    for work_unit in work:
        _, _, isources, itargets = work_unit

        sources = [psources[i] for i in isources]
        targets = [ptargets[i] for i in itargets]

        for isource, source in zip(isources, sources):
            for itarget, target in zip(itargets, targets):
                rule = engine.get_rule(source, target)
                components = rule.required_components(target)

                try:
                    base_statics, tcounters = engine.base_statics(
                        source, target, components, nthreads)
                except meta.OutOfBounds as e:
                    # Attach reporting context before propagating.
                    e.context = OutOfBoundsContext(
                        source=sources[0],
                        target=targets[0],
                        distance=float('nan'),
                        components=components)
                    raise

                result = engine._post_process_statics(
                    base_statics, source, target)
                tcounters.append(xtime())

                yield (isource, itarget, result), tcounters

4982 

4983 

4984class LocalEngine(Engine): 

4985 ''' 

4986 Offline synthetic seismogram calculator. 

4987 

4988 :param use_env: if ``True``, fill :py:attr:`store_superdirs` and 

4989 :py:attr:`store_dirs` with paths set in environment variables 

4990 GF_STORE_SUPERDIRS and GF_STORE_DIRS. 

4991 :param use_config: if ``True``, fill :py:attr:`store_superdirs` and 

4992 :py:attr:`store_dirs` with paths set in the user's config file. 

4993 

4994 The config file can be found at :file:`~/.pyrocko/config.pf` 

4995 

4996 .. code-block :: python 

4997 

4998 gf_store_dirs: ['/home/pyrocko/gf_stores/ak135/'] 

4999 gf_store_superdirs: ['/home/pyrocko/gf_stores/'] 

5000 ''' 

5001 

5002 store_superdirs = List.T( 

5003 String.T(), 

5004 help="directories which are searched for Green's function stores") 

5005 

5006 store_dirs = List.T( 

5007 String.T(), 

5008 help="additional individual Green's function store directories") 

5009 

5010 default_store_id = String.T( 

5011 optional=True, 

5012 help='default store ID to be used when a request does not provide ' 

5013 'one') 

5014 

    def __init__(self, **kwargs):
        '''
        :param use_env: if ``True``, extend store search paths from the
            environment variables GF_STORE_SUPERDIRS and GF_STORE_DIRS.
        :param use_config: if ``True``, extend store search paths from
            the user's pyrocko config.
        '''
        # `use_env` and `use_config` are constructor-only switches, not
        # guts attributes, so pop them before calling Engine.__init__.
        use_env = kwargs.pop('use_env', False)
        use_config = kwargs.pop('use_config', False)
        Engine.__init__(self, **kwargs)
        if use_env:
            env_store_superdirs = os.environ.get('GF_STORE_SUPERDIRS', '')
            env_store_dirs = os.environ.get('GF_STORE_DIRS', '')
            if env_store_superdirs:
                self.store_superdirs.extend(env_store_superdirs.split(':'))

            if env_store_dirs:
                self.store_dirs.extend(env_store_dirs.split(':'))

        if use_config:
            c = config.config()
            self.store_superdirs.extend(c.gf_store_superdirs)
            self.store_dirs.extend(c.gf_store_dirs)

        self._check_store_dirs_type()
        # Caches, filled lazily (see _scan_stores).
        self._id_to_store_dir = {}
        self._open_stores = {}
        self._effective_default_store_id = None

5037 

5038 def _check_store_dirs_type(self): 

5039 for sdir in ['store_dirs', 'store_superdirs']: 

5040 if not isinstance(self.__getattribute__(sdir), list): 

5041 raise TypeError('{} of {} is not of type list'.format( 

5042 sdir, self.__class__.__name__)) 

5043 

5044 def _get_store_id(self, store_dir): 

5045 store_ = store.Store(store_dir) 

5046 store_id = store_.config.id 

5047 store_.close() 

5048 return store_id 

5049 

5050 def _looks_like_store_dir(self, store_dir): 

5051 return os.path.isdir(store_dir) and \ 

5052 all(os.path.isfile(pjoin(store_dir, x)) for x in 

5053 ('index', 'traces', 'config')) 

5054 

5055 def iter_store_dirs(self): 

5056 store_dirs = set() 

5057 for d in self.store_superdirs: 

5058 if not os.path.exists(d): 

5059 logger.warning('store_superdir not available: %s' % d) 

5060 continue 

5061 

5062 for entry in os.listdir(d): 

5063 store_dir = os.path.realpath(pjoin(d, entry)) 

5064 if self._looks_like_store_dir(store_dir): 

5065 store_dirs.add(store_dir) 

5066 

5067 for store_dir in self.store_dirs: 

5068 store_dirs.add(os.path.realpath(store_dir)) 

5069 

5070 return store_dirs 

5071 

5072 def _scan_stores(self): 

5073 for store_dir in self.iter_store_dirs(): 

5074 store_id = self._get_store_id(store_dir) 

5075 if store_id not in self._id_to_store_dir: 

5076 self._id_to_store_dir[store_id] = store_dir 

5077 else: 

5078 if store_dir != self._id_to_store_dir[store_id]: 

5079 raise DuplicateStoreId( 

5080 'GF store ID %s is used in (at least) two ' 

5081 'different stores. Locations are: %s and %s' % 

5082 (store_id, self._id_to_store_dir[store_id], store_dir)) 

5083 

5084 def get_store_dir(self, store_id): 

5085 ''' 

5086 Lookup directory given a GF store ID. 

5087 ''' 

5088 

5089 if store_id not in self._id_to_store_dir: 

5090 self._scan_stores() 

5091 

5092 if store_id not in self._id_to_store_dir: 

5093 raise NoSuchStore(store_id, self.iter_store_dirs()) 

5094 

5095 return self._id_to_store_dir[store_id] 

5096 

5097 def get_store_ids(self): 

5098 ''' 

5099 Get list of available store IDs. 

5100 ''' 

5101 

5102 self._scan_stores() 

5103 return sorted(self._id_to_store_dir.keys()) 

5104 

5105 def effective_default_store_id(self): 

5106 if self._effective_default_store_id is None: 

5107 if self.default_store_id is None: 

5108 store_ids = self.get_store_ids() 

5109 if len(store_ids) == 1: 

5110 self._effective_default_store_id = self.get_store_ids()[0] 

5111 else: 

5112 raise NoDefaultStoreSet() 

5113 else: 

5114 self._effective_default_store_id = self.default_store_id 

5115 

5116 return self._effective_default_store_id 

5117 

5118 def get_store(self, store_id=None): 

5119 ''' 

5120 Get a store from the engine. 

5121 

5122 :param store_id: identifier of the store (optional) 

5123 :returns: :py:class:`~pyrocko.gf.store.Store` object 

5124 

5125 If no ``store_id`` is provided the store 

5126 associated with the :py:gattr:`default_store_id` is returned. 

5127 Raises :py:exc:`NoDefaultStoreSet` if :py:gattr:`default_store_id` is 

5128 undefined. 

5129 ''' 

5130 

5131 if store_id is None: 

5132 store_id = self.effective_default_store_id() 

5133 

5134 if store_id not in self._open_stores: 

5135 store_dir = self.get_store_dir(store_id) 

5136 self._open_stores[store_id] = store.Store(store_dir) 

5137 

5138 return self._open_stores[store_id] 

5139 

5140 def get_store_config(self, store_id): 

5141 store = self.get_store(store_id) 

5142 return store.config 

5143 

5144 def get_store_extra(self, store_id, key): 

5145 store = self.get_store(store_id) 

5146 return store.get_extra(key) 

5147 

5148 def close_cashed_stores(self): 

5149 ''' 

5150 Close and remove ids from cashed stores. 

5151 ''' 

5152 store_ids = [] 

5153 for store_id, store_ in self._open_stores.items(): 

5154 store_.close() 

5155 store_ids.append(store_id) 

5156 

5157 for store_id in store_ids: 

5158 self._open_stores.pop(store_id) 

5159 

5160 def get_rule(self, source, target): 

5161 cprovided = self.get_store(target.store_id).get_provided_components() 

5162 

5163 if isinstance(target, StaticTarget): 

5164 quantity = target.quantity 

5165 available_rules = static_rules 

5166 elif isinstance(target, Target): 

5167 quantity = target.effective_quantity() 

5168 available_rules = channel_rules 

5169 

5170 try: 

5171 for rule in available_rules[quantity]: 

5172 cneeded = rule.required_components(target) 

5173 if all(c in cprovided for c in cneeded): 

5174 return rule 

5175 

5176 except KeyError: 

5177 pass 

5178 

5179 raise BadRequest( 

5180 'No rule to calculate "%s" with GFs from store "%s" ' 

5181 'for source model "%s".' % ( 

5182 target.effective_quantity(), 

5183 target.store_id, 

5184 source.__class__.__name__)) 

5185 

5186 def _cached_discretize_basesource(self, source, store, cache, target): 

5187 if (source, store) not in cache: 

5188 cache[source, store] = source.discretize_basesource(store, target) 

5189 

5190 return cache[source, store] 

5191 

5192 def base_seismograms(self, source, targets, components, dsource_cache, 

5193 nthreads=0): 

5194 

5195 target = targets[0] 

5196 

5197 interp = set([t.interpolation for t in targets]) 

5198 if len(interp) > 1: 

5199 raise BadRequest('Targets have different interpolation schemes.') 

5200 

5201 rates = set([t.sample_rate for t in targets]) 

5202 if len(rates) > 1: 

5203 raise BadRequest('Targets have different sample rates.') 

5204 

5205 store_ = self.get_store(target.store_id) 

5206 receivers = [t.receiver(store_) for t in targets] 

5207 

5208 if target.sample_rate is not None: 

5209 deltat = 1. / target.sample_rate 

5210 rate = target.sample_rate 

5211 else: 

5212 deltat = None 

5213 rate = store_.config.sample_rate 

5214 

5215 tmin = num.fromiter( 

5216 (t.tmin for t in targets), dtype=float, count=len(targets)) 

5217 tmax = num.fromiter( 

5218 (t.tmax for t in targets), dtype=float, count=len(targets)) 

5219 

5220 mask = num.logical_and(num.isfinite(tmin), num.isfinite(tmax)) 

5221 

5222 itmin = num.zeros_like(tmin, dtype=num.int64) 

5223 itmax = num.zeros_like(tmin, dtype=num.int64) 

5224 nsamples = num.full_like(tmin, -1, dtype=num.int64) 

5225 

5226 itmin[mask] = num.floor(tmin[mask] * rate).astype(num.int64) 

5227 itmax[mask] = num.ceil(tmax[mask] * rate).astype(num.int64) 

5228 nsamples = itmax - itmin + 1 

5229 nsamples[num.logical_not(mask)] = -1 

5230 

5231 base_source = self._cached_discretize_basesource( 

5232 source, store_, dsource_cache, target) 

5233 

5234 base_seismograms = store_.calc_seismograms( 

5235 base_source, receivers, components, 

5236 deltat=deltat, 

5237 itmin=itmin, nsamples=nsamples, 

5238 interpolation=target.interpolation, 

5239 optimization=target.optimization, 

5240 nthreads=nthreads) 

5241 

5242 for i, base_seismogram in enumerate(base_seismograms): 

5243 base_seismograms[i] = store.make_same_span(base_seismogram) 

5244 

5245 return base_seismograms 

5246 

5247 def base_seismogram(self, source, target, components, dsource_cache, 

5248 nthreads): 

5249 

5250 tcounters = [xtime()] 

5251 

5252 store_ = self.get_store(target.store_id) 

5253 receiver = target.receiver(store_) 

5254 

5255 if target.tmin and target.tmax is not None: 

5256 rate = store_.config.sample_rate 

5257 itmin = int(num.floor(target.tmin * rate)) 

5258 itmax = int(num.ceil(target.tmax * rate)) 

5259 nsamples = itmax - itmin + 1 

5260 else: 

5261 itmin = None 

5262 nsamples = None 

5263 

5264 tcounters.append(xtime()) 

5265 base_source = self._cached_discretize_basesource( 

5266 source, store_, dsource_cache, target) 

5267 

5268 tcounters.append(xtime()) 

5269 

5270 if target.sample_rate is not None: 

5271 deltat = 1. / target.sample_rate 

5272 else: 

5273 deltat = None 

5274 

5275 base_seismogram = store_.seismogram( 

5276 base_source, receiver, components, 

5277 deltat=deltat, 

5278 itmin=itmin, nsamples=nsamples, 

5279 interpolation=target.interpolation, 

5280 optimization=target.optimization, 

5281 nthreads=nthreads) 

5282 

5283 tcounters.append(xtime()) 

5284 

5285 base_seismogram = store.make_same_span(base_seismogram) 

5286 

5287 tcounters.append(xtime()) 

5288 

5289 return base_seismogram, tcounters 

5290 

5291 def base_statics(self, source, target, components, nthreads): 

5292 tcounters = [xtime()] 

5293 store_ = self.get_store(target.store_id) 

5294 

5295 if target.tsnapshot is not None: 

5296 rate = store_.config.sample_rate 

5297 itsnapshot = int(num.floor(target.tsnapshot * rate)) 

5298 else: 

5299 itsnapshot = None 

5300 tcounters.append(xtime()) 

5301 

5302 base_source = source.discretize_basesource(store_, target=target) 

5303 

5304 tcounters.append(xtime()) 

5305 

5306 base_statics = store_.statics( 

5307 base_source, 

5308 target, 

5309 itsnapshot, 

5310 components, 

5311 target.interpolation, 

5312 nthreads) 

5313 

5314 tcounters.append(xtime()) 

5315 

5316 return base_statics, tcounters 

5317 

    def _post_process_dynamic(self, base_seismogram, source, target):
        # Turn a raw base seismogram into a finished trace: apply the
        # component combination rule, apply source/target scaling,
        # convolve with the source time function and hand the trace to
        # the target for final post-processing.

        # All components share deltat/itmin; take them from any one.
        base_any = next(iter(base_seismogram.values()))
        deltat = base_any.deltat
        itmin = base_any.itmin

        rule = self.get_rule(source, target)
        data = rule.apply_(target, base_seismogram)

        factor = source.get_factor() * target.get_factor()
        if factor != 1.0:
            data = data * factor

        stf = source.effective_stf_post()

        times, amplitudes = stf.discretize_t(
            deltat, 0.0)

        # repeat end point to prevent boundary effects
        padded_data = num.empty(data.size + amplitudes.size, dtype=float)
        padded_data[:data.size] = data
        padded_data[data.size:] = data[-1]
        data = num.convolve(amplitudes, padded_data)

        # Shift the trace start time by the STF's leading time offset.
        tmin = itmin * deltat + times[0]

        tr = meta.SeismosizerTrace(
            codes=target.codes,
            # Drop the padded tail introduced above.
            data=data[:-amplitudes.size],
            deltat=deltat,
            tmin=tmin)

        return target.post_process(self, source, tr)

5350 

    def _post_process_statics(self, base_statics, source, starget):
        # Apply the component combination rule and source scaling to a raw
        # static result, then let the target finish post-processing.
        rule = self.get_rule(source, starget)
        data = rule.apply_(starget, base_statics)

        factor = source.get_factor()
        if factor != 1.0:
            # Scales the arrays in place; if they are views into
            # ``base_statics``, the dict passed on below is updated too.
            for v in data.values():
                v *= factor

        # NOTE(review): ``base_statics`` (not the computed ``data``) is
        # passed on here. This only has the intended effect if
        # ``rule.apply_`` returns views into ``base_statics`` — confirm;
        # otherwise the scaled ``data`` is silently discarded.
        return starget.post_process(self, source, base_statics)

5361 

    def process(self, *args, **kwargs):
        '''
        Process a request.

        ::

            process(**kwargs)
            process(request, **kwargs)
            process(sources, targets, **kwargs)

        The request can be given as a :py:class:`Request` object, or such
        an object is created using ``Request(**kwargs)`` for convenience.

        :returns: :py:class:`Response` object
        '''

        if len(args) not in (0, 1, 2):
            raise BadRequest('Invalid arguments.')

        if len(args) == 1:
            kwargs['request'] = args[0]

        elif len(args) == 2:
            kwargs.update(Request.args2kwargs(args))

        # Keyword arguments consumed here are stripped before the
        # remainder is used to build a Request object below.
        request = kwargs.pop('request', None)
        status_callback = kwargs.pop('status_callback', None)
        calc_timeseries = kwargs.pop('calc_timeseries', True)

        # ``nprocs`` is a legacy alias which overrides ``nthreads``.
        nprocs = kwargs.pop('nprocs', None)
        nthreads = kwargs.pop('nthreads', 1)
        if nprocs is not None:
            nthreads = nprocs

        if request is None:
            request = Request(**kwargs)

        # Snapshot resource usage for the statistics reported below
        # (``resource`` is None on platforms without the module).
        if resource:
            rs0 = resource.getrusage(resource.RUSAGE_SELF)
            rc0 = resource.getrusage(resource.RUSAGE_CHILDREN)
        tt0 = xtime()

        # make sure stores are open before fork()
        store_ids = set(target.store_id for target in request.targets)
        for store_id in store_ids:
            self.get_store(store_id)

        source_index = dict((x, i) for (i, x) in
                            enumerate(request.sources))
        target_index = dict((x, i) for (i, x) in
                            enumerate(request.targets))

        m = request.subrequest_map()

        # Deterministic ordering of subrequests; cmp_none_aware sorts
        # None keys ahead of comparable ones.
        skeys = sorted(m.keys(), key=cmp_to_key(cmp_none_aware))
        results_list = []

        # results_list[isource][itarget] will hold the result objects.
        for i in range(len(request.sources)):
            results_list.append([None] * len(request.targets))

        tcounters_dyn_list = []
        tcounters_static_list = []
        nsub = len(skeys)
        isub = 0

        # Processing dynamic targets through
        # parimap(process_subrequest_dynamic)

        if calc_timeseries:
            _process_dynamic = process_dynamic_timeseries
        else:
            _process_dynamic = process_dynamic

        if request.has_dynamic:
            # Work items: (index, total, source indices, indices of the
            # non-static targets of this subrequest).
            work_dynamic = [
                (i, nsub,
                 [source_index[source] for source in m[k][0]],
                 [target_index[target] for target in m[k][1]
                  if not isinstance(target, StaticTarget)])
                for (i, k) in enumerate(skeys)]

            for ii_results, tcounters_dyn in _process_dynamic(
                    work_dynamic, request.sources, request.targets, self,
                    nthreads):

                # Per-stage durations, summed over subrequests below.
                tcounters_dyn_list.append(num.diff(tcounters_dyn))
                isource, itarget, result = ii_results
                results_list[isource][itarget] = result

                if status_callback:
                    status_callback(isub, nsub)

                isub += 1

        # Processing static targets through process_static
        if request.has_statics:
            work_static = [
                (i, nsub,
                 [source_index[source] for source in m[k][0]],
                 [target_index[target] for target in m[k][1]
                  if isinstance(target, StaticTarget)])
                for (i, k) in enumerate(skeys)]

            for ii_results, tcounters_static in process_static(
                    work_static, request.sources, request.targets, self,
                    nthreads=nthreads):

                tcounters_static_list.append(num.diff(tcounters_static))
                isource, itarget, result = ii_results
                results_list[isource][itarget] = result

                if status_callback:
                    status_callback(isub, nsub)

                isub += 1

        if status_callback:
            status_callback(nsub, nsub)

        # NOTE(review): tt0 is taken with xtime() but tt1 with
        # time.time() — presumably xtime is an alias of time.time;
        # confirm, otherwise t_wallclock mixes clocks.
        tt1 = time.time()
        if resource:
            rs1 = resource.getrusage(resource.RUSAGE_SELF)
            rc1 = resource.getrusage(resource.RUSAGE_CHILDREN)

        s = ProcessingStats()

        if request.has_dynamic:
            # Sum the per-stage durations over all subrequests and
            # convert to percentages of the total dynamic time.
            tcumu_dyn = num.sum(num.vstack(tcounters_dyn_list), axis=0)
            t_dyn = float(num.sum(tcumu_dyn))
            perc_dyn = map(float, tcumu_dyn / t_dyn * 100.)
            (s.t_perc_get_store_and_receiver,
             s.t_perc_discretize_source,
             s.t_perc_make_base_seismogram,
             s.t_perc_make_same_span,
             s.t_perc_post_process) = perc_dyn
        else:
            t_dyn = 0.

        if request.has_statics:
            tcumu_static = num.sum(num.vstack(tcounters_static_list), axis=0)
            t_static = num.sum(tcumu_static)
            perc_static = map(float, tcumu_static / t_static * 100.)
            (s.t_perc_static_get_store,
             s.t_perc_static_discretize_basesource,
             s.t_perc_static_sum_statics,
             s.t_perc_static_post_process) = perc_static

        s.t_wallclock = tt1 - tt0
        if resource:
            s.t_cpu = (
                (rs1.ru_utime + rs1.ru_stime + rc1.ru_utime + rc1.ru_stime) -
                (rs0.ru_utime + rs0.ru_stime + rc0.ru_utime + rc0.ru_stime))
            s.n_read_blocks = (
                (rs1.ru_inblock + rc1.ru_inblock) -
                (rs0.ru_inblock + rc0.ru_inblock))

        # Aggregate per-result stacking statistics; results shared
        # between stacks are weighted down by n_shared_stacking.
        n_records_stacked = 0.
        for results in results_list:
            for result in results:
                if not isinstance(result, meta.Result):
                    continue
                shr = float(result.n_shared_stacking)
                n_records_stacked += result.n_records_stacked / shr
                s.t_perc_optimize += result.t_optimize / shr
                s.t_perc_stack += result.t_stack / shr
        s.n_records_stacked = int(n_records_stacked)
        if t_dyn != 0.:
            # NOTE(review): ``/= t_dyn * 100`` divides by (t_dyn * 100);
            # for a percentage one would expect ``x / t_dyn * 100`` —
            # confirm whether this is intended.
            s.t_perc_optimize /= t_dyn * 100
            s.t_perc_stack /= t_dyn * 100

        return Response(
            request=request,
            results_list=results_list,
            stats=s)

5536 

5537 

class RemoteEngine(Engine):
    '''
    Client for remote synthetic seismogram calculator.
    '''

    site = String.T(default=ws.g_default_site, optional=True)
    url = String.T(default=ws.g_url, optional=True)

    def process(self, request=None, status_callback=None, **kwargs):
        '''
        Forward a processing request to the remote seismosizer service.

        If no :py:class:`Request` is given, one is built from ``kwargs``.
        ``status_callback`` is accepted for interface compatibility but is
        not used for remote processing.
        '''
        req = Request(**kwargs) if request is None else request
        return ws.seismosizer(url=self.url, site=self.site, request=req)

5552 

5553 

# Lazily created, process-wide shared engine instance.
g_engine = None


def get_engine(store_superdirs=None):
    '''
    Get the global shared :py:class:`LocalEngine` instance.

    The engine is created lazily on first use, configured from the
    environment and the user's config file.

    :param store_superdirs: optional sequence of additional store
        superdirectories to register on the returned engine
    '''
    global g_engine
    if g_engine is None:
        g_engine = LocalEngine(use_env=True, use_config=True)

    # Fixed: previously used a mutable default argument
    # (``store_superdirs=[]``); ``None`` now stands for "no extra dirs".
    for d in store_superdirs or []:
        if d not in g_engine.store_superdirs:
            g_engine.store_superdirs.append(d)

    return g_engine

5567 

5568 

class SourceGroup(Object):
    '''
    Base class for groups of sources.

    Attribute access is broadcast over the members: ``group.lat`` returns
    a float array holding ``lat`` of every contained source. Iteration and
    length must be provided by subclasses.
    '''

    def __getattr__(self, k):
        values = [getattr(source, k) for source in self]
        return num.array(values, dtype=float)

    def __iter__(self):
        raise NotImplementedError(
            'This method should be implemented in subclass.')

    def __len__(self):
        raise NotImplementedError(
            'This method should be implemented in subclass.')

5582 

5583 

class SourceList(SourceGroup):
    '''
    Group of sources given as an explicit list.
    '''

    # The sources contained in this group, in insertion order.
    sources = List.T(Source.T())

    def append(self, s):
        '''
        Add source ``s`` to the end of the list.
        '''
        self.sources.append(s)

    def __iter__(self):
        return iter(self.sources)

    def __len__(self):
        return len(self.sources)

5595 

5596 

class SourceGrid(SourceGroup):
    '''
    Group of sources spanned by varying parameters of a base source.

    Each entry in ``variables`` maps a parameter name of ``base`` to a
    :py:class:`Range`; the grid is the cartesian product of all parameter
    value lists, ordered by ``order`` (then by the base source's own
    parameter order).
    '''

    base = Source.T()
    variables = Dict.T(String.T(), Range.T())
    order = List.T(String.T())

    def __len__(self):
        total = 1
        for _, values in self.make_coords(self.base):
            total *= len(list(values))

        return total

    def __iter__(self):
        for assignment in permudef(self.make_coords(self.base)):
            src = self.base.clone(**dict(assignment))
            src.regularize()
            yield src

    def ordered_params(self):
        '''
        Yield the varied parameter names in their effective order.

        :raises Exception: if ``variables`` contains a name that is not a
            parameter of the base source.
        '''
        remaining = list(self.variables.keys())
        for name in self.order + list(self.base.keys()):
            if name in remaining:
                yield name
                remaining.remove(name)

        if remaining:
            raise Exception('Invalid parameter "%s" for source type "%s".' %
                            (remaining[0], self.base.__class__.__name__))

    def make_coords(self, base):
        '''
        Build the ``(param, values)`` coordinate axes of the grid.
        '''
        return [
            (param, self.variables[param].make(base=base[param]))
            for param in self.ordered_params()]

5629 

5630 

# Concrete source model classes defined in this module; also used to
# extend ``__all__`` below.
source_classes = [
    Source,
    SourceWithMagnitude,
    SourceWithDerivedMagnitude,
    ExplosionSource,
    RectangularExplosionSource,
    DCSource,
    CLVDSource,
    VLVDSource,
    MTSource,
    RectangularSource,
    PseudoDynamicRupture,
    DoubleDCSource,
    RingfaultSource,
    CombiSource,
    SFSource,
    PorePressurePointSource,
    PorePressureLineSource,
]

5650 

# Source-time-function classes defined in this module; also used to
# extend ``__all__`` below.
stf_classes = [
    STF,
    BoxcarSTF,
    TriangularSTF,
    HalfSinusoidSTF,
    ResonatorSTF,
]

5658 

# Public API of this module: exception, request/response and helper names
# plus all source and STF classes listed above.
__all__ = '''
SeismosizerError
BadRequest
NoSuchStore
DerivedMagnitudeError
STFMode
'''.split() + [S.__name__ for S in source_classes + stf_classes] + '''
Request
ProcessingStats
Response
Engine
LocalEngine
RemoteEngine
source_classes
get_engine
Range
SourceGroup
SourceList
SourceGrid
map_anchor
'''.split()