# http://pyrocko.org - GPLv3
#
# The Pyrocko Developers, 21st Century
# ---|P------/S----------~Lg----------

'''

.. _coordinate-system-names:

Coordinate systems
..................

Coordinate system names commonly used in source models.

================= ============================================
Name              Description
================= ============================================
``'xyz'``         northing, easting, depth in [m]
``'xy'``          northing, easting in [m]
``'latlon'``      latitude, longitude in [deg]
``'lonlat'``      longitude, latitude in [deg]
``'latlondepth'`` latitude, longitude in [deg], depth in [m]
================= ============================================
'''

27 

28from collections import defaultdict 

29from functools import cmp_to_key 

30import time 

31import math 

32import os 

33import re 

34import logging 

35try: 

36 import resource 

37except ImportError: 

38 resource = None 

39from hashlib import sha1 

40 

41import numpy as num 

42from scipy.interpolate import RegularGridInterpolator 

43 

44from pyrocko.guts import (Object, Float, String, StringChoice, List, 

45 Timestamp, Int, SObject, ArgumentError, Dict, 

46 ValidationError, Bool) 

47from pyrocko.guts_array import Array 

48 

49from pyrocko import moment_tensor as pmt 

50from pyrocko import trace, util, config, model, eikonal_ext 

51from pyrocko.orthodrome import ne_to_latlon 

52from pyrocko.model import Location 

53from pyrocko.modelling import OkadaSource, make_okada_coefficient_matrix, \ 

54 okada_ext, invert_fault_dislocations_bem 

55 

56from . import meta, store, ws 

57from .tractions import TractionField, DirectedTractions 

58from .targets import Target, StaticTarget, SatelliteTarget 

59 

# Shorthand for joining filesystem paths.
pjoin = os.path.join

# Prefix under which guts-serializable classes of this module are registered.
guts_prefix = 'pf'

# Degree <-> radian conversion factors and kilometre in SI units [m].
d2r = math.pi / 180.
r2d = 180. / math.pi
km = 1e3

logger = logging.getLogger('pyrocko.gf.seismosizer')

69 

70 

def cmp_none_aware(a, b):
    '''
    Three-way comparison which treats ``None`` as smaller than any value.

    Tuples are compared element-wise; the first non-equal pair decides.
    Returns -1, 0 or 1 like an old-style ``cmp`` function.
    '''
    if isinstance(a, tuple) and isinstance(b, tuple):
        for left, right in zip(a, b):
            result = cmp_none_aware(left, right)
            if result:
                return result

        return 0

    if a is None or b is None:
        # None sorts before any real value; two Nones compare equal.
        return (b is None) - (a is None)

    return (a > b) - (a < b)

93 

94 

def xtime():
    '''
    Return the current system time in seconds since the epoch.
    '''
    return time.time()

97 

98 

class SeismosizerError(Exception):
    '''
    Base class for errors raised by this module.
    '''
    pass

101 

102 

class BadRequest(SeismosizerError):
    '''
    Raised for problems with a request; base for more specific request
    errors.
    '''
    pass

105 

106 

class DuplicateStoreId(Exception):
    '''
    Signals that the same store id occurs more than once.
    '''
    pass

109 

110 

class NoDefaultStoreSet(Exception):
    '''
    Raised when a default store is required but none has been configured.
    '''
    pass

113 

114 

class ConversionError(Exception):
    '''
    Raised when a conversion between representations fails.
    '''
    pass

117 

118 

class NoSuchStore(BadRequest):
    '''
    Raised when a requested GF store cannot be found.
    '''

    def __init__(self, store_id=None, dirs=None):
        # store_id: id of the store that was looked for (or None).
        # dirs: iterable of folders that were searched (or None).
        BadRequest.__init__(self)
        self.store_id = store_id
        self.dirs = dirs

    def __str__(self):
        if self.store_id is not None:
            rstr = 'no GF store with id "%s" found.' % self.store_id
        else:
            rstr = 'GF store not found.'

        if self.dirs is not None:
            rstr += ' Searched folders:\n %s' % '\n '.join(sorted(self.dirs))
        return rstr

135 

136 

def ufloat(s):
    '''
    Convert string to float, supporting unit suffixes ``k`` (1e3) and
    ``M`` (1e6).

    :param s: string like ``'5'``, ``'10k'`` or ``'2.5M'``
    :returns: value as ``float``, with the unit factor applied
    :raises ValueError: if only a unit suffix is given or the remainder is
        not a valid number
    '''
    units = {
        'k': 1e3,
        'M': 1e6,
    }

    factor = 1.0
    if s and s[-1] in units:
        factor = units[s[-1]]
        number = s[:-1]
        if not number:
            # Report the original input; previously the truncated (empty)
            # remainder was interpolated, producing a useless message.
            raise ValueError('unit without a number: \'%s\'' % s)
    else:
        number = s

    return float(number) * factor

151 

152 

def ufloat_or_none(s):
    '''
    Like :py:func:`ufloat` but map empty or ``None`` input to ``None``.
    '''
    return ufloat(s) if s else None

158 

159 

def int_or_none(s):
    '''
    Convert to ``int``, mapping empty or ``None`` input to ``None``.
    '''
    return int(s) if s else None

165 

166 

def nonzero(x, eps=1e-15):
    '''
    Check whether ``x`` deviates from zero by more than ``eps``.
    '''
    return not (-eps <= x <= eps)

169 

170 

def permudef(ln, j=0):
    '''
    Generate all combinations of the value choices in ``ln``.

    ``ln`` is a list of ``(key, values)`` pairs. For each combination,
    element ``j`` of the list is temporarily replaced in place by
    ``(key, value)`` and the very same list object is yielded. After
    exhausting the choices at position ``j``, the original
    ``(key, values)`` pair is restored.

    NOTE(review): the yielded object is always the same list instance --
    consumers must copy it if they want to keep a combination.
    '''
    if j < len(ln):
        k, v = ln[j]
        for y in v:
            ln[j] = k, y
            for s in permudef(ln, j + 1):
                yield s

        # Restore the original (key, values) pair after iteration.
        ln[j] = k, v
        return
    else:
        # All positions fixed: yield the current combination.
        yield ln

183 

184 

def arr(x):
    '''
    Coerce input into a numpy array of at least one dimension.
    '''
    a = num.asarray(x)
    return a[num.newaxis] if a.ndim == 0 else a

187 

188 

def discretize_rect_source(deltas, deltat, time, north, east, depth,
                           strike, dip, length, width,
                           anchor, velocity=None, stf=None,
                           nucleation_x=None, nucleation_y=None,
                           decimation_factor=1, pointsonly=False,
                           plane_coords=False,
                           aggressive_oversampling=False):
    '''
    Discretize a rectangular fault plane into point sub-sources.

    :param deltas: GF store grid spacings, used to choose the sub-source
        spacing
    :param deltat: sampling interval [s]
    :param time: reference (origin) time of the source
    :param north, east, depth: position of the anchor point [m]
    :param strike, dip: orientation angles [deg]
    :param length, width: fault dimensions [m]
    :param anchor: anchor name, resolved through ``map_anchor``
    :param velocity: rupture velocity [m/s]; required unless
        ``pointsonly=True``
    :param stf: source time function (defaults to a delta-pulse
        :py:class:`STF`)
    :param nucleation_x, nucleation_y: in-plane nucleation point in
        normalized coordinates, or ``None`` for simultaneous rupture
    :param decimation_factor: coarsen the discretisation by this factor
    :param pointsonly: if ``True``, return only geometry (no times or
        amplitudes)
    :param plane_coords: if ``True``, return points in in-plane
        coordinates before rotation/translation
    :returns: ``(points, dl, dw, nl, nw)`` in points-only modes, otherwise
        ``(points2, times2, amplitudes2, dl, dw, nl, nw)``
    '''
    if stf is None:
        stf = STF()

    if not velocity and not pointsonly:
        raise AttributeError('velocity is required in time mode')

    mindeltagf = float(num.min(deltas))
    if velocity:
        mindeltagf = min(mindeltagf, deltat * velocity)

    ln = length
    wd = width

    # Choose the number of sub-sources such that their spacing stays below
    # the GF grid spacing (doubled resolution with aggressive oversampling).
    if aggressive_oversampling:
        nl = int((2. / decimation_factor) * num.ceil(ln / mindeltagf)) + 1
        nw = int((2. / decimation_factor) * num.ceil(wd / mindeltagf)) + 1
    else:
        nl = int((1. / decimation_factor) * num.ceil(ln / mindeltagf)) + 1
        nw = int((1. / decimation_factor) * num.ceil(wd / mindeltagf)) + 1

    n = int(nl * nw)

    dl = ln / nl
    dw = wd / nw

    xl = num.linspace(-0.5 * (ln - dl), 0.5 * (ln - dl), nl)
    xw = num.linspace(-0.5 * (wd - dw), 0.5 * (wd - dw), nw)

    points = num.zeros((n, 3))
    points[:, 0] = num.tile(xl, nw)
    points[:, 1] = num.repeat(xw, nl)

    # In-plane distance of every sub-source from the nucleation point.
    if nucleation_x is not None:
        dist_x = num.abs(nucleation_x - points[:, 0])
    else:
        dist_x = num.zeros(n)

    if nucleation_y is not None:
        dist_y = num.abs(nucleation_y - points[:, 1])
    else:
        dist_y = num.zeros(n)

    dist = num.sqrt(dist_x**2 + dist_y**2)

    # Only compute rupture-front arrival times when a velocity is given.
    # Previously ``dist / velocity`` was evaluated unconditionally, raising
    # a TypeError in points-only mode with velocity=None.
    times = dist / velocity if velocity else None

    anch_x, anch_y = map_anchor[anchor]

    points[:, 0] -= anch_x * 0.5 * length
    points[:, 1] -= anch_y * 0.5 * width

    if plane_coords:
        return points, dl, dw, nl, nw

    # Rotate into NED coordinates and translate to the source position.
    rotmat = pmt.euler_to_matrix(dip * d2r, strike * d2r, 0.0)
    points = num.dot(rotmat.T, points.T).T

    points[:, 0] += north
    points[:, 1] += east
    points[:, 2] += depth

    if pointsonly:
        return points, dl, dw, nl, nw

    # Convolve rupture-front times with the discretized STF.
    xtau, amplitudes = stf.discretize_t(deltat, time)
    nt = xtau.size

    points2 = num.repeat(points, nt, axis=0)
    times2 = (times[:, num.newaxis] + xtau[num.newaxis, :]).ravel()
    amplitudes2 = num.tile(amplitudes, n)

    return points2, times2, amplitudes2, dl, dw, nl, nw

268 

269 

def check_rect_source_discretisation(points2, nl, nw, store):
    '''
    Check whether a discretized rectangular source is sampled finely
    enough for the given GF store.

    :param points2: flattened sub-source coordinates, shape ``(nl*nw, 3)``
    :param nl: number of sub-sources along length
    :param nw: number of sub-sources along width
    :param store: GF store providing ``config.get_vs`` and
        ``config.deltat``
    :returns: ``True`` if the discretisation is acceptable (or too small
        to judge), ``False`` otherwise
    '''
    # We assume a non-rotated fault plane
    N_CRITICAL = 8
    points = points2.T.reshape((3, nl, nw))
    if points.size <= N_CRITICAL:
        # Too few sub-sources to evaluate; warn but accept.
        logger.warning('RectangularSource is defined by only %d sub-sources!'
                       % points.size)
        return True

    # Distance between the first two rows of sub-sources, per column.
    distances = num.sqrt(
        (points[0, 0, :] - points[0, 1, :])**2 +
        (points[1, 0, :] - points[1, 1, :])**2 +
        (points[2, 0, :] - points[2, 1, :])**2)

    depths = points[2, 0, :]
    vs_profile = store.config.get_vs(
        lat=0., lon=0.,
        points=num.repeat(depths[:, num.newaxis], 3, axis=1),
        interpolation='multilinear')

    # Accept only when every sub-source spacing is smaller than four times
    # the minimum wavelength (vs * 2*deltat) resolvable by the store.
    min_wavelength = vs_profile * (store.config.deltat * 2)
    if not num.all(min_wavelength > distances / 2):
        return False
    return True

294 

295 

def outline_rect_source(strike, dip, length, width, anchor):
    '''
    Closed corner outline of a rectangular source in local coordinates.

    Returns a five-point outline (first corner repeated at the end),
    shifted by the anchor offset and rotated by dip and strike.
    '''
    half_l = 0.5 * length
    half_w = 0.5 * width

    outline = num.array([
        [-half_l, -half_w, 0.],
        [half_l, -half_w, 0.],
        [half_l, half_w, 0.],
        [-half_l, half_w, 0.],
        [-half_l, -half_w, 0.]])

    anch_x, anch_y = map_anchor[anchor]
    outline[:, 0] -= anch_x * half_l
    outline[:, 1] -= anch_y * half_w

    rotmat = pmt.euler_to_matrix(dip * d2r, strike * d2r, 0.0)

    return num.dot(rotmat.T, outline.T).T

313 

314 

def from_plane_coords(
        strike, dip, length, width, depth, x_plane_coords, y_plane_coords,
        lat=0., lon=0.,
        north_shift=0, east_shift=0,
        anchor='top', cs='xy'):
    '''
    Convert normalized in-plane fault coordinates to absolute coordinates.

    :param x_plane_coords: in-plane coordinate(s) along strike, scalar or
        list
    :param y_plane_coords: in-plane coordinate(s) down dip, scalar or list
    :param cs: output coordinate system, one of ``'xy'``, ``'latlon'``,
        ``'lonlat'``
    :returns: pair of lists of coordinate arrays; for ``'lonlat'`` the
        pair is swapped to (lon-like, lat-like)
    '''

    ln = length
    wd = width
    x_abs = []
    y_abs = []
    if not isinstance(x_plane_coords, list):
        x_plane_coords = [x_plane_coords]
        y_plane_coords = [y_plane_coords]

    for x_plane, y_plane in zip(x_plane_coords, y_plane_coords):
        # Corner points of a rectangle scaled by the in-plane coordinates.
        points = num.array(
            [[-0.5 * ln * x_plane, -0.5 * wd * y_plane, 0.],
             [0.5 * ln * x_plane, -0.5 * wd * y_plane, 0.],
             [0.5 * ln * x_plane, 0.5 * wd * y_plane, 0.],
             [-0.5 * ln * x_plane, 0.5 * wd * y_plane, 0.],
             [-0.5 * ln * x_plane, -0.5 * wd * y_plane, 0.]])

        anch_x, anch_y = map_anchor[anchor]
        points[:, 0] -= anch_x * 0.5 * length
        points[:, 1] -= anch_y * 0.5 * width

        rotmat = pmt.euler_to_matrix(dip * d2r, strike * d2r, 0.0)

        points = num.dot(rotmat.T, points.T).T
        points[:, 0] += north_shift
        points[:, 1] += east_shift
        points[:, 2] += depth
        if cs in ('latlon', 'lonlat'):
            latlon = ne_to_latlon(lat, lon,
                                  points[:, 0], points[:, 1])
            latlon = num.array(latlon).T
            # NOTE(review): fixed slices pick single corner rows (index 1
            # resp. 2) of the rotated rectangle -- presumably intentional
            # to extract one representative point per input pair; confirm
            # against callers.
            x_abs.append(latlon[1:2, 1])
            y_abs.append(latlon[2:3, 0])
        if cs == 'xy':
            x_abs.append(points[1:2, 1])
            y_abs.append(points[2:3, 0])

    if cs == 'lonlat':
        return y_abs, x_abs
    else:
        return x_abs, y_abs

361 

362 

def points_on_rect_source(
        strike, dip, length, width, anchor,
        discretized_basesource=None, points_x=None, points_y=None):
    '''
    Compute point coordinates on a rectangular source plane.

    Either derive a regular grid of patch corner points from
    ``discretized_basesource``, or map explicit normalized in-plane
    coordinates ``points_x``/``points_y`` onto the plane.

    :returns: rotated point coordinates, shape ``(npoints, 3)``

    NOTE(review): if neither ``discretized_basesource`` nor array-like
    ``points_x``/``points_y`` are supplied, this raises (``None`` has no
    ``shape``) -- callers apparently always provide one of them; confirm.
    '''

    ln = length
    wd = width

    # Normalize scalar/list inputs to arrays.
    if isinstance(points_x, list) or isinstance(points_x, float):
        points_x = num.array([points_x])
    if isinstance(points_y, list) or isinstance(points_y, float):
        points_y = num.array([points_y])

    if discretized_basesource:
        ds = discretized_basesource

        # Patch corners: one more node than patches in each direction.
        nl_patches = ds.nl + 1
        nw_patches = ds.nw + 1

        npoints = nl_patches * nw_patches
        points = num.zeros((npoints, 3))
        ln_patches = num.array([il for il in range(nl_patches)])
        wd_patches = num.array([iw for iw in range(nw_patches)])

        # Rescale patch indices to normalized coordinates in [-1, 1].
        points_ln =\
            2 * ((ln_patches - num.min(ln_patches)) / num.ptp(ln_patches)) - 1
        points_wd =\
            2 * ((wd_patches - num.min(wd_patches)) / num.ptp(wd_patches)) - 1

        for il in range(nl_patches):
            for iw in range(nw_patches):
                points[il * nw_patches + iw, :] = num.array([
                    points_ln[il] * ln * 0.5,
                    points_wd[iw] * wd * 0.5, 0.0])

    elif points_x.shape[0] > 0 and points_y.shape[0] > 0:
        # Map the given normalized coordinates onto the plane.
        points = num.zeros(shape=((len(points_x), 3)))
        for i, (x, y) in enumerate(zip(points_x, points_y)):
            points[i, :] = num.array(
                [x * 0.5 * ln, y * 0.5 * wd, 0.0])

    anch_x, anch_y = map_anchor[anchor]

    points[:, 0] -= anch_x * 0.5 * ln
    points[:, 1] -= anch_y * 0.5 * wd

    rotmat = pmt.euler_to_matrix(dip * d2r, strike * d2r, 0.0)

    return num.dot(rotmat.T, points.T).T

411 

412 

class InvalidGridDef(Exception):
    '''
    Raised for malformed grid or range specifications.
    '''
    pass

415 

416 

class Range(SObject):
    '''
    Convenient range specification.

    Equivalent ways to sepecify the range [ 0., 1000., ... 10000. ]::

      Range('0 .. 10k : 1k')
      Range(start=0., stop=10e3, step=1e3)
      Range(0, 10e3, 1e3)
      Range('0 .. 10k @ 11')
      Range(start=0., stop=10*km, n=11)

      Range(0, 10e3, n=11)
      Range(values=[x*1e3 for x in range(11)])

    Depending on the use context, it can be possible to omit any part of the
    specification. E.g. in the context of extracting a subset of an already
    existing range, the existing range's specification values would be filled
    in where missing.

    The values are distributed with equal spacing, unless the ``spacing``
    argument is modified.  The values can be created offset or relative to an
    external base value with the ``relative`` argument if the use context
    supports this.

    The range specification can be expressed with a short string
    representation::

        'start .. stop @ num | spacing, relative'
        'start .. stop : step | spacing, relative'

    most parts of the expression can be omitted if not needed. Whitespace is
    allowed for readability but can also be omitted.
    '''

    start = Float.T(optional=True)
    stop = Float.T(optional=True)
    step = Float.T(optional=True)
    n = Int.T(optional=True)
    values = Array.T(optional=True, dtype=float, shape=(None,))

    spacing = StringChoice.T(
        choices=['lin', 'log', 'symlog'],
        default='lin',
        optional=True)

    relative = StringChoice.T(
        choices=['', 'add', 'mult'],
        default='',
        optional=True)

    # Parses 'start .. stop @ n | stuff' or 'start .. stop : step | stuff';
    # each part is optional.
    pattern = re.compile(r'^((?P<start>.*)\.\.(?P<stop>[^@|:]*))?'
                         r'(@(?P<n>[^|]+)|:(?P<step>[^|]+))?'
                         r'(\|(?P<stuff>.+))?$')

    def __init__(self, *args, **kwargs):
        '''
        Accepts a single string shorthand, positional (start, stop[, step]),
        or keyword arguments matching the declared properties.
        '''
        d = {}
        if len(args) == 1:
            d = self.parse(args[0])
        elif len(args) in (2, 3):
            d['start'], d['stop'] = [float(x) for x in args[:2]]
            if len(args) == 3:
                d['step'] = float(args[2])

        for k, v in kwargs.items():
            if k in d:
                raise ArgumentError('%s specified more than once' % k)

            d[k] = v

        SObject.__init__(self, **d)

    def __str__(self):
        # Inverse of parse(): produce the short string representation.
        def sfloat(x):
            if x is not None:
                return '%g' % x
            else:
                return ''

        # NOTE(review): `if self.values:` on a numpy array raises
        # ValueError for arrays with more than one element -- presumably
        # `values` is normally None here or converted upstream; confirm.
        if self.values:
            return ','.join('%g' % x for x in self.values)

        if self.start is None and self.stop is None:
            s0 = ''
        else:
            s0 = '%s .. %s' % (sfloat(self.start), sfloat(self.stop))

        s1 = ''
        if self.step is not None:
            s1 = [' : %g', ':%g'][s0 == ''] % self.step
        elif self.n is not None:
            s1 = [' @ %i', '@%i'][s0 == ''] % self.n

        if self.spacing == 'lin' and self.relative == '':
            s2 = ''
        else:
            x = []
            if self.spacing != 'lin':
                x.append(self.spacing)

            if self.relative != '':
                x.append(self.relative)

            s2 = ' | %s' % ','.join(x)

        return s0 + s1 + s2

    @classmethod
    def parse(cls, s):
        '''
        Parse a shorthand range string into a dict of property values.

        Falls back to interpreting the string as a comma-separated list of
        explicit values if it does not match the range pattern.

        :raises InvalidGridDef: if the string cannot be interpreted
        '''
        s = re.sub(r'\s+', '', s)
        m = cls.pattern.match(s)
        if not m:
            try:
                vals = [ufloat(x) for x in s.split(',')]
            except Exception:
                raise InvalidGridDef(
                    '"%s" is not a valid range specification' % s)

            return dict(values=num.array(vals, dtype=float))

        d = m.groupdict()
        try:
            start = ufloat_or_none(d['start'])
            stop = ufloat_or_none(d['stop'])
            step = ufloat_or_none(d['step'])
            n = int_or_none(d['n'])
        except Exception:
            raise InvalidGridDef(
                '"%s" is not a valid range specification' % s)

        spacing = 'lin'
        relative = ''

        # The '| stuff' suffix carries spacing and/or relative flags.
        if d['stuff'] is not None:
            t = d['stuff'].split(',')
            for x in t:
                if x in cls.spacing.choices:
                    spacing = x
                elif x and x in cls.relative.choices:
                    relative = x
                else:
                    raise InvalidGridDef(
                        '"%s" is not a valid range specification' % s)

        return dict(start=start, stop=stop, step=step, n=n, spacing=spacing,
                    relative=relative)

    def make(self, mi=None, ma=None, inc=None, base=None, eps=1e-5):
        '''
        Materialize the range into a list of float values.

        :param mi: fallback minimum used when ``start`` is unset
        :param ma: fallback maximum used when ``stop`` is unset
        :param inc: fallback increment used when ``step`` is unset
        :param base: base value for relative ('add'/'mult') ranges
        :param eps: tolerance when snapping ``stop`` onto the step grid
        :raises InvalidGridDef: if the specification is unusable in this
            context
        '''
        # NOTE(review): same numpy-array truthiness caveat as in __str__.
        if self.values:
            return self.values

        start = self.start
        stop = self.stop
        step = self.step
        n = self.n

        # Negative step means counting downwards; swap the fallback bounds.
        swap = step is not None and step < 0.
        if start is None:
            start = [mi, ma][swap]
        if stop is None:
            stop = [ma, mi][swap]
        if step is None and inc is not None:
            step = [inc, -inc][ma < mi]

        if start is None or stop is None:
            raise InvalidGridDef(
                'Cannot use range specification "%s" without start '
                'and stop in this context' % self)

        if step is None and n is None:
            step = stop - start

        if n is None:
            if (step < 0) != (stop - start < 0):
                raise InvalidGridDef(
                    'Range specification "%s" has inconsistent ordering '
                    '(step < 0 => stop > start)' % self)

            # Snap stop to the step grid, truncating when not within eps.
            n = int(round((stop - start) / step)) + 1
            stop2 = start + (n - 1) * step
            if abs(stop - stop2) > eps:
                n = int(math.floor((stop - start) / step)) + 1
                stop = start + (n - 1) * step
            else:
                stop = stop2

        if start == stop:
            n = 1

        if self.spacing == 'lin':
            vals = num.linspace(start, stop, n)

        elif self.spacing in ('log', 'symlog'):
            if start > 0. and stop > 0.:
                vals = num.exp(num.linspace(num.log(start),
                                            num.log(stop), n))
            elif start < 0. and stop < 0.:
                vals = -num.exp(num.linspace(num.log(-start),
                                             num.log(-stop), n))
            else:
                raise InvalidGridDef(
                    'Log ranges should not include or cross zero '
                    '(in range specification "%s").' % self)

            if self.spacing == 'symlog':
                # Mirror the log-spaced values around zero.
                nvals = - vals
                vals = num.concatenate((nvals[::-1], vals))

        if self.relative in ('add', 'mult') and base is None:
            raise InvalidGridDef(
                'Cannot use relative range specification in this context.')

        vals = self.make_relative(base, vals)

        return list(map(float, vals))

    def make_relative(self, base, vals):
        # Offset or scale the values by the external base, in place.
        if self.relative == 'add':
            vals += base

        if self.relative == 'mult':
            vals *= base

        return vals

641 

642 

class GridDefElement(Object):
    '''
    One ``param = range`` element of a grid specification.
    '''

    param = meta.StringID.T()
    rs = Range.T()

    def __init__(self, shorthand=None, **kwargs):
        if shorthand is not None:
            parts = shorthand.split('=')
            if len(parts) != 2:
                raise InvalidGridDef(
                    'Invalid grid specification element: %s' % shorthand)

            kwargs['param'] = parts[0].strip()
            kwargs['rs'] = Range(parts[1].strip())

        Object.__init__(self, **kwargs)

    def shorthand(self):
        '''
        Short string form, inverse of the constructor's shorthand input.
        '''
        return self.param + ' = ' + str(self.rs)

664 

665 

class GridDef(Object):
    '''
    Grid specification: a collection of ``param = range`` elements.

    Can be constructed from a shorthand string with one element per line
    or separated by ``';'``.
    '''

    elements = List.T(GridDefElement.T())

    def __init__(self, shorthand=None, **kwargs):
        if shorthand is not None:
            t = shorthand.splitlines()
            tt = []
            for x in t:
                x = x.strip()
                if x:
                    tt.extend(x.split(';'))

            elements = []
            for se in tt:
                # Each entry is a single 'param = range' element. Parse it
                # as GridDefElement -- previously GridDef(se) was appended
                # here, which conflicts with the declared element type
                # List.T(GridDefElement.T()).
                elements.append(GridDefElement(se))

            kwargs['elements'] = elements

        Object.__init__(self, **kwargs)

    def shorthand(self):
        '''
        Short string form joining all elements with ``'; '``.
        '''
        return '; '.join(str(x) for x in self.elements)

689 

690 

class Cloneable(object):
    '''
    Mix-in adding key/value style access and copying to guts objects.
    '''

    def __iter__(self):
        # Iterate over the names of the guts-declared properties.
        return iter(self.T.propnames)

    def __getitem__(self, key):
        if key not in self.keys():
            raise KeyError(key)

        return getattr(self, key)

    def __setitem__(self, key, value):
        if key not in self.keys():
            raise KeyError(key)

        return setattr(self, key, value)

    def clone(self, **kwargs):
        '''
        Create a duplicate of this object.

        Nested :py:class:`Cloneable` values are cloned recursively.
        Keyword arguments override individual initialization parameters
        of the copy.
        '''
        params = dict(self)
        for name in params:
            current = params[name]
            if isinstance(current, Cloneable):
                params[name] = current.clone()

        params.update(kwargs)
        return self.__class__(**params)

    @classmethod
    def keys(cls):
        '''
        List of the source model's parameter names.
        '''
        return cls.T.propnames

734 

735 

class STF(Object, Cloneable):

    '''
    Base class for source time functions.

    The base class itself represents a delta pulse at the reference time.
    '''

    def __init__(self, effective_duration=None, **kwargs):
        # Allow construction from effective_duration as an alternative to
        # duration; subclasses define the conversion factor.
        if effective_duration is not None:
            kwargs['duration'] = effective_duration / \
                self.factor_duration_to_effective()

        Object.__init__(self, **kwargs)

    @classmethod
    def factor_duration_to_effective(cls):
        # Ratio between effective duration and nominal duration; identity
        # for the base class.
        return 1.0

    def centroid_time(self, tref):
        # Delta pulse: the centroid coincides with the reference time.
        return tref

    @property
    def effective_duration(self):
        # Relies on a `duration` attribute declared by subclasses.
        return self.duration * self.factor_duration_to_effective()

    def discretize_t(self, deltat, tref):
        '''
        Discretize the STF on a regular time grid with spacing ``deltat``.

        :returns: ``(times, amplitudes)`` where amplitudes sum to 1. The
            base implementation splits a delta pulse at ``tref`` linearly
            between the two neighbouring grid samples.
        '''
        tl = math.floor(tref / deltat) * deltat
        th = math.ceil(tref / deltat) * deltat
        if tl == th:
            # tref falls exactly on a grid sample.
            return num.array([tl], dtype=float), num.ones(1)
        else:
            return (
                num.array([tl, th], dtype=float),
                num.array([th - tref, tref - tl], dtype=float) / deltat)

    def base_key(self):
        # Tuple identifying this STF configuration for discrimination of
        # discretization results.
        return (type(self).__name__,)

772 

773 

# Shared default STF instance: a delta pulse (plain STF base class).
g_unit_pulse = STF()

775 

776 

def sshift(times, amplitudes, tshift, deltat):
    '''
    Shift a discretized signal by ``tshift`` on the ``deltat`` grid.

    The shift is realized by splitting every amplitude linearly between
    the two neighbouring grid samples; the output has one sample more
    than the input.

    NOTE(review): when ``tshift`` is an exact multiple of ``deltat`` the
    input arrays are returned unshifted -- presumably only sub-sample
    residual shifts are passed by callers; confirm before reusing.
    '''
    lo = math.floor(tshift / deltat) * deltat
    hi = math.ceil(tshift / deltat) * deltat
    if lo == hi:
        return times, amplitudes

    w_lo = (hi - tshift) / deltat
    w_hi = (tshift - lo) / deltat

    shifted = num.zeros(amplitudes.size + 1, dtype=float)
    shifted[:-1] += w_lo * amplitudes
    shifted[1:] += w_hi * amplitudes

    new_times = times[0] + lo + deltat * num.arange(
        times.size + 1, dtype=float)

    return new_times, shifted

793 

794 

class BoxcarSTF(STF):

    '''
    Boxcar type source time function.

    .. figure :: /static/stf-BoxcarSTF.svg
        :width: 40%
        :align: center
        :alt: boxcar source time function
    '''

    duration = Float.T(
        default=0.0,
        help='duration of the boxcar')

    anchor = Float.T(
        default=0.0,
        help='anchor point with respect to source.time: ('
             '-1.0: left -> source duration [0, T] ~ hypocenter time, '
             ' 0.0: center -> source duration [-T/2, T/2] ~ centroid time, '
             '+1.0: right -> source duration [-T, 0] ~ rupture end time)')

    @classmethod
    def factor_duration_to_effective(cls):
        # For a boxcar, effective and nominal duration coincide.
        return 1.0

    def centroid_time(self, tref):
        return tref - 0.5 * self.duration * self.anchor

    def discretize_t(self, deltat, tref):
        '''
        Discretize the boxcar on the ``deltat`` grid.

        :returns: ``(times, amplitudes)`` with amplitudes summing to 1.
        '''
        # Interval covered by the boxcar, positioned according to anchor.
        tmin_stf = tref - self.duration * (self.anchor + 1.) * 0.5
        tmax_stf = tref + self.duration * (1. - self.anchor) * 0.5
        tmin = round(tmin_stf / deltat) * deltat
        tmax = round(tmax_stf / deltat) * deltat
        nt = int(round((tmax - tmin) / deltat)) + 1
        times = num.linspace(tmin, tmax, nt)
        amplitudes = num.ones_like(times)
        if times.size > 1:
            # Integrate the piecewise-linear boxcar over each sample bin.
            t_edges = num.linspace(
                tmin - 0.5 * deltat, tmax + 0.5 * deltat, nt + 1)
            t = tmin_stf + self.duration * num.array(
                [0.0, 0.0, 1.0, 1.0], dtype=float)
            f = num.array([0., 1., 1., 0.], dtype=float)
            amplitudes = util.plf_integrate_piecewise(t_edges, t, f)
            amplitudes /= num.sum(amplitudes)

        # Correct the discretisation-induced centroid shift by a
        # sub-sample time shift.
        tshift = (num.sum(amplitudes * times) - self.centroid_time(tref))

        return sshift(times, amplitudes, -tshift, deltat)

    def base_key(self):
        return (type(self).__name__, self.duration, self.anchor)

847 

848 

class TriangularSTF(STF):

    '''
    Triangular type source time function.

    .. figure :: /static/stf-TriangularSTF.svg
        :width: 40%
        :align: center
        :alt: triangular source time function
    '''

    duration = Float.T(
        default=0.0,
        help='baseline of the triangle')

    peak_ratio = Float.T(
        default=0.5,
        help='fraction of time compared to duration, '
             'when the maximum amplitude is reached')

    anchor = Float.T(
        default=0.0,
        help='anchor point with respect to source.time: ('
             '-1.0: left -> source duration [0, T] ~ hypocenter time, '
             ' 0.0: center -> source duration [-T/2, T/2] ~ centroid time, '
             '+1.0: right -> source duration [-T, 0] ~ rupture end time)')

    @classmethod
    def factor_duration_to_effective(cls, peak_ratio=None):
        # Conversion factor between nominal (baseline) and effective
        # duration; depends on the triangle's peak position.
        if peak_ratio is None:
            peak_ratio = cls.peak_ratio.default()

        return math.sqrt((peak_ratio**2 - peak_ratio + 1.0) * 2.0 / 3.0)

    def __init__(self, effective_duration=None, **kwargs):
        # Override STF.__init__ so the conversion factor can take the
        # peak_ratio keyword into account.
        if effective_duration is not None:
            kwargs['duration'] = effective_duration / \
                self.factor_duration_to_effective(
                    kwargs.get('peak_ratio', None))

        STF.__init__(self, **kwargs)

    @property
    def centroid_ratio(self):
        # Position of the triangle's centroid as a fraction of duration.
        ra = self.peak_ratio
        rb = 1.0 - ra
        return self.peak_ratio + (rb**2 / 3. - ra**2 / 3.) / (ra + rb)

    def centroid_time(self, tref):
        ca = self.centroid_ratio
        cb = 1.0 - ca
        if self.anchor <= 0.:
            return tref - ca * self.duration * self.anchor
        else:
            return tref - cb * self.duration * self.anchor

    @property
    def effective_duration(self):
        return self.duration * self.factor_duration_to_effective(
            self.peak_ratio)

    def tminmax_stf(self, tref):
        # Interval covered by the triangle, positioned according to
        # anchor and centroid ratio.
        ca = self.centroid_ratio
        cb = 1.0 - ca
        if self.anchor <= 0.:
            tmin_stf = tref - ca * self.duration * (self.anchor + 1.)
            tmax_stf = tmin_stf + self.duration
        else:
            tmax_stf = tref + cb * self.duration * (1. - self.anchor)
            tmin_stf = tmax_stf - self.duration

        return tmin_stf, tmax_stf

    def discretize_t(self, deltat, tref):
        '''
        Discretize the triangle on the ``deltat`` grid.

        :returns: ``(times, amplitudes)`` with amplitudes summing to 1.
        '''
        tmin_stf, tmax_stf = self.tminmax_stf(tref)

        tmin = round(tmin_stf / deltat) * deltat
        tmax = round(tmax_stf / deltat) * deltat
        nt = int(round((tmax - tmin) / deltat)) + 1
        if nt > 1:
            # Integrate the piecewise-linear triangle over each sample bin.
            t_edges = num.linspace(
                tmin - 0.5 * deltat, tmax + 0.5 * deltat, nt + 1)
            t = tmin_stf + self.duration * num.array(
                [0.0, self.peak_ratio, 1.0], dtype=float)
            f = num.array([0., 1., 0.], dtype=float)
            amplitudes = util.plf_integrate_piecewise(t_edges, t, f)
            amplitudes /= num.sum(amplitudes)
        else:
            amplitudes = num.ones(1)

        times = num.linspace(tmin, tmax, nt)
        return times, amplitudes

    def base_key(self):
        return (
            type(self).__name__, self.duration, self.peak_ratio, self.anchor)

945 

946 

class HalfSinusoidSTF(STF):

    '''
    Half sinusoid type source time function.

    .. figure :: /static/stf-HalfSinusoidSTF.svg
        :width: 40%
        :align: center
        :alt: half-sinusouid source time function
    '''

    duration = Float.T(
        default=0.0,
        help='duration of the half-sinusoid (baseline)')

    anchor = Float.T(
        default=0.0,
        help='anchor point with respect to source.time: ('
             '-1.0: left -> source duration [0, T] ~ hypocenter time, '
             ' 0.0: center -> source duration [-T/2, T/2] ~ centroid time, '
             '+1.0: right -> source duration [-T, 0] ~ rupture end time)')

    exponent = Int.T(
        default=1,
        help='set to 2 to use square of the half-period sinusoidal function.')

    def __init__(self, effective_duration=None, **kwargs):
        # Override STF.__init__ so the conversion factor can take the
        # exponent keyword into account.
        if effective_duration is not None:
            kwargs['duration'] = effective_duration / \
                self.factor_duration_to_effective(
                    kwargs.get('exponent', 1))

        STF.__init__(self, **kwargs)

    @classmethod
    def factor_duration_to_effective(cls, exponent):
        # Analytic conversion factors for the plain and squared
        # half-sinusoid.
        if exponent == 1:
            return math.sqrt(3.0 * math.pi**2 - 24.0) / math.pi
        elif exponent == 2:
            return math.sqrt(math.pi**2 - 6) / math.pi
        else:
            raise ValueError('Exponent for HalfSinusoidSTF must be 1 or 2.')

    @property
    def effective_duration(self):
        return self.duration * self.factor_duration_to_effective(self.exponent)

    def centroid_time(self, tref):
        return tref - 0.5 * self.duration * self.anchor

    def discretize_t(self, deltat, tref):
        '''
        Discretize the (possibly squared) half-sinusoid on the ``deltat``
        grid.

        :returns: ``(times, amplitudes)`` with amplitudes summing to 1.
        '''
        tmin_stf = tref - self.duration * (self.anchor + 1.) * 0.5
        tmax_stf = tref + self.duration * (1. - self.anchor) * 0.5
        tmin = round(tmin_stf / deltat) * deltat
        tmax = round(tmax_stf / deltat) * deltat
        nt = int(round((tmax - tmin) / deltat)) + 1
        if nt > 1:
            # Clip bin edges to the STF support, then difference the
            # analytic antiderivative to integrate over each bin.
            t_edges = num.maximum(tmin_stf, num.minimum(tmax_stf, num.linspace(
                tmin - 0.5 * deltat, tmax + 0.5 * deltat, nt + 1)))

            if self.exponent == 1:
                fint = -num.cos(
                    (t_edges - tmin_stf) * (math.pi / self.duration))

            elif self.exponent == 2:
                fint = (t_edges - tmin_stf) / self.duration \
                    - 1.0 / (2.0 * math.pi) * num.sin(
                        (t_edges - tmin_stf) * (2.0 * math.pi / self.duration))
            else:
                raise ValueError(
                    'Exponent for HalfSinusoidSTF must be 1 or 2.')

            amplitudes = fint[1:] - fint[:-1]
            amplitudes /= num.sum(amplitudes)
        else:
            amplitudes = num.ones(1)

        times = num.linspace(tmin, tmax, nt)
        return times, amplitudes

    def base_key(self):
        return (type(self).__name__, self.duration, self.anchor)

1029 

1030 

class SmoothRampSTF(STF):
    '''
    Smooth-ramp type source time function for near-field displacement.
    Based on moment function of double-couple point source proposed by Bruestle
    and Mueller (PEPI, 1983).

    .. [1] W. Bruestle, G. Mueller (1983), Moment and duration of shallow
        earthquakes from Love-wave modelling for regional distances, PEPI 32,
        312-324.

    .. figure :: /static/stf-SmoothRampSTF.svg
        :width: 40%
        :alt: smooth ramp source time function
    '''
    duration = Float.T(
        default=0.0,
        help='duration of the ramp (baseline)')

    rise_ratio = Float.T(
        default=0.5,
        help='fraction of time compared to duration, '
             'when the maximum amplitude is reached')

    anchor = Float.T(
        default=0.0,
        help='anchor point with respect to source.time: ('
             '-1.0: left -> source duration ``[0, T]`` ~ hypocenter time, '
             '0.0: center -> source duration ``[-T/2, T/2]`` ~ centroid time, '
             '+1.0: right -> source duration ``[-T, 0]`` ~ rupture end time)')

    def discretize_t(self, deltat, tref):
        '''
        Discretize the smooth ramp on the ``deltat`` grid.

        :returns: ``(times, amplitudes)`` with amplitudes summing to 1.
        '''
        tmin_stf = tref - self.duration * (self.anchor + 1.) * 0.5
        tmax_stf = tref + self.duration * (1. - self.anchor) * 0.5
        tmin = round(tmin_stf / deltat) * deltat
        tmax = round(tmax_stf / deltat) * deltat
        D = round((tmax - tmin) / deltat) * deltat
        nt = int(round(D / deltat)) + 1
        times = num.linspace(tmin, tmax, nt)
        if nt > 1:
            rise_time = self.rise_ratio * self.duration
            amplitudes = num.ones_like(times)
            tp = tmin + rise_time
            # Samples within the rising part follow the smooth cosine ramp;
            # the remainder stays at the plateau value 1.
            ii = num.where(times <= tp)
            t_inc = times[ii]
            a = num.cos(num.pi * (t_inc - tmin_stf) / rise_time)
            b = num.cos(3 * num.pi * (t_inc - tmin_stf) / rise_time) - 1.0
            amplitudes[ii] = (9. / 16.) * (1 - a + (1. / 9.) * b)

            amplitudes /= num.sum(amplitudes)
        else:
            amplitudes = num.ones(1)

        return times, amplitudes

    def base_key(self):
        return (type(self).__name__,
                self.duration, self.rise_ratio, self.anchor)

1088 

1089 

class ResonatorSTF(STF):
    '''
    Simple resonator like source time function.

    .. math ::

        f(t) = 0 for t < 0
        f(t) = e^{-t/tau} * sin(2 * pi * f * t)


    .. figure :: /static/stf-SmoothRampSTF.svg
        :width: 40%
        :alt: smooth ramp source time function

    '''

    duration = Float.T(
        default=0.0,
        help='decay time')

    frequency = Float.T(
        default=1.0,
        help='resonance frequency')

    def discretize_t(self, deltat, tref):
        '''
        Discretize the damped sinusoid on the ``deltat`` grid.

        The signal is sampled from ``tref`` over three decay times.

        NOTE(review): unlike the other STFs, the amplitudes are not
        normalized to unit sum here.
        '''
        tmin_stf = tref
        tmax_stf = tref + self.duration * 3
        tmin = math.floor(tmin_stf / deltat) * deltat
        tmax = math.ceil(tmax_stf / deltat) * deltat
        times = util.arange2(tmin, tmax, deltat)
        amplitudes = num.exp(-(times - tref) / self.duration) \
            * num.sin(2.0 * num.pi * self.frequency * (times - tref))

        return times, amplitudes

    def base_key(self):
        return (type(self).__name__,
                self.duration, self.frequency)

1128 

1129 

1130class STFMode(StringChoice): 

1131 choices = ['pre', 'post'] 

1132 

1133 

1134class Source(Location, Cloneable): 

1135 ''' 

1136 Base class for all source models. 

1137 ''' 

1138 

1139 name = String.T(optional=True, default='') 

1140 

1141 time = Timestamp.T( 

1142 default=Timestamp.D('1970-01-01 00:00:00'), 

1143 help='source origin time.') 

1144 

1145 stf = STF.T( 

1146 optional=True, 

1147 help='source time function.') 

1148 

1149 stf_mode = STFMode.T( 

1150 default='post', 

1151 help='whether to apply source time function in pre or ' 

1152 'post-processing.') 

1153 

1154 def __init__(self, **kwargs): 

1155 Location.__init__(self, **kwargs) 

1156 

1157 def update(self, **kwargs): 

1158 ''' 

1159 Change some of the source models parameters. 

1160 

1161 Example:: 

1162 

1163 >>> from pyrocko import gf 

1164 >>> s = gf.DCSource() 

1165 >>> s.update(strike=66., dip=33.) 

1166 >>> print(s) 

1167 --- !pf.DCSource 

1168 depth: 0.0 

1169 time: 1970-01-01 00:00:00 

1170 magnitude: 6.0 

1171 strike: 66.0 

1172 dip: 33.0 

1173 rake: 0.0 

1174 

1175 ''' 

1176 

1177 for (k, v) in kwargs.items(): 

1178 self[k] = v 

1179 

1180 def grid(self, **variables): 

1181 ''' 

1182 Create grid of source model variations. 

1183 

1184 :returns: :py:class:`SourceGrid` instance. 

1185 

1186 Example:: 

1187 

1188 >>> from pyrocko import gf 

1189 >>> base = DCSource() 

1190 >>> R = gf.Range 

1191 >>> for s in base.grid(R(' 

1192 

1193 ''' 

1194 return SourceGrid(base=self, variables=variables) 

1195 

1196 def base_key(self): 

1197 ''' 

1198 Get key to decide about source discretization / GF stack sharing. 

1199 

1200 When two source models differ only in amplitude and origin time, the 

1201 discretization and the GF stacking can be done only once for a unit 

1202 amplitude and a zero origin time and the amplitude and origin times of 

1203 the seismograms can be applied during post-processing of the synthetic 

1204 seismogram. 

1205 

1206 For any derived parameterized source model, this method is called to 

1207 decide if discretization and stacking of the source should be shared. 

1208 When two source models return an equal vector of values discretization 

1209 is shared. 

1210 ''' 

1211 return (self.depth, self.lat, self.north_shift, 

1212 self.lon, self.east_shift, self.time, type(self).__name__) + \ 

1213 self.effective_stf_pre().base_key() 

1214 

1215 def get_factor(self): 

1216 ''' 

1217 Get the scaling factor to be applied during post-processing. 

1218 

1219 Discretization of the base seismogram is usually done for a unit 

1220 amplitude, because a common factor can be efficiently multiplied to 

1221 final seismograms. This eliminates to do repeat the stacking when 

1222 creating seismograms for a series of source models only differing in 

1223 amplitude. 

1224 

1225 This method should return the scaling factor to apply in the 

1226 post-processing (often this is simply the scalar moment of the source). 

1227 ''' 

1228 

1229 return 1.0 

1230 

1231 def effective_stf_pre(self): 

1232 ''' 

1233 Return the STF applied before stacking of the Green's functions. 

1234 

1235 This STF is used during discretization of the parameterized source 

1236 models, i.e. to produce a temporal distribution of point sources. 

1237 

1238 Handling of the STF before stacking of the GFs is less efficient but 

1239 allows to use different source time functions for different parts of 

1240 the source. 

1241 ''' 

1242 

1243 if self.stf is not None and self.stf_mode == 'pre': 

1244 return self.stf 

1245 else: 

1246 return g_unit_pulse 

1247 

1248 def effective_stf_post(self): 

1249 ''' 

1250 Return the STF applied after stacking of the Green's fuctions. 

1251 

1252 This STF is used in the post-processing of the synthetic seismograms. 

1253 

1254 Handling of the STF after stacking of the GFs is usually more efficient 

1255 but is only possible when a common STF is used for all subsources. 

1256 ''' 

1257 

1258 if self.stf is not None and self.stf_mode == 'post': 

1259 return self.stf 

1260 else: 

1261 return g_unit_pulse 

1262 

1263 def _dparams_base(self): 

1264 return dict(times=arr(self.time), 

1265 lat=self.lat, lon=self.lon, 

1266 north_shifts=arr(self.north_shift), 

1267 east_shifts=arr(self.east_shift), 

1268 depths=arr(self.depth)) 

1269 

1270 def _hash(self): 

1271 sha = sha1() 

1272 for k in self.base_key(): 

1273 sha.update(str(k).encode()) 

1274 return sha.hexdigest() 

1275 

1276 def _dparams_base_repeated(self, times): 

1277 if times is None: 

1278 return self._dparams_base() 

1279 

1280 nt = times.size 

1281 north_shifts = num.repeat(self.north_shift, nt) 

1282 east_shifts = num.repeat(self.east_shift, nt) 

1283 depths = num.repeat(self.depth, nt) 

1284 return dict(times=times, 

1285 lat=self.lat, lon=self.lon, 

1286 north_shifts=north_shifts, 

1287 east_shifts=east_shifts, 

1288 depths=depths) 

1289 

1290 def pyrocko_event(self, store=None, target=None, **kwargs): 

1291 duration = None 

1292 if self.stf: 

1293 duration = self.stf.effective_duration 

1294 

1295 return model.Event( 

1296 lat=self.lat, 

1297 lon=self.lon, 

1298 north_shift=self.north_shift, 

1299 east_shift=self.east_shift, 

1300 time=self.time, 

1301 name=self.name, 

1302 depth=self.depth, 

1303 duration=duration, 

1304 **kwargs) 

1305 

1306 def outline(self, cs='xyz'): 

1307 points = num.atleast_2d(num.zeros([1, 3])) 

1308 

1309 points[:, 0] += self.north_shift 

1310 points[:, 1] += self.east_shift 

1311 points[:, 2] += self.depth 

1312 if cs == 'xyz': 

1313 return points 

1314 elif cs == 'xy': 

1315 return points[:, :2] 

1316 elif cs in ('latlon', 'lonlat'): 

1317 latlon = ne_to_latlon( 

1318 self.lat, self.lon, points[:, 0], points[:, 1]) 

1319 

1320 latlon = num.array(latlon).T 

1321 if cs == 'latlon': 

1322 return latlon 

1323 else: 

1324 return latlon[:, ::-1] 

1325 

1326 @classmethod 

1327 def from_pyrocko_event(cls, ev, **kwargs): 

1328 if ev.depth is None: 

1329 raise ConversionError( 

1330 'Cannot convert event object to source object: ' 

1331 'no depth information available') 

1332 

1333 stf = None 

1334 if ev.duration is not None: 

1335 stf = HalfSinusoidSTF(effective_duration=ev.duration) 

1336 

1337 d = dict( 

1338 name=ev.name, 

1339 time=ev.time, 

1340 lat=ev.lat, 

1341 lon=ev.lon, 

1342 north_shift=ev.north_shift, 

1343 east_shift=ev.east_shift, 

1344 depth=ev.depth, 

1345 stf=stf) 

1346 d.update(kwargs) 

1347 return cls(**d) 

1348 

1349 def get_magnitude(self): 

1350 raise NotImplementedError( 

1351 '%s does not implement get_magnitude()' 

1352 % self.__class__.__name__) 

1353 

1354 

1355class SourceWithMagnitude(Source): 

1356 ''' 

1357 Base class for sources containing a moment magnitude. 

1358 ''' 

1359 

1360 magnitude = Float.T( 

1361 default=6.0, 

1362 help='Moment magnitude Mw as in [Hanks and Kanamori, 1979]') 

1363 

1364 def __init__(self, **kwargs): 

1365 if 'moment' in kwargs: 

1366 mom = kwargs.pop('moment') 

1367 if 'magnitude' not in kwargs: 

1368 kwargs['magnitude'] = float(pmt.moment_to_magnitude(mom)) 

1369 

1370 Source.__init__(self, **kwargs) 

1371 

1372 @property 

1373 def moment(self): 

1374 return float(pmt.magnitude_to_moment(self.magnitude)) 

1375 

1376 @moment.setter 

1377 def moment(self, value): 

1378 self.magnitude = float(pmt.moment_to_magnitude(value)) 

1379 

1380 def pyrocko_event(self, store=None, target=None, **kwargs): 

1381 return Source.pyrocko_event( 

1382 self, store, target, 

1383 magnitude=self.magnitude, 

1384 **kwargs) 

1385 

1386 @classmethod 

1387 def from_pyrocko_event(cls, ev, **kwargs): 

1388 d = {} 

1389 if ev.magnitude: 

1390 d.update(magnitude=ev.magnitude) 

1391 

1392 d.update(kwargs) 

1393 return super(SourceWithMagnitude, cls).from_pyrocko_event(ev, **d) 

1394 

1395 def get_magnitude(self): 

1396 return self.magnitude 

1397 

1398 

1399class DerivedMagnitudeError(ValidationError): 

1400 pass 

1401 

1402 

1403class SourceWithDerivedMagnitude(Source): 

1404 

1405 class __T(Source.T): 

1406 

1407 def validate_extra(self, val): 

1408 Source.T.validate_extra(self, val) 

1409 val.check_conflicts() 

1410 

1411 def check_conflicts(self): 

1412 ''' 

1413 Check for parameter conflicts. 

1414 

1415 To be overloaded in subclasses. Raises :py:exc:`DerivedMagnitudeError` 

1416 on conflicts. 

1417 ''' 

1418 pass 

1419 

1420 def get_magnitude(self, store=None, target=None): 

1421 raise DerivedMagnitudeError('No magnitude set.') 

1422 

1423 def get_moment(self, store=None, target=None): 

1424 return float(pmt.magnitude_to_moment( 

1425 self.get_magnitude(store, target))) 

1426 

1427 def pyrocko_moment_tensor(self, store=None, target=None): 

1428 raise NotImplementedError( 

1429 '%s does not implement pyrocko_moment_tensor()' 

1430 % self.__class__.__name__) 

1431 

1432 def pyrocko_event(self, store=None, target=None, **kwargs): 

1433 try: 

1434 mt = self.pyrocko_moment_tensor(store, target) 

1435 magnitude = self.get_magnitude() 

1436 except (DerivedMagnitudeError, NotImplementedError): 

1437 mt = None 

1438 magnitude = None 

1439 

1440 return Source.pyrocko_event( 

1441 self, store, target, 

1442 moment_tensor=mt, 

1443 magnitude=magnitude, 

1444 **kwargs) 

1445 

1446 

1447class ExplosionSource(SourceWithDerivedMagnitude): 

1448 ''' 

1449 An isotropic explosion point source. 

1450 ''' 

1451 

1452 magnitude = Float.T( 

1453 optional=True, 

1454 help='moment magnitude Mw as in [Hanks and Kanamori, 1979]') 

1455 

1456 volume_change = Float.T( 

1457 optional=True, 

1458 help='volume change of the explosion/implosion or ' 

1459 'the contracting/extending magmatic source. [m^3]') 

1460 

1461 discretized_source_class = meta.DiscretizedExplosionSource 

1462 

1463 def __init__(self, **kwargs): 

1464 if 'moment' in kwargs: 

1465 mom = kwargs.pop('moment') 

1466 if 'magnitude' not in kwargs: 

1467 kwargs['magnitude'] = float(pmt.moment_to_magnitude(mom)) 

1468 

1469 SourceWithDerivedMagnitude.__init__(self, **kwargs) 

1470 

1471 def base_key(self): 

1472 return SourceWithDerivedMagnitude.base_key(self) + \ 

1473 (self.volume_change,) 

1474 

1475 def check_conflicts(self): 

1476 if self.magnitude is not None and self.volume_change is not None: 

1477 raise DerivedMagnitudeError( 

1478 'Magnitude and volume_change are both defined.') 

1479 

1480 def get_magnitude(self, store=None, target=None): 

1481 self.check_conflicts() 

1482 

1483 if self.magnitude is not None: 

1484 return self.magnitude 

1485 

1486 elif self.volume_change is not None: 

1487 moment = self.volume_change * \ 

1488 self.get_moment_to_volume_change_ratio(store, target) 

1489 

1490 return float(pmt.moment_to_magnitude(abs(moment))) 

1491 else: 

1492 return float(pmt.moment_to_magnitude(1.0)) 

1493 

1494 def get_volume_change(self, store=None, target=None): 

1495 self.check_conflicts() 

1496 

1497 if self.volume_change is not None: 

1498 return self.volume_change 

1499 

1500 elif self.magnitude is not None: 

1501 moment = float(pmt.magnitude_to_moment(self.magnitude)) 

1502 return moment / self.get_moment_to_volume_change_ratio( 

1503 store, target) 

1504 

1505 else: 

1506 return 1.0 / self.get_moment_to_volume_change_ratio(store) 

1507 

1508 def get_moment_to_volume_change_ratio(self, store, target=None): 

1509 if store is None: 

1510 raise DerivedMagnitudeError( 

1511 'Need earth model to convert between volume change and ' 

1512 'magnitude.') 

1513 

1514 points = num.array( 

1515 [[self.north_shift, self.east_shift, self.depth]], dtype=float) 

1516 

1517 interpolation = target.interpolation if target else 'multilinear' 

1518 try: 

1519 shear_moduli = store.config.get_shear_moduli( 

1520 self.lat, self.lon, 

1521 points=points, 

1522 interpolation=interpolation)[0] 

1523 

1524 bulk_moduli = store.config.get_bulk_moduli( 

1525 self.lat, self.lon, 

1526 points=points, 

1527 interpolation=interpolation)[0] 

1528 except meta.OutOfBounds: 

1529 raise DerivedMagnitudeError( 

1530 'Could not get shear modulus at source position.') 

1531 

1532 return float(2. * shear_moduli + bulk_moduli) 

1533 

1534 def get_factor(self): 

1535 return 1.0 

1536 

1537 def discretize_basesource(self, store, target=None): 

1538 times, amplitudes = self.effective_stf_pre().discretize_t( 

1539 store.config.deltat, self.time) 

1540 

1541 amplitudes *= self.get_moment(store, target) * math.sqrt(2. / 3.) 

1542 

1543 if self.volume_change is not None: 

1544 if self.volume_change < 0.: 

1545 amplitudes *= -1 

1546 

1547 return meta.DiscretizedExplosionSource( 

1548 m0s=amplitudes, 

1549 **self._dparams_base_repeated(times)) 

1550 

1551 def pyrocko_moment_tensor(self, store=None, target=None): 

1552 a = self.get_moment(store, target) * math.sqrt(2. / 3.) 

1553 return pmt.MomentTensor(m=pmt.symmat6(a, a, a, 0., 0., 0.)) 

1554 

1555 

1556class RectangularExplosionSource(ExplosionSource): 

1557 ''' 

1558 Rectangular or line explosion source. 

1559 ''' 

1560 

1561 discretized_source_class = meta.DiscretizedExplosionSource 

1562 

1563 strike = Float.T( 

1564 default=0.0, 

1565 help='strike direction in [deg], measured clockwise from north') 

1566 

1567 dip = Float.T( 

1568 default=90.0, 

1569 help='dip angle in [deg], measured downward from horizontal') 

1570 

1571 length = Float.T( 

1572 default=0., 

1573 help='length of rectangular source area [m]') 

1574 

1575 width = Float.T( 

1576 default=0., 

1577 help='width of rectangular source area [m]') 

1578 

1579 anchor = StringChoice.T( 

1580 choices=['top', 'top_left', 'top_right', 'center', 'bottom', 

1581 'bottom_left', 'bottom_right'], 

1582 default='center', 

1583 optional=True, 

1584 help='Anchor point for positioning the plane, can be: top, center or' 

1585 'bottom and also top_left, top_right,bottom_left,' 

1586 'bottom_right, center_left and center right') 

1587 

1588 nucleation_x = Float.T( 

1589 optional=True, 

1590 help='horizontal position of rupture nucleation in normalized fault ' 

1591 'plane coordinates (-1 = left edge, +1 = right edge)') 

1592 

1593 nucleation_y = Float.T( 

1594 optional=True, 

1595 help='down-dip position of rupture nucleation in normalized fault ' 

1596 'plane coordinates (-1 = upper edge, +1 = lower edge)') 

1597 

1598 velocity = Float.T( 

1599 default=3500., 

1600 help='speed of explosion front [m/s]') 

1601 

1602 aggressive_oversampling = Bool.T( 

1603 default=False, 

1604 help='Aggressive oversampling for basesource discretization. ' 

1605 'When using \'multilinear\' interpolation oversampling has' 

1606 ' practically no effect.') 

1607 

1608 def base_key(self): 

1609 return Source.base_key(self) + (self.strike, self.dip, self.length, 

1610 self.width, self.nucleation_x, 

1611 self.nucleation_y, self.velocity, 

1612 self.anchor) 

1613 

1614 def discretize_basesource(self, store, target=None): 

1615 

1616 if self.nucleation_x is not None: 

1617 nucx = self.nucleation_x * 0.5 * self.length 

1618 else: 

1619 nucx = None 

1620 

1621 if self.nucleation_y is not None: 

1622 nucy = self.nucleation_y * 0.5 * self.width 

1623 else: 

1624 nucy = None 

1625 

1626 stf = self.effective_stf_pre() 

1627 

1628 points, times, amplitudes, dl, dw, nl, nw = discretize_rect_source( 

1629 store.config.deltas, store.config.deltat, 

1630 self.time, self.north_shift, self.east_shift, self.depth, 

1631 self.strike, self.dip, self.length, self.width, self.anchor, 

1632 self.velocity, stf=stf, nucleation_x=nucx, nucleation_y=nucy) 

1633 

1634 amplitudes /= num.sum(amplitudes) 

1635 amplitudes *= self.get_moment(store, target) 

1636 

1637 return meta.DiscretizedExplosionSource( 

1638 lat=self.lat, 

1639 lon=self.lon, 

1640 times=times, 

1641 north_shifts=points[:, 0], 

1642 east_shifts=points[:, 1], 

1643 depths=points[:, 2], 

1644 m0s=amplitudes) 

1645 

1646 def outline(self, cs='xyz'): 

1647 points = outline_rect_source(self.strike, self.dip, self.length, 

1648 self.width, self.anchor) 

1649 

1650 points[:, 0] += self.north_shift 

1651 points[:, 1] += self.east_shift 

1652 points[:, 2] += self.depth 

1653 if cs == 'xyz': 

1654 return points 

1655 elif cs == 'xy': 

1656 return points[:, :2] 

1657 elif cs in ('latlon', 'lonlat'): 

1658 latlon = ne_to_latlon( 

1659 self.lat, self.lon, points[:, 0], points[:, 1]) 

1660 

1661 latlon = num.array(latlon).T 

1662 if cs == 'latlon': 

1663 return latlon 

1664 else: 

1665 return latlon[:, ::-1] 

1666 

1667 def get_nucleation_abs_coord(self, cs='xy'): 

1668 

1669 if self.nucleation_x is None: 

1670 return None, None 

1671 

1672 coords = from_plane_coords(self.strike, self.dip, self.length, 

1673 self.width, self.depth, self.nucleation_x, 

1674 self.nucleation_y, lat=self.lat, 

1675 lon=self.lon, north_shift=self.north_shift, 

1676 east_shift=self.east_shift, cs=cs) 

1677 return coords 

1678 

1679 

1680class DCSource(SourceWithMagnitude): 

1681 ''' 

1682 A double-couple point source. 

1683 ''' 

1684 

1685 strike = Float.T( 

1686 default=0.0, 

1687 help='strike direction in [deg], measured clockwise from north') 

1688 

1689 dip = Float.T( 

1690 default=90.0, 

1691 help='dip angle in [deg], measured downward from horizontal') 

1692 

1693 rake = Float.T( 

1694 default=0.0, 

1695 help='rake angle in [deg], ' 

1696 'measured counter-clockwise from right-horizontal ' 

1697 'in on-plane view') 

1698 

1699 discretized_source_class = meta.DiscretizedMTSource 

1700 

1701 def base_key(self): 

1702 return Source.base_key(self) + (self.strike, self.dip, self.rake) 

1703 

1704 def get_factor(self): 

1705 return float(pmt.magnitude_to_moment(self.magnitude)) 

1706 

1707 def discretize_basesource(self, store, target=None): 

1708 mot = pmt.MomentTensor( 

1709 strike=self.strike, dip=self.dip, rake=self.rake) 

1710 

1711 times, amplitudes = self.effective_stf_pre().discretize_t( 

1712 store.config.deltat, self.time) 

1713 return meta.DiscretizedMTSource( 

1714 m6s=mot.m6()[num.newaxis, :] * amplitudes[:, num.newaxis], 

1715 **self._dparams_base_repeated(times)) 

1716 

1717 def pyrocko_moment_tensor(self, store=None, target=None): 

1718 return pmt.MomentTensor( 

1719 strike=self.strike, 

1720 dip=self.dip, 

1721 rake=self.rake, 

1722 scalar_moment=self.moment) 

1723 

1724 def pyrocko_event(self, store=None, target=None, **kwargs): 

1725 return SourceWithMagnitude.pyrocko_event( 

1726 self, store, target, 

1727 moment_tensor=self.pyrocko_moment_tensor(store, target), 

1728 **kwargs) 

1729 

1730 @classmethod 

1731 def from_pyrocko_event(cls, ev, **kwargs): 

1732 d = {} 

1733 mt = ev.moment_tensor 

1734 if mt: 

1735 (strike, dip, rake), _ = mt.both_strike_dip_rake() 

1736 d.update( 

1737 strike=float(strike), 

1738 dip=float(dip), 

1739 rake=float(rake), 

1740 magnitude=float(mt.moment_magnitude())) 

1741 

1742 d.update(kwargs) 

1743 return super(DCSource, cls).from_pyrocko_event(ev, **d) 

1744 

1745 

1746class CLVDSource(SourceWithMagnitude): 

1747 ''' 

1748 A pure CLVD point source. 

1749 ''' 

1750 

1751 discretized_source_class = meta.DiscretizedMTSource 

1752 

1753 azimuth = Float.T( 

1754 default=0.0, 

1755 help='azimuth direction of largest dipole, clockwise from north [deg]') 

1756 

1757 dip = Float.T( 

1758 default=90., 

1759 help='dip direction of largest dipole, downward from horizontal [deg]') 

1760 

1761 def base_key(self): 

1762 return Source.base_key(self) + (self.azimuth, self.dip) 

1763 

1764 def get_factor(self): 

1765 return float(pmt.magnitude_to_moment(self.magnitude)) 

1766 

1767 @property 

1768 def m6(self): 

1769 a = math.sqrt(4. / 3.) * self.get_factor() 

1770 m = pmt.symmat6(-0.5 * a, -0.5 * a, a, 0., 0., 0.) 

1771 rotmat1 = pmt.euler_to_matrix( 

1772 d2r * (self.dip - 90.), 

1773 d2r * (self.azimuth - 90.), 

1774 0.) 

1775 m = num.dot(rotmat1.T, num.dot(m, rotmat1)) 

1776 return pmt.to6(m) 

1777 

1778 @property 

1779 def m6_astuple(self): 

1780 return tuple(self.m6.tolist()) 

1781 

1782 def discretize_basesource(self, store, target=None): 

1783 factor = self.get_factor() 

1784 times, amplitudes = self.effective_stf_pre().discretize_t( 

1785 store.config.deltat, self.time) 

1786 return meta.DiscretizedMTSource( 

1787 m6s=self.m6[num.newaxis, :] * amplitudes[:, num.newaxis] / factor, 

1788 **self._dparams_base_repeated(times)) 

1789 

1790 def pyrocko_moment_tensor(self, store=None, target=None): 

1791 return pmt.MomentTensor(m=pmt.symmat6(*self.m6_astuple)) 

1792 

1793 def pyrocko_event(self, store=None, target=None, **kwargs): 

1794 mt = self.pyrocko_moment_tensor(store, target) 

1795 return Source.pyrocko_event( 

1796 self, store, target, 

1797 moment_tensor=self.pyrocko_moment_tensor(store, target), 

1798 magnitude=float(mt.moment_magnitude()), 

1799 **kwargs) 

1800 

1801 

1802class VLVDSource(SourceWithDerivedMagnitude): 

1803 ''' 

1804 Volumetric linear vector dipole source. 

1805 

1806 This source is a parameterization for a restricted moment tensor point 

1807 source, useful to represent dyke or sill like inflation or deflation 

1808 sources. The restriction is such that the moment tensor is rotational 

1809 symmetric. It can be represented by a superposition of a linear vector 

1810 dipole (here we use a CLVD for convenience) and an isotropic component. The 

1811 restricted moment tensor has 4 degrees of freedom: 2 independent 

1812 eigenvalues and 2 rotation angles orienting the the symmetry axis. 

1813 

1814 In this parameterization, the isotropic component is controlled by 

1815 ``volume_change``. To define the moment tensor, it must be converted to the 

1816 scalar moment of the the MT's isotropic component. For the conversion, the 

1817 shear modulus at the source's position must be known. This value is 

1818 extracted from the earth model defined in the GF store in use. 

1819 

1820 The CLVD part by controlled by its scalar moment :math:`M_0`: 

1821 ``clvd_moment``. The sign of ``clvd_moment`` is used to switch between a 

1822 positiv or negativ CLVD (the sign of the largest eigenvalue). 

1823 ''' 

1824 

1825 discretized_source_class = meta.DiscretizedMTSource 

1826 

1827 azimuth = Float.T( 

1828 default=0.0, 

1829 help='azimuth direction of symmetry axis, clockwise from north [deg].') 

1830 

1831 dip = Float.T( 

1832 default=90., 

1833 help='dip direction of symmetry axis, downward from horizontal [deg].') 

1834 

1835 volume_change = Float.T( 

1836 default=0., 

1837 help='volume change of the inflation/deflation [m^3].') 

1838 

1839 clvd_moment = Float.T( 

1840 default=0., 

1841 help='scalar moment :math:`M_0` of the CLVD component [Nm]. The sign ' 

1842 'controls the sign of the CLVD (the sign of its largest ' 

1843 'eigenvalue).') 

1844 

1845 def get_moment_to_volume_change_ratio(self, store, target): 

1846 if store is None or target is None: 

1847 raise DerivedMagnitudeError( 

1848 'Need earth model to convert between volume change and ' 

1849 'magnitude.') 

1850 

1851 points = num.array( 

1852 [[self.north_shift, self.east_shift, self.depth]], dtype=float) 

1853 

1854 try: 

1855 shear_moduli = store.config.get_shear_moduli( 

1856 self.lat, self.lon, 

1857 points=points, 

1858 interpolation=target.interpolation)[0] 

1859 

1860 bulk_moduli = store.config.get_bulk_moduli( 

1861 self.lat, self.lon, 

1862 points=points, 

1863 interpolation=target.interpolation)[0] 

1864 except meta.OutOfBounds: 

1865 raise DerivedMagnitudeError( 

1866 'Could not get shear modulus at source position.') 

1867 

1868 return float(2. * shear_moduli + bulk_moduli) 

1869 

1870 def base_key(self): 

1871 return Source.base_key(self) + \ 

1872 (self.azimuth, self.dip, self.volume_change, self.clvd_moment) 

1873 

1874 def get_magnitude(self, store=None, target=None): 

1875 mt = self.pyrocko_moment_tensor(store, target) 

1876 return float(pmt.moment_to_magnitude(mt.moment)) 

1877 

1878 def get_m6(self, store, target): 

1879 a = math.sqrt(4. / 3.) * self.clvd_moment 

1880 m_clvd = pmt.symmat6(-0.5 * a, -0.5 * a, a, 0., 0., 0.) 

1881 

1882 rotmat1 = pmt.euler_to_matrix( 

1883 d2r * (self.dip - 90.), 

1884 d2r * (self.azimuth - 90.), 

1885 0.) 

1886 m_clvd = num.dot(rotmat1.T, num.dot(m_clvd, rotmat1)) 

1887 

1888 m_iso = self.volume_change * \ 

1889 self.get_moment_to_volume_change_ratio(store, target) 

1890 

1891 m_iso = pmt.symmat6(m_iso, m_iso, m_iso, 0., 

1892 0., 0.,) * math.sqrt(2. / 3) 

1893 

1894 m = pmt.to6(m_clvd) + pmt.to6(m_iso) 

1895 return m 

1896 

1897 def get_moment(self, store=None, target=None): 

1898 return float(pmt.magnitude_to_moment( 

1899 self.get_magnitude(store, target))) 

1900 

1901 def get_m6_astuple(self, store, target): 

1902 m6 = self.get_m6(store, target) 

1903 return tuple(m6.tolist()) 

1904 

1905 def discretize_basesource(self, store, target=None): 

1906 times, amplitudes = self.effective_stf_pre().discretize_t( 

1907 store.config.deltat, self.time) 

1908 

1909 m6 = self.get_m6(store, target) 

1910 m6 *= amplitudes / self.get_factor() 

1911 

1912 return meta.DiscretizedMTSource( 

1913 m6s=m6[num.newaxis, :], 

1914 **self._dparams_base_repeated(times)) 

1915 

1916 def pyrocko_moment_tensor(self, store=None, target=None): 

1917 m6_astuple = self.get_m6_astuple(store, target) 

1918 return pmt.MomentTensor(m=pmt.symmat6(*m6_astuple)) 

1919 

1920 

1921class MTSource(Source): 

1922 ''' 

1923 A moment tensor point source. 

1924 ''' 

1925 

1926 discretized_source_class = meta.DiscretizedMTSource 

1927 

1928 mnn = Float.T( 

1929 default=1., 

1930 help='north-north component of moment tensor in [Nm]') 

1931 

1932 mee = Float.T( 

1933 default=1., 

1934 help='east-east component of moment tensor in [Nm]') 

1935 

1936 mdd = Float.T( 

1937 default=1., 

1938 help='down-down component of moment tensor in [Nm]') 

1939 

1940 mne = Float.T( 

1941 default=0., 

1942 help='north-east component of moment tensor in [Nm]') 

1943 

1944 mnd = Float.T( 

1945 default=0., 

1946 help='north-down component of moment tensor in [Nm]') 

1947 

1948 med = Float.T( 

1949 default=0., 

1950 help='east-down component of moment tensor in [Nm]') 

1951 

1952 def __init__(self, **kwargs): 

1953 if 'm6' in kwargs: 

1954 for (k, v) in zip('mnn mee mdd mne mnd med'.split(), 

1955 kwargs.pop('m6')): 

1956 kwargs[k] = float(v) 

1957 

1958 Source.__init__(self, **kwargs) 

1959 

1960 @property 

1961 def m6(self): 

1962 return num.array(self.m6_astuple) 

1963 

1964 @property 

1965 def m6_astuple(self): 

1966 return (self.mnn, self.mee, self.mdd, self.mne, self.mnd, self.med) 

1967 

1968 @m6.setter 

1969 def m6(self, value): 

1970 self.mnn, self.mee, self.mdd, self.mne, self.mnd, self.med = value 

1971 

1972 def base_key(self): 

1973 return Source.base_key(self) + self.m6_astuple 

1974 

1975 def discretize_basesource(self, store, target=None): 

1976 times, amplitudes = self.effective_stf_pre().discretize_t( 

1977 store.config.deltat, self.time) 

1978 return meta.DiscretizedMTSource( 

1979 m6s=self.m6[num.newaxis, :] * amplitudes[:, num.newaxis], 

1980 **self._dparams_base_repeated(times)) 

1981 

1982 def get_magnitude(self, store=None, target=None): 

1983 m6 = self.m6 

1984 return pmt.moment_to_magnitude( 

1985 math.sqrt(num.sum(m6[0:3]**2) + 2.0 * num.sum(m6[3:6]**2)) / 

1986 math.sqrt(2.)) 

1987 

1988 def pyrocko_moment_tensor(self, store=None, target=None): 

1989 return pmt.MomentTensor(m=pmt.symmat6(*self.m6_astuple)) 

1990 

1991 def pyrocko_event(self, store=None, target=None, **kwargs): 

1992 mt = self.pyrocko_moment_tensor(store, target) 

1993 return Source.pyrocko_event( 

1994 self, store, target, 

1995 moment_tensor=self.pyrocko_moment_tensor(store, target), 

1996 magnitude=float(mt.moment_magnitude()), 

1997 **kwargs) 

1998 

1999 @classmethod 

2000 def from_pyrocko_event(cls, ev, **kwargs): 

2001 d = {} 

2002 mt = ev.moment_tensor 

2003 if mt: 

2004 d.update(m6=tuple(map(float, mt.m6()))) 

2005 else: 

2006 if ev.magnitude is not None: 

2007 mom = pmt.magnitude_to_moment(ev.magnitude) 

2008 v = math.sqrt(2. / 3.) * mom 

2009 d.update(m6=(v, v, v, 0., 0., 0.)) 

2010 

2011 d.update(kwargs) 

2012 return super(MTSource, cls).from_pyrocko_event(ev, **d) 

2013 

2014 

2015map_anchor = { 

2016 'center': (0.0, 0.0), 

2017 'center_left': (-1.0, 0.0), 

2018 'center_right': (1.0, 0.0), 

2019 'top': (0.0, -1.0), 

2020 'top_left': (-1.0, -1.0), 

2021 'top_right': (1.0, -1.0), 

2022 'bottom': (0.0, 1.0), 

2023 'bottom_left': (-1.0, 1.0), 

2024 'bottom_right': (1.0, 1.0)} 

2025 

2026 

2027class RectangularSource(SourceWithDerivedMagnitude): 

2028 ''' 

2029 Classical Haskell source model modified for bilateral rupture. 

2030 ''' 

2031 

2032 discretized_source_class = meta.DiscretizedMTSource 

2033 

2034 magnitude = Float.T( 

2035 optional=True, 

2036 help='moment magnitude Mw as in [Hanks and Kanamori, 1979]') 

2037 

2038 strike = Float.T( 

2039 default=0.0, 

2040 help='strike direction in [deg], measured clockwise from north') 

2041 

# NOTE(review): these are Guts-declared attributes of a rectangular source
# class whose ``class`` header lies above this excerpt — confirm against the
# full file.

dip = Float.T(
    default=90.0,
    help='dip angle in [deg], measured downward from horizontal')

rake = Float.T(
    default=0.0,
    help='rake angle in [deg], '
         'measured counter-clockwise from right-horizontal '
         'in on-plane view')

# Zero length/width degenerate to a point source; see the discretization
# helper for how the plane is subdivided.
length = Float.T(
    default=0.,
    help='length of rectangular source area [m]')

width = Float.T(
    default=0.,
    help='width of rectangular source area [m]')

# NOTE(review): the help text mentions ``center_left``/``center right``
# (the latter also missing an underscore) but neither is in ``choices`` —
# confirm the intended anchor set.
anchor = StringChoice.T(
    choices=['top', 'top_left', 'top_right', 'center', 'bottom',
             'bottom_left', 'bottom_right'],
    default='center',
    optional=True,
    help='Anchor point for positioning the plane, can be: ``top, center '
         'bottom, top_left, top_right,bottom_left,'
         'bottom_right, center_left, center right``.')

# Nucleation coordinates are normalized to the plane extent (-1..1);
# ``None`` means an instantaneous rupture of the whole plane.
nucleation_x = Float.T(
    optional=True,
    help='horizontal position of rupture nucleation in normalized fault '
         'plane coordinates (``-1.`` = left edge, ``+1.`` = right edge)')

nucleation_y = Float.T(
    optional=True,
    help='down-dip position of rupture nucleation in normalized fault '
         'plane coordinates (``-1.`` = upper edge, ``+1.`` = lower edge)')

velocity = Float.T(
    default=3500.,
    help='speed of rupture front [m/s]')

# ``slip`` and ``magnitude`` are mutually exclusive; see check_conflicts().
slip = Float.T(
    optional=True,
    help='Slip on the rectangular source area [m]')

opening_fraction = Float.T(
    default=0.,
    help='Determines fraction of slip related to opening. '
         '(``-1``: pure tensile closing, '
         '``0``: pure shear, '
         '``1``: pure tensile opening)')

decimation_factor = Int.T(
    optional=True,
    default=1,
    help='Sub-source decimation factor, a larger decimation will'
         ' make the result inaccurate but shorten the necessary'
         ' computation time (use for testing puposes only).')

aggressive_oversampling = Bool.T(
    default=False,
    help='Aggressive oversampling for basesource discretization. '
         'When using \'multilinear\' interpolation oversampling has'
         ' practically no effect.')

2106 

def __init__(self, **kwargs):
    '''
    Initialize, translating a legacy ``moment`` argument into
    ``magnitude`` when the latter is not given explicitly.
    '''
    try:
        mom = kwargs.pop('moment')
    except KeyError:
        pass
    else:
        if 'magnitude' not in kwargs:
            kwargs['magnitude'] = float(pmt.moment_to_magnitude(mom))

    SourceWithDerivedMagnitude.__init__(self, **kwargs)

2114 

def base_key(self):
    '''
    Get a tuple of source parameters identifying this source for
    Green's function caching purposes.
    '''
    extra = tuple(getattr(self, name) for name in (
        'magnitude', 'slip', 'strike', 'dip', 'rake', 'length', 'width',
        'nucleation_x', 'nucleation_y', 'velocity', 'decimation_factor',
        'anchor'))

    return SourceWithDerivedMagnitude.base_key(self) + extra

2129 

def check_conflicts(self):
    '''
    Raise :py:exc:`DerivedMagnitudeError` when ``magnitude`` and
    ``slip`` are both set — only one may define the source strength.
    '''
    if not (self.magnitude is None or self.slip is None):
        raise DerivedMagnitudeError(
            'Magnitude and slip are both defined.')

2134 

def get_magnitude(self, store=None, target=None):
    '''
    Get the moment magnitude, deriving it from ``slip`` if needed.

    When ``slip`` defines the source strength, the earth model
    (``store``) and target interpolation method are required to convert
    slip to seismic moment.
    '''
    self.check_conflicts()

    if self.magnitude is not None:
        return self.magnitude

    if self.slip is None:
        # Neither magnitude nor slip given: magnitude of a unit moment.
        return float(pmt.moment_to_magnitude(1.0))

    if None in (store, target):
        raise DerivedMagnitudeError(
            'Magnitude for a rectangular source with slip defined '
            'can only be derived when earth model and target '
            'interpolation method are available.')

    amplitudes = self._discretize(store, target)[2]

    if amplitudes.ndim == 2:
        # CLVD component has no net moment, leave out
        moment = num.sum(num.abs(amplitudes[0:2, :]).sum())
    else:
        moment = num.sum(amplitudes)

    return float(pmt.moment_to_magnitude(moment))

2157 

def get_factor(self):
    '''
    Get the amplitude scaling factor (always ``1.0`` here; amplitudes
    are baked into the discretized sub-sources).
    '''
    return 1.0

2160 

def get_slip_tensile(self):
    '''
    Get the tensile (opening/closing) part of the slip in [m].
    '''
    return self.opening_fraction * self.slip

2163 

def get_slip_shear(self):
    '''
    Get the shear part of the slip in [m].

    The shear slip is the total slip minus the magnitude of the tensile
    (opening/closing) contribution.
    '''
    # Bug fix: ``self.get_slip_tensile`` was referenced without calling it,
    # so ``abs()`` received a bound method and raised ``TypeError``.
    return self.slip - abs(self.get_slip_tensile())

2166 

def _discretize(self, store, target):
    '''
    Discretize the rectangular source into point sub-sources.

    Returns ``(points, times, amplitudes, dl, dw, nl, nw)`` as produced
    by ``discretize_rect_source``, with the per-point amplitudes scaled
    either from slip (via elastic moduli of the earth model) or
    normalized so they sum to the total source moment.
    '''
    # Convert normalized nucleation coordinates (-1..1) to offsets in
    # [m] from the plane center; None means instantaneous rupture.
    if self.nucleation_x is not None:
        nucx = self.nucleation_x * 0.5 * self.length
    else:
        nucx = None

    if self.nucleation_y is not None:
        nucy = self.nucleation_y * 0.5 * self.width
    else:
        nucy = None

    stf = self.effective_stf_pre()

    points, times, amplitudes, dl, dw, nl, nw = discretize_rect_source(
        store.config.deltas, store.config.deltat,
        self.time, self.north_shift, self.east_shift, self.depth,
        self.strike, self.dip, self.length, self.width, self.anchor,
        self.velocity, stf=stf, nucleation_x=nucx, nucleation_y=nucy,
        decimation_factor=self.decimation_factor,
        aggressive_oversampling=self.aggressive_oversampling)

    if self.slip is not None:
        if target is not None:
            interpolation = target.interpolation
        else:
            interpolation = 'nearest_neighbor'
            logger.warning(
                'no target information available, will use '
                '"nearest_neighbor" interpolation when extracting shear '
                'modulus from earth model')

        shear_moduli = store.config.get_shear_moduli(
            self.lat, self.lon,
            points=points,
            interpolation=interpolation)

        tensile_slip = self.get_slip_tensile()
        shear_slip = self.slip - abs(tensile_slip)

        # Shear moment density first; tensile parts appended as
        # separate rows (ISO scaled by bulk modulus, CLVD by 2/3 shear
        # modulus) when opening/closing is present.
        amplitudes_total = [shear_moduli * shear_slip]
        if tensile_slip != 0:
            bulk_moduli = store.config.get_bulk_moduli(
                self.lat, self.lon,
                points=points,
                interpolation=interpolation)

            tensile_iso = bulk_moduli * tensile_slip
            tensile_clvd = (2. / 3.) * shear_moduli * tensile_slip

            amplitudes_total.extend([tensile_iso, tensile_clvd])

        # Scale moment density by STF weights and patch area (dl * dw).
        amplitudes_total = num.vstack(amplitudes_total).squeeze() * \
            amplitudes * dl * dw

    else:
        # normalization to retain total moment
        amplitudes_norm = amplitudes / num.sum(amplitudes)
        moment = self.get_moment(store, target)

        amplitudes_total = [
            amplitudes_norm * moment * (1 - abs(self.opening_fraction))]
        if self.opening_fraction != 0.:
            amplitudes_total.append(
                amplitudes_norm * self.opening_fraction * moment)

        amplitudes_total = num.vstack(amplitudes_total).squeeze()

    return points, times, num.atleast_1d(amplitudes_total), dl, dw, nl, nw

2235 

def discretize_basesource(self, store, target=None):
    '''
    Prepare source for synthetic seismogram calculation.

    :param store:
        Green's function database (needs to cover whole region of the
        source).
    :type store:
        :py:class:`~pyrocko.gf.store.Store`

    :param target:
        Target information, used to pick the interpolation method when
        deriving amplitudes from slip.
    :type target:
        optional, :py:class:`~pyrocko.gf.targets.Target`

    :returns:
        Source discretized into moment tensor point sources.
    :rtype:
        :py:class:`~pyrocko.gf.meta.DiscretizedMTSource`
    '''
    points, times, amplitudes, dl, dw, nl, nw = self._discretize(
        store, target)

    mot = pmt.MomentTensor(
        strike=self.strike, dip=self.dip, rake=self.rake)
    m6s = num.repeat(mot.m6()[num.newaxis, :], times.size, axis=0)

    if amplitudes.ndim == 1:
        m6s[:, :] *= amplitudes[:, num.newaxis]
    elif amplitudes.ndim == 2:
        # shear MT components
        rotmat1 = pmt.euler_to_matrix(
            d2r * self.dip, d2r * self.strike, d2r * -self.rake)
        m6s[:, :] *= amplitudes[0, :][:, num.newaxis]

        if amplitudes.shape[0] == 2:
            # tensile MT components - moment/magnitude input
            tensile = pmt.symmat6(1., 1., 3., 0., 0., 0.)
            rot_tensile = pmt.to6(
                num.dot(rotmat1.T, num.dot(tensile, rotmat1)))

            m6s_tensile = rot_tensile[
                num.newaxis, :] * amplitudes[1, :][:, num.newaxis]
            m6s += m6s_tensile

        elif amplitudes.shape[0] == 3:
            # tensile MT components - slip input
            iso = pmt.symmat6(1., 1., 1., 0., 0., 0.)
            clvd = pmt.symmat6(-1., -1., 2., 0., 0., 0.)

            rot_iso = pmt.to6(
                num.dot(rotmat1.T, num.dot(iso, rotmat1)))
            rot_clvd = pmt.to6(
                num.dot(rotmat1.T, num.dot(clvd, rotmat1)))

            m6s_iso = rot_iso[
                num.newaxis, :] * amplitudes[1, :][:, num.newaxis]
            m6s_clvd = rot_clvd[
                num.newaxis, :] * amplitudes[2, :][:, num.newaxis]
            m6s += m6s_iso + m6s_clvd
        else:
            # Typo in original message fixed ('Unknwown').
            raise ValueError('Unknown amplitudes shape!')
    else:
        raise ValueError(
            'Unexpected dimension of {}'.format(amplitudes.ndim))

    ds = meta.DiscretizedMTSource(
        lat=self.lat,
        lon=self.lon,
        times=times,
        north_shifts=points[:, 0],
        east_shifts=points[:, 1],
        depths=points[:, 2],
        m6s=m6s,
        dl=dl,
        dw=dw,
        nl=nl,
        nw=nw)

    return ds

2298 

def outline(self, cs='xyz'):
    '''
    Get corner coordinates of the source outline.

    :param cs:
        :ref:`Output coordinate system <coordinate-system-names>`.
    :type cs:
        optional, str
    '''
    points = outline_rect_source(
        self.strike, self.dip, self.length, self.width, self.anchor)

    # Shift from plane-local to absolute (north, east, depth) coordinates.
    points[:, 0] += self.north_shift
    points[:, 1] += self.east_shift
    points[:, 2] += self.depth

    if cs == 'xyz':
        return points

    if cs == 'xy':
        return points[:, :2]

    if cs in ('latlon', 'lonlat', 'latlondepth'):
        latlon = num.array(ne_to_latlon(
            self.lat, self.lon, points[:, 0], points[:, 1])).T

        if cs == 'latlon':
            return latlon

        if cs == 'lonlat':
            return latlon[:, ::-1]

        return num.concatenate(
            (latlon, points[:, 2].reshape((len(points), 1))), axis=1)

2323 

def points_on_source(self, cs='xyz', **kwargs):
    '''
    Convert plane-relative point coordinates to geographical ones.

    Extra ``**kwargs`` (``points_x``, ``points_y``) are forwarded to
    ``points_on_rect_source``.

    :param cs:
        :ref:`Output coordinate system <coordinate-system-names>`.
    :type cs:
        optional, str
    '''
    points = points_on_rect_source(
        self.strike, self.dip, self.length, self.width,
        self.anchor, **kwargs)

    # Shift from plane-local to absolute (north, east, depth) coordinates.
    points[:, 0] += self.north_shift
    points[:, 1] += self.east_shift
    points[:, 2] += self.depth

    if cs == 'xyz':
        return points

    if cs == 'xy':
        return points[:, :2]

    if cs in ('latlon', 'lonlat', 'latlondepth'):
        latlon = num.array(ne_to_latlon(
            self.lat, self.lon, points[:, 0], points[:, 1])).T

        if cs == 'latlon':
            return latlon

        if cs == 'lonlat':
            return latlon[:, ::-1]

        return num.concatenate(
            (latlon, points[:, 2].reshape((len(points), 1))), axis=1)

2350 

def get_nucleation_abs_coord(self, cs='xy'):
    '''
    Get the absolute coordinates of the nucleation point.

    Returns ``(None, None)`` when no nucleation point is defined.
    '''
    if self.nucleation_x is None:
        return None, None

    return from_plane_coords(
        self.strike, self.dip, self.length, self.width, self.depth,
        self.nucleation_x, self.nucleation_y,
        lat=self.lat, lon=self.lon,
        north_shift=self.north_shift, east_shift=self.east_shift,
        cs=cs)

2362 

def pyrocko_moment_tensor(self, store=None, target=None):
    '''
    Get the moment tensor of this source.
    '''
    scalar_moment = self.get_moment(store, target)
    return pmt.MomentTensor(
        strike=self.strike, dip=self.dip, rake=self.rake,
        scalar_moment=scalar_moment)

2369 

def pyrocko_event(self, store=None, target=None, **kwargs):
    '''
    Convert to a Pyrocko event via the parent class implementation.
    '''
    return SourceWithDerivedMagnitude.pyrocko_event(
        self, store, target, **kwargs)

2374 

@classmethod
def from_pyrocko_event(cls, ev, **kwargs):
    '''
    Create source from a Pyrocko event, taking strike, dip, rake and
    magnitude from its moment tensor when available.
    '''
    d = {}
    mt = ev.moment_tensor
    if mt:
        (strike, dip, rake), _ = mt.both_strike_dip_rake()
        d = dict(
            strike=float(strike),
            dip=float(dip),
            rake=float(rake),
            magnitude=float(mt.moment_magnitude()))

    d.update(kwargs)
    return super(RectangularSource, cls).from_pyrocko_event(ev, **d)

2389 

2390 

class PseudoDynamicRupture(SourceWithDerivedMagnitude):
    '''
    Combined Eikonal and Okada quasi-dynamic rupture model.

    Details are described in :doc:`/topics/pseudo-dynamic-rupture`.
    Note: attribute `stf` is not used so far, but kept for future applications.
    '''

    discretized_source_class = meta.DiscretizedMTSource

    strike = Float.T(
        default=0.0,
        help='Strike direction in [deg], measured clockwise from north.')

    dip = Float.T(
        default=0.0,
        help='Dip angle in [deg], measured downward from horizontal.')

    length = Float.T(
        default=10. * km,
        help='Length of rectangular source area in [m].')

    width = Float.T(
        default=5. * km,
        help='Width of rectangular source area in [m].')

    anchor = StringChoice.T(
        choices=['top', 'top_left', 'top_right', 'center', 'bottom',
                 'bottom_left', 'bottom_right'],
        default='center',
        optional=True,
        help='Anchor point for positioning the plane, can be: ``top, center, '
             'bottom, top_left, top_right, bottom_left, '
             'bottom_right, center_left, center_right``.')

    # Trailing-double-underscore names are backing fields; the class defines
    # matching properties below which normalize assigned values to arrays.
    nucleation_x__ = Array.T(
        default=num.array([0.]),
        dtype=num.float64,
        serialize_as='list',
        help='Horizontal position of rupture nucleation in normalized fault '
             'plane coordinates (``-1.`` = left edge, ``+1.`` = right edge).')

    nucleation_y__ = Array.T(
        default=num.array([0.]),
        dtype=num.float64,
        serialize_as='list',
        help='Down-dip position of rupture nucleation in normalized fault '
             'plane coordinates (``-1.`` = upper edge, ``+1.`` = lower edge).')

    nucleation_time__ = Array.T(
        optional=True,
        help='Time in [s] after origin, when nucleation points defined by '
             '``nucleation_x`` and ``nucleation_y`` rupture.',
        dtype=num.float64,
        serialize_as='list')

    gamma = Float.T(
        default=0.8,
        help='Scaling factor between rupture velocity and S-wave velocity: '
             r':math:`v_r = \gamma * v_s`.')

    nx = Int.T(
        default=2,
        help='Number of discrete source patches in x direction (along '
             'strike).')

    ny = Int.T(
        default=2,
        help='Number of discrete source patches in y direction (down dip).')

    # ``slip`` and ``rake``/``tractions`` interplay is resolved in
    # check_conflicts() and get_tractions().
    slip = Float.T(
        optional=True,
        help='Maximum slip of the rectangular source [m]. '
             'Setting the slip the tractions/stress field '
             'will be normalized to accomodate the desired maximum slip.')

    rake = Float.T(
        optional=True,
        help='Rake angle in [deg], '
             'measured counter-clockwise from right-horizontal '
             'in on-plane view. Rake is translated into homogenous tractions '
             'in strike and up-dip direction. ``rake`` is mutually exclusive '
             'with tractions parameter.')

    patches = List.T(
        OkadaSource.T(),
        optional=True,
        help='List of all boundary elements/sub faults/fault patches.')

    patch_mask__ = Array.T(
        dtype=bool,
        serialize_as='list',
        shape=(None,),
        optional=True,
        help='Mask for all boundary elements/sub faults/fault patches. True '
             'leaves the patch in the calculation, False excludes the patch.')

    tractions = TractionField.T(
        optional=True,
        help='Traction field the rupture plane is exposed to. See the'
             ':py:mod:`pyrocko.gf.tractions` module for more details. '
             'If ``tractions=None`` and ``rake`` is given'
             ' :py:class:`~pyrocko.gf.tractions.DirectedTractions` will'
             ' be used.')

    coef_mat = Array.T(
        optional=True,
        help='Coefficient matrix linking traction and dislocation field.',
        dtype=num.float64,
        shape=(None, None))

    eikonal_decimation = Int.T(
        optional=True,
        default=1,
        help='Sub-source eikonal factor, a smaller eikonal factor will'
             ' increase the accuracy of rupture front calculation but'
             ' increases also the computation time.')

    decimation_factor = Int.T(
        optional=True,
        default=1,
        help='Sub-source decimation factor, a larger decimation will'
             ' make the result inaccurate but shorten the necessary'
             ' computation time (use for testing puposes only).')

    nthreads = Int.T(
        optional=True,
        default=1,
        help='Number of threads for Okada forward modelling, '
             'matrix inversion and calculation of point subsources. '
             'Note: for small/medium matrices 1 thread is most efficient.')

    pure_shear = Bool.T(
        optional=True,
        default=False,
        help='Calculate only shear tractions and omit tensile tractions.')

    smooth_rupture = Bool.T(
        default=True,
        help='Smooth the tractions by weighting partially ruptured'
             ' fault patches.')

    aggressive_oversampling = Bool.T(
        default=False,
        help='Aggressive oversampling for basesource discretization. '
             'When using \'multilinear\' interpolation oversampling has'
             ' practically no effect.')

2538 

def __init__(self, **kwargs):
    '''
    Initialize, set up the interpolator cache and validate the
    rake/tractions configuration.
    '''
    SourceWithDerivedMagnitude.__init__(self, **kwargs)
    # Cache for rupture velocity/time interpolators, keyed by
    # interpolation method name.
    self._interpolators = dict()
    self.check_conflicts()

2543 

@property
def nucleation_x(self):
    '''
    Horizontal nucleation position(s), normalized (-1..1), as array.
    '''
    return self.nucleation_x__

@nucleation_x.setter
def nucleation_x(self, nucleation_x):
    # Normalize scalars and lists to numpy arrays; pass through arrays
    # and None unchanged.
    if nucleation_x is None or isinstance(nucleation_x, num.ndarray):
        value = nucleation_x
    elif isinstance(nucleation_x, list):
        value = num.array(nucleation_x)
    else:
        value = num.array([nucleation_x])

    self.nucleation_x__ = value

2558 

@property
def nucleation_y(self):
    '''
    Down-dip nucleation position(s), normalized (-1..1), as array.
    '''
    return self.nucleation_y__

@nucleation_y.setter
def nucleation_y(self, nucleation_y):
    # Normalize scalars and lists to numpy arrays; pass through arrays
    # and None unchanged.
    if nucleation_y is None or isinstance(nucleation_y, num.ndarray):
        value = nucleation_y
    elif isinstance(nucleation_y, list):
        value = num.array(nucleation_y)
    else:
        value = num.array([nucleation_y])

    self.nucleation_y__ = value

2573 

@property
def nucleation(self):
    '''
    Nucleation points as ``(n, 2)`` array of (x, y) pairs, or ``None``
    when either coordinate is unset.
    '''
    x = self.nucleation_x
    y = self.nucleation_y

    if x is None or y is None:
        return None

    assert x.shape[0] == y.shape[0]

    return num.concatenate(
        (x[:, num.newaxis], y[:, num.newaxis]), axis=1)

@nucleation.setter
def nucleation(self, pairs):
    if isinstance(pairs, list):
        pairs = num.array(pairs)

    assert pairs.shape[1] == 2

    self.nucleation_x = pairs[:, 0]
    self.nucleation_y = pairs[:, 1]

2595 

@property
def nucleation_time(self):
    '''
    Nucleation time(s) in [s] after origin as array, or ``None``.
    '''
    return self.nucleation_time__

@nucleation_time.setter
def nucleation_time(self, nucleation_time):
    # Wrap scalars into a one-element array; keep arrays and None as-is.
    if nucleation_time is not None \
            and not isinstance(nucleation_time, num.ndarray):
        nucleation_time = num.array([nucleation_time])

    self.nucleation_time__ = nucleation_time

2607 

@property
def patch_mask(self):
    '''
    Patch mask as boolean array of length ``nx * ny``.

    Falls back to an all-True mask when unset or of mismatching size.
    '''
    mask = self.patch_mask__
    if mask is not None and mask.shape == (self.nx * self.ny,):
        return mask

    return num.ones(self.nx * self.ny, dtype=bool)

@patch_mask.setter
def patch_mask(self, patch_mask):
    if isinstance(patch_mask, list):
        patch_mask = num.array(patch_mask)

    self.patch_mask__ = patch_mask

2623 

def get_tractions(self):
    '''
    Get source traction vectors.

    If :py:attr:`rake` is given, unit length directed traction vectors
    (:py:class:`~pyrocko.gf.tractions.DirectedTractions`) are returned,
    else the given :py:attr:`tractions` are used.

    :returns:
        Traction vectors per patch.
    :rtype:
        :py:class:`~numpy.ndarray`: ``(n_patches, 3)``.
    '''
    if self.rake is None:
        tractions = self.tractions
    else:
        if num.isnan(self.rake):
            raise ValueError('Rake must be a real number, not NaN.')

        logger.warning(
            'Tractions are derived based on the given source rake.')
        tractions = DirectedTractions(rake=self.rake)

    return tractions.get_tractions(self.nx, self.ny, self.patches)

2648 

def base_key(self):
    '''
    Get a tuple of source parameters identifying this source for
    Green's function caching purposes.
    '''
    scalars = tuple(getattr(self, name) for name in (
        'slip', 'strike', 'dip', 'rake', 'length', 'width'))

    return SourceWithDerivedMagnitude.base_key(self) + scalars + (
        float(self.nucleation_x.mean()),
        float(self.nucleation_y.mean()),
        self.decimation_factor,
        self.anchor,
        self.pure_shear,
        self.gamma,
        tuple(self.patch_mask))

2664 

def check_conflicts(self):
    '''
    Check and resolve the ``tractions``/``rake`` configuration.

    Raises :py:exc:`AttributeError` when both are set; assigns a
    default rake of ``0.`` when neither is given.
    '''
    if self.tractions and self.rake:
        raise AttributeError(
            'Tractions and rake are mutually exclusive.')

    if self.rake is None and self.tractions is None:
        self.rake = 0.

2671 

def get_magnitude(self, store=None, target=None):
    '''
    Get the moment magnitude, derived by time-integrating the moment
    rate of the discretized source when slip or tractions are set.
    '''
    self.check_conflicts()

    if self.slip is None and self.tractions is None:
        # Default: magnitude of a unit moment.
        return float(pmt.moment_to_magnitude(1.0))

    if store is None:
        raise DerivedMagnitudeError(
            'Magnitude for a rectangular source with slip or '
            'tractions defined can only be derived when earth model '
            'is set.')

    discretized = self.discretize_basesource(store, target=target)
    moment_rate, calc_times = discretized.get_moment_rate(
        store.config.deltat)

    # Sample spacing; the first interval is repeated so deltat has the
    # same length as the moment rate samples.
    dt = num.diff(calc_times)
    deltat = num.concatenate(((dt[0],), dt))

    moment = num.sum(moment_rate * deltat)
    return float(pmt.moment_to_magnitude(moment))

2693 

def get_factor(self):
    '''
    Get the amplitude scaling factor (always ``1.0`` here).
    '''
    return 1.0

2696 

def outline(self, cs='xyz'):
    '''
    Get source outline corner coordinates.

    :param cs:
        :ref:`Output coordinate system <coordinate-system-names>`.
    :type cs:
        optional, str

    :returns:
        Corner points in desired coordinate system.
    :rtype:
        :py:class:`~numpy.ndarray`: ``(5, [2, 3])``.
    '''
    points = outline_rect_source(
        self.strike, self.dip, self.length, self.width, self.anchor)

    # Shift from plane-local to absolute (north, east, depth) coordinates.
    points[:, 0] += self.north_shift
    points[:, 1] += self.east_shift
    points[:, 2] += self.depth

    if cs == 'xyz':
        return points

    if cs == 'xy':
        return points[:, :2]

    if cs in ('latlon', 'lonlat', 'latlondepth'):
        latlon = num.array(ne_to_latlon(
            self.lat, self.lon, points[:, 0], points[:, 1])).T

        if cs == 'latlon':
            return latlon

        if cs == 'lonlat':
            return latlon[:, ::-1]

        return num.concatenate(
            (latlon, points[:, 2].reshape((len(points), 1))), axis=1)

2734 

def points_on_source(self, cs='xyz', **kwargs):
    '''
    Convert relative plane coordinates to geographical coordinates.

    Given x and y coordinates (relative source coordinates between -1.
    and 1.) are converted to desired geographical coordinates.
    Coordinates need to be given as :py:class:`~numpy.ndarray`
    arguments ``points_x`` and ``points_y``.

    :param cs:
        :ref:`Output coordinate system <coordinate-system-names>`.
    :type cs:
        optional, str

    :returns:
        Point coordinates in desired coordinate system.
    :rtype:
        :py:class:`~numpy.ndarray`: ``(n_points, [2, 3])``.
    '''
    points = points_on_rect_source(
        self.strike, self.dip, self.length, self.width,
        self.anchor, **kwargs)

    # Shift from plane-local to absolute (north, east, depth) coordinates.
    points[:, 0] += self.north_shift
    points[:, 1] += self.east_shift
    points[:, 2] += self.depth

    if cs == 'xyz':
        return points

    if cs == 'xy':
        return points[:, :2]

    if cs in ('latlon', 'lonlat', 'latlondepth'):
        latlon = num.array(ne_to_latlon(
            self.lat, self.lon, points[:, 0], points[:, 1])).T

        if cs == 'latlon':
            return latlon

        if cs == 'lonlat':
            return latlon[:, ::-1]

        return num.concatenate(
            (latlon, points[:, 2].reshape((len(points), 1))), axis=1)

2778 

def pyrocko_moment_tensor(self, store=None, target=None):
    '''
    Get an equivalent moment tensor.

    The rake is the norm-weighted average over the per-patch slip (when
    a store is given) or traction vectors.
    '''
    if store is None:
        data = self.get_tractions()
    else:
        if not self.patches:
            self.discretize_patches(store)

        data = self.get_slip()

    # Weight each patch by its vector magnitude.
    weights = num.linalg.norm(data, axis=1)
    weights /= weights.sum()

    rakes = num.arctan2(data[:, 1], data[:, 0]) * r2d
    rake = num.average(rakes, weights=weights)

    return pmt.MomentTensor(
        strike=self.strike,
        dip=self.dip,
        rake=rake,
        scalar_moment=self.get_moment(store, target))

2799 

def pyrocko_event(self, store=None, target=None, **kwargs):
    '''
    Convert to a Pyrocko event via the parent class implementation.
    '''
    return SourceWithDerivedMagnitude.pyrocko_event(
        self, store, target, **kwargs)

2804 

@classmethod
def from_pyrocko_event(cls, ev, **kwargs):
    '''
    Create source from a Pyrocko event, taking strike, dip and rake
    from its moment tensor when available.
    '''
    d = {}
    mt = ev.moment_tensor
    if mt:
        (strike, dip, rake), _ = mt.both_strike_dip_rake()
        d = dict(
            strike=float(strike),
            dip=float(dip),
            rake=float(rake))

    d.update(kwargs)
    return super(PseudoDynamicRupture, cls).from_pyrocko_event(ev, **d)

2818 

def _discretize_points(self, store, *args, **kwargs):
    '''
    Discretize source plane with equal vertical and horizontal spacing.

    Additional ``*args`` and ``**kwargs`` are passed to
    :py:meth:`points_on_source`.

    :param store:
        Green's function database (needs to cover whole region of the
        source).
    :type store:
        :py:class:`~pyrocko.gf.store.Store`

    :returns:
        Number of points in strike and dip direction, distance
        between adjacent points, coordinates (latlondepth) and coordinates
        (xy on fault) for discrete points.
    :rtype:
        (int, int, float, :py:class:`~numpy.ndarray`,
        :py:class:`~numpy.ndarray`).
    '''
    anch_x, anch_y = map_anchor[self.anchor]

    # Sample a roughly 1 km spaced line of points down-dip to probe the
    # earth model for the minimum S-wave velocity along the plane.
    npoints = int(self.width // km) + 1
    points = num.zeros((npoints, 3))
    points[:, 1] = num.linspace(-1., 1., npoints)
    points[:, 1] = (points[:, 1] - anch_y) * self.width/2

    # Rotate the down-dip line into absolute (north, east, depth).
    rotmat = pmt.euler_to_matrix(self.dip*d2r, self.strike*d2r, 0.0)
    points = num.dot(rotmat.T, points.T).T
    points[:, 2] += self.depth

    vs_min = store.config.get_vs(
        self.lat, self.lon, points,
        interpolation='nearest_neighbor')
    # Floor of 0.5 km/s guards against zero or very low vs layers.
    vr_min = max(vs_min.min(), .5*km) * self.gamma

    oversampling = 10.
    delta_l = self.length / (self.nx * oversampling)
    delta_w = self.width / (self.ny * oversampling)

    # Point spacing limited by GF sampling, minimum rupture speed and
    # oversampled patch size; eikonal_decimation coarsens it.
    delta = self.eikonal_decimation * num.min([
        store.config.deltat * vr_min / oversampling,
        delta_l, delta_w] + [
        deltas for deltas in store.config.deltas])

    # Snap delta so the width is an integer multiple of it.
    delta = delta_w / num.ceil(delta_w / delta)

    nx = int(num.ceil(self.length / delta)) + 1
    ny = int(num.ceil(self.width / delta)) + 1

    # Along-strike overshoot from rounding, expressed in normalized
    # coordinates so the grid stays symmetric about the plane.
    rem_l = (nx-1)*delta - self.length
    lim_x = rem_l / self.length

    points_xy = num.zeros((nx * ny, 2))
    points_xy[:, 0] = num.repeat(
        num.linspace(-1.-lim_x, 1.+lim_x, nx), ny)
    points_xy[:, 1] = num.tile(
        num.linspace(-1., 1., ny), nx)

    points = self.points_on_source(
        points_x=points_xy[:, 0],
        points_y=points_xy[:, 1],
        **kwargs)

    return nx, ny, delta, points, points_xy

2885 

def _discretize_rupture_v(self, store, interpolation='nearest_neighbor',
                          points=None):
    '''
    Get rupture velocity for discrete points on the source plane.

    The rupture velocity is estimated as ``gamma * vs`` with ``vs``
    extracted from the store's earth model at the given points.

    :param store:
        Green's function database (needs to cover the whole region of
        the source).
    :type store:
        optional, :py:class:`~pyrocko.gf.store.Store`

    :param interpolation:
        Interpolation method to use (choose between
        ``'nearest_neighbor'`` and ``'multilinear'``).
    :type interpolation:
        optional, str

    :param points:
        Coordinates on fault (-1.:1.) of discrete points.
    :type points:
        optional, :py:class:`~numpy.ndarray`: ``(n_points, 2)``

    :returns:
        Rupture velocity for the discrete points.
    :rtype:
        :py:class:`~numpy.ndarray`: ``(n_points, )``.
    '''
    if points is None:
        _, _, _, points, _ = self._discretize_points(store, cs='xyz')

    vs = store.config.get_vs(
        self.lat, self.lon,
        points=points,
        interpolation=interpolation)

    return vs * self.gamma

2922 

def discretize_time(
        self, store, interpolation='nearest_neighbor',
        vr=None, times=None, *args, **kwargs):
    '''
    Get rupture start time for discrete points on source plane.

    :param store:
        Green's function database (needs to cover whole region of the
        source)
    :type store:
        :py:class:`~pyrocko.gf.store.Store`

    :param interpolation:
        Interpolation method to use (choose between ``'nearest_neighbor'``
        and ``'multilinear'``).
    :type interpolation:
        optional, str

    :param vr:
        Array, containing rupture user defined rupture velocity values.
    :type vr:
        optional, :py:class:`~numpy.ndarray`

    :param times:
        Array, containing zeros, where rupture is starting, real positive
        numbers at later secondary nucleation points and -1, where time
        will be calculated. If not given, rupture starts at nucleation_x,
        nucleation_y. Times are given for discrete points with equal
        horizontal and vertical spacing.
    :type times:
        optional, :py:class:`~numpy.ndarray`

    :returns:
        Coordinates (latlondepth), coordinates (xy), rupture velocity,
        rupture propagation time of discrete points.
    :rtype:
        :py:class:`~numpy.ndarray`: ``(n_points, 3)``,
        :py:class:`~numpy.ndarray`: ``(n_points, 2)``,
        :py:class:`~numpy.ndarray`: ``(n_points_dip, n_points_strike)``,
        :py:class:`~numpy.ndarray`: ``(n_points_dip, n_points_strike)``.
    '''
    nx, ny, delta, points, points_xy = self._discretize_points(
        store, cs='xyz')

    if vr is None or vr.shape != tuple((nx, ny)):
        if vr is not None:
            # Bug fix: the original tested the array itself
            # (``if vr:``), which raises ValueError ("truth value of an
            # array ... is ambiguous") for multi-element arrays.
            logger.warning(
                'Given rupture velocities are not in right shape: '
                '(%i, %i), but needed is (%i, %i).', *vr.shape + (nx, ny))
        vr = self._discretize_rupture_v(store, interpolation, points)\
            .reshape(nx, ny)
        # NOTE: after the reshape above vr.shape == (nx, ny) always
        # holds, so the former follow-up shape warning was dead code and
        # has been removed.

    def initialize_times():
        # Build the initial time field: 0 (or given nucleation times) at
        # the grid points closest to the nucleation coordinates, -1
        # (meaning "to be solved") everywhere else.
        nucl_x, nucl_y = self.nucleation_x, self.nucleation_y

        if nucl_x.shape != nucl_y.shape:
            raise ValueError(
                'Nucleation coordinates have different shape.')

        dist_points = num.array([
            num.linalg.norm(points_xy - num.array([x, y]).ravel(), axis=1)
            for x, y in zip(nucl_x, nucl_y)])
        nucl_indices = num.argmin(dist_points, axis=1)

        if self.nucleation_time is None:
            nucl_times = num.zeros_like(nucl_indices)
        else:
            if self.nucleation_time.shape == nucl_x.shape:
                nucl_times = self.nucleation_time
            else:
                raise ValueError(
                    'Nucleation coordinates and times have different '
                    'shapes')

        t = num.full(nx * ny, -1.)
        t[nucl_indices] = nucl_times
        return t.reshape(nx, ny)

    if times is None:
        times = initialize_times()
    elif times.shape != tuple((nx, ny)):
        times = initialize_times()
        logger.warning(
            'Given times are not in right shape. Therefore standard time'
            ' array is used.')

    # Solve the eikonal equation in-place: fills the -1 entries of
    # ``times`` with first-arrival rupture times.
    eikonal_ext.eikonal_solver_fmm_cartesian(
        speeds=vr, times=times, delta=delta)

    return points, points_xy, vr, times

3018 

def get_vr_time_interpolators(
        self, store, interpolation='nearest_neighbor', force=False,
        **kwargs):
    '''
    Get interpolators for rupture velocity and rupture time.

    Additional ``**kwargs`` are passed to :py:meth:`discretize_time`.

    :param store:
        Green's function database (needs to cover whole region of the
        source).
    :type store:
        :py:class:`~pyrocko.gf.store.Store`

    :param interpolation:
        Interpolation method to use (choose between ``'nearest_neighbor'``
        and ``'multilinear'``).
    :type interpolation:
        optional, str

    :param force:
        Force recalculation of the interpolators (e.g. after change of
        nucleation point locations/times). Default is ``False``.
    :type force:
        optional, bool

    :returns:
        Cached tuple ``(nx, ny, times, vr, time_interpolator,
        vr_interpolator)``.
    '''
    interp_map = {'multilinear': 'linear', 'nearest_neighbor': 'nearest'}
    if interpolation not in interp_map:
        raise TypeError(
            'Interpolation method %s not available' % interpolation)

    if not self._interpolators.get(interpolation, False) or force:
        _, points_xy, vr, times = self.discretize_time(
            store, **kwargs)

        # Grammar fix in the two messages below: 'then' -> 'than'.
        if self.length <= 0.:
            raise ValueError(
                'length must be larger than 0. not %g' % self.length)

        if self.width <= 0.:
            raise ValueError(
                'width must be larger than 0. not %g' % self.width)

        nx, ny = times.shape
        anch_x, anch_y = map_anchor[self.anchor]

        # Convert normalized coordinates to [m] relative to the anchor.
        points_xy[:, 0] = (points_xy[:, 0] - anch_x) * self.length / 2.
        points_xy[:, 1] = (points_xy[:, 1] - anch_y) * self.width / 2.

        ascont = num.ascontiguousarray

        self._interpolators[interpolation] = (
            nx, ny, times, vr,
            RegularGridInterpolator(
                (ascont(points_xy[::ny, 0]), ascont(points_xy[:ny, 1])),
                times,
                method=interp_map[interpolation]),
            RegularGridInterpolator(
                (ascont(points_xy[::ny, 0]), ascont(points_xy[:ny, 1])),
                vr,
                method=interp_map[interpolation]))

    return self._interpolators[interpolation]

3082 

    def discretize_patches(
            self, store, interpolation='nearest_neighbor', force=False,
            grid_shape=(),
            **kwargs):
        '''
        Get rupture start time and OkadaSource elements for points on rupture.

        All source elements and their corresponding center points are
        calculated and stored in the :py:attr:`patches` attribute.

        Additional ``**kwargs`` are passed to :py:meth:`discretize_time`.

        :param store:
            Green's function database (needs to cover whole region of the
            source).
        :type store:
            :py:class:`~pyrocko.gf.store.Store`

        :param interpolation:
            Interpolation method to use (choose between ``'nearest_neighbor'``
            and ``'multilinear'``).
        :type interpolation:
            optional, str

        :param force:
            Force recalculation of the vr and time interpolators ( e.g. after
            change of nucleation point locations/times). Default is ``False``.
        :type force:
            optional, bool

        :param grid_shape:
            Desired sub fault patch grid size (nlength, nwidth). Either factor
            or grid_shape should be set.
        :type grid_shape:
            optional, tuple of int
        '''
        nx, ny, times, vr, time_interpolator, vr_interpolator = \
            self.get_vr_time_interpolators(
                store,
                interpolation=interpolation, force=force, **kwargs)
        anch_x, anch_y = map_anchor[self.anchor]

        # Half-length/-width extents of the full fault relative to the
        # anchor point, as expected by OkadaSource (al1 <= 0 <= al2 etc.).
        al = self.length / 2.
        aw = self.width / 2.
        al1 = -(al + anch_x * al)
        al2 = al - anch_x * al
        aw1 = -aw + anch_y * aw
        aw2 = aw + anch_y * aw
        assert num.abs([al1, al2]).sum() == self.length
        assert num.abs([aw1, aw2]).sum() == self.width

        def get_lame(*a, **kw):
            # Shear modulus and Poisson ratio from the store's earth model:
            # lambda is derived from vp, rho and mu; the second return value
            # is the Poisson ratio lambda / (2 (lambda + mu)).
            shear_mod = store.config.get_shear_moduli(*a, **kw)
            lamb = store.config.get_vp(*a, **kw)**2 \
                * store.config.get_rho(*a, **kw) - 2. * shear_mod
            return shear_mod, lamb / (2. * (lamb + shear_mod))

        # Elastic parameters at the source reference point, used for the
        # single large OkadaSource representing the whole fault.
        shear_mod, poisson = get_lame(
            self.lat, self.lon,
            num.array([[self.north_shift, self.east_shift, self.depth]]),
            interpolation=interpolation)

        okada_src = OkadaSource(
            lat=self.lat, lon=self.lon,
            strike=self.strike, dip=self.dip,
            north_shift=self.north_shift, east_shift=self.east_shift,
            depth=self.depth,
            al1=al1, al2=al2, aw1=aw1, aw2=aw2,
            poisson=poisson.mean(),
            shearmod=shear_mod.mean(),
            opening=kwargs.get('opening', 0.))

        # Patch grid size: explicit grid_shape wins, otherwise fall back to
        # the eikonal grid dimensions.
        if not (self.nx and self.ny):
            if grid_shape:
                self.nx, self.ny = grid_shape
            else:
                self.nx = nx
                self.ny = ny

        source_disc, source_points = okada_src.discretize(self.nx, self.ny)

        # NOTE(review): per-patch shear_mod/poisson are recomputed here but
        # not referenced again in this method — presumably the patches carry
        # their own elastic parameters from discretize(); confirm upstream.
        shear_mod, poisson = get_lame(
            self.lat, self.lon,
            num.array([src.source_patch()[:3] for src in source_disc]),
            interpolation=interpolation)

        # If the patch grid differs from the eikonal grid, interpolate
        # rupture time and velocity onto the patch centers; otherwise reuse
        # the eikonal values directly (transposed to match patch ordering).
        if (self.nx, self.ny) != (nx, ny):
            times_interp = time_interpolator(
                num.ascontiguousarray(source_points[:, :2]))
            vr_interp = vr_interpolator(
                num.ascontiguousarray(source_points[:, :2]))
        else:
            times_interp = times.T.ravel()
            vr_interp = vr.T.ravel()

        # Attach rupture velocity and absolute rupture onset time to each
        # patch (eikonal times are relative to the source origin time).
        for isrc, src in enumerate(source_disc):
            src.vr = vr_interp[isrc]
            src.time = times_interp[isrc] + self.time

        self.patches = source_disc

3183 

    def discretize_basesource(self, store, target=None):
        '''
        Prepare source for synthetic waveform calculation.

        :param store:
            Green's function database (needs to cover whole region of the
            source).
        :type store:
            :py:class:`~pyrocko.gf.store.Store`

        :param target:
            Target information.
        :type target:
            optional, :py:class:`~pyrocko.gf.targets.Target`

        :returns:
            Source discretized by a set of moment tensors and times.
        :rtype:
            :py:class:`~pyrocko.gf.meta.DiscretizedMTSource`
        '''
        if not target:
            interpolation = 'nearest_neighbor'
        else:
            interpolation = target.interpolation

        # Lazily compute patches and the traction-dislocation coefficient
        # matrix if not done before.
        if not self.patches:
            self.discretize_patches(store, interpolation)

        if self.coef_mat is None:
            self.calc_coef_mat()

        delta_slip, slip_times = self.get_delta_slip(store)
        npatches = self.nx * self.ny
        ntimes = slip_times.size

        anch_x, anch_y = map_anchor[self.anchor]

        # Patch length and width in [m].
        pln = self.length / self.nx
        pwd = self.width / self.ny

        patch_coords = num.array([
            (p.ix, p.iy)
            for p in self.patches]).reshape(self.nx, self.ny, 2)

        # Pad the slip grid with one ghost cell on each side, replicating
        # the edge values. A zero-slip boundary condition would be invalid
        # here, as it would taper the interpolated slip towards the fault
        # border and introduce unwanted interpolation effects.
        slip_grid = num.zeros((self.nx + 2, self.ny + 2, ntimes, 3))
        slip_grid[1:-1, 1:-1, :, :] = \
            delta_slip.reshape(self.nx, self.ny, ntimes, 3)

        # Corners.
        slip_grid[0, 0, :, :] = slip_grid[1, 1, :, :]
        slip_grid[0, -1, :, :] = slip_grid[1, -2, :, :]
        slip_grid[-1, 0, :, :] = slip_grid[-2, 1, :, :]
        slip_grid[-1, -1, :, :] = slip_grid[-2, -2, :, :]

        # Edges.
        slip_grid[1:-1, 0, :, :] = slip_grid[1:-1, 1, :, :]
        slip_grid[1:-1, -1, :, :] = slip_grid[1:-1, -2, :, :]
        slip_grid[0, 1:-1, :, :] = slip_grid[1, 1:-1, :, :]
        slip_grid[-1, 1:-1, :, :] = slip_grid[-2, 1:-1, :, :]

        def make_grid(patch_parameter):
            # Same ghost-cell padding scheme as for the slip grid, applied
            # to a scalar per-patch parameter.
            grid = num.zeros((self.nx + 2, self.ny + 2))
            grid[1:-1, 1:-1] = patch_parameter.reshape(self.nx, self.ny)

            grid[0, 0] = grid[1, 1]
            grid[0, -1] = grid[1, -2]
            grid[-1, 0] = grid[-2, 1]
            grid[-1, -1] = grid[-2, -2]

            grid[1:-1, 0] = grid[1:-1, 1]
            grid[1:-1, -1] = grid[1:-1, -2]
            grid[0, 1:-1] = grid[1, 1:-1]
            grid[-1, 1:-1] = grid[-2, 1:-1]

            return grid

        lamb = self.get_patch_attribute('lamb')
        mu = self.get_patch_attribute('shearmod')

        lamb_grid = make_grid(lamb)
        mu_grid = make_grid(mu)

        # Grid axes including the ghost cells, half a patch beyond the rim.
        coords_x = num.zeros(self.nx + 2)
        coords_x[1:-1] = patch_coords[:, 0, 0]
        coords_x[0] = coords_x[1] - pln / 2
        coords_x[-1] = coords_x[-2] + pln / 2

        coords_y = num.zeros(self.ny + 2)
        coords_y[1:-1] = patch_coords[0, :, 1]
        coords_y[0] = coords_y[1] - pwd / 2
        coords_y[-1] = coords_y[-2] + pwd / 2

        slip_interp = RegularGridInterpolator(
            (coords_x, coords_y, slip_times),
            slip_grid, method='nearest')

        lamb_interp = RegularGridInterpolator(
            (coords_x, coords_y),
            lamb_grid, method='nearest')

        mu_interp = RegularGridInterpolator(
            (coords_x, coords_y),
            mu_grid, method='nearest')

        # discretize basesources
        # Point-source spacing: at most the store's GF spacing, refined by
        # decimation_factor.
        mindeltagf = min(tuple(
            (self.length / self.nx, self.width / self.ny) +
            tuple(store.config.deltas)))

        nl = int((1. / self.decimation_factor) *
                 num.ceil(pln / mindeltagf)) + 1
        nw = int((1. / self.decimation_factor) *
                 num.ceil(pwd / mindeltagf)) + 1
        nsrc_patch = int(nl * nw)
        dl = pln / nl
        dw = pwd / nw

        patch_area = dl * dw

        # Regular sub-grid of point-source offsets within a single patch,
        # centered on the patch.
        xl = num.linspace(-0.5 * (pln - dl), 0.5 * (pln - dl), nl)
        xw = num.linspace(-0.5 * (pwd - dw), 0.5 * (pwd - dw), nw)

        base_coords = num.zeros((nsrc_patch, 3))
        base_coords[:, 0] = num.tile(xl, nw)
        base_coords[:, 1] = num.repeat(xw, nl)
        base_coords = num.tile(base_coords, (npatches, 1))

        center_coords = num.zeros((npatches, 3))
        center_coords[:, 0] = num.repeat(
            num.arange(self.nx) * pln + pln / 2, self.ny) - self.length / 2
        center_coords[:, 1] = num.tile(
            num.arange(self.ny) * pwd + pwd / 2, self.nx) - self.width / 2

        base_coords -= center_coords.repeat(nsrc_patch, axis=0)
        nbaselocs = base_coords.shape[0]

        # One sample per point source and time step; the third column is
        # (temporarily) used as the time axis for the slip interpolation.
        base_interp = base_coords.repeat(ntimes, axis=0)

        base_times = num.tile(slip_times, nbaselocs)
        base_interp[:, 0] -= anch_x * self.length / 2
        base_interp[:, 1] -= anch_y * self.width / 2
        base_interp[:, 2] = base_times

        _, _, _, _, time_interpolator, _ = self.get_vr_time_interpolators(
            store, interpolation=interpolation)

        time_eikonal_max = time_interpolator.values.max()

        nbasesrcs = base_interp.shape[0]
        delta_slip = slip_interp(base_interp).reshape(nbaselocs, ntimes, 3)
        lamb = lamb_interp(base_interp[:, :2]).ravel()
        mu = mu_interp(base_interp[:, :2]).ravel()

        # Disabled debugging plot of the cumulative slip distribution.
        if False:
            try:
                import matplotlib.pyplot as plt
                coords = base_coords.copy()
                norm = num.sum(num.linalg.norm(delta_slip, axis=2), axis=1)
                plt.scatter(coords[:, 0], coords[:, 1], c=norm)
                plt.show()
            except AttributeError:
                pass

        # Rotate the in-plane coordinates into the geographic NED frame and
        # shift to the source position.
        base_interp[:, 2] = 0.
        rotmat = pmt.euler_to_matrix(self.dip * d2r, self.strike * d2r, 0.0)
        base_interp = num.dot(rotmat.T, base_interp.T).T
        base_interp[:, 0] += self.north_shift
        base_interp[:, 1] += self.east_shift
        base_interp[:, 2] += self.depth

        slip_strike = delta_slip[:, :, 0].ravel()
        slip_dip = delta_slip[:, :, 1].ravel()
        slip_norm = delta_slip[:, :, 2].ravel()

        # Shear dislocation magnitude and rake angle from the strike/dip
        # slip components.
        slip_shear = num.linalg.norm([slip_strike, slip_dip], axis=0)
        slip_rake = r2d * num.arctan2(slip_dip, slip_strike)

        # Moment tensor (m6) per point source from dislocation increments.
        m6s = okada_ext.patch2m6(
            strikes=num.full(nbasesrcs, self.strike, dtype=float),
            dips=num.full(nbasesrcs, self.dip, dtype=float),
            rakes=slip_rake,
            disl_shear=slip_shear,
            disl_norm=slip_norm,
            lamb=lamb,
            mu=mu,
            nthreads=self.nthreads)

        m6s *= patch_area

        dl = -self.patches[0].al1 + self.patches[0].al2
        dw = -self.patches[0].aw1 + self.patches[0].aw2

        # Clip times beyond the eikonal front to the maximum rupture time.
        base_times[base_times > time_eikonal_max] = time_eikonal_max

        ds = meta.DiscretizedMTSource(
            lat=self.lat,
            lon=self.lon,
            times=base_times + self.time,
            north_shifts=base_interp[:, 0],
            east_shifts=base_interp[:, 1],
            depths=base_interp[:, 2],
            m6s=m6s,
            dl=dl,
            dw=dw,
            nl=self.nx,
            nw=self.ny)

        return ds

3392 

3393 def calc_coef_mat(self): 

3394 ''' 

3395 Calculate coefficients connecting tractions and dislocations. 

3396 ''' 

3397 if not self.patches: 

3398 raise ValueError( 

3399 'Patches are needed. Please calculate them first.') 

3400 

3401 self.coef_mat = make_okada_coefficient_matrix( 

3402 self.patches, nthreads=self.nthreads, pure_shear=self.pure_shear) 

3403 

3404 def get_patch_attribute(self, attr): 

3405 ''' 

3406 Get patch attributes. 

3407 

3408 :param attr: 

3409 Name of selected attribute (see 

3410 :py:class`pyrocko.modelling.okada.OkadaSource`). 

3411 :type attr: 

3412 str 

3413 

3414 :returns: 

3415 Array with attribute value for each fault patch. 

3416 :rtype: 

3417 :py:class:`~numpy.ndarray` 

3418 

3419 ''' 

3420 if not self.patches: 

3421 raise ValueError( 

3422 'Patches are needed. Please calculate them first.') 

3423 return num.array([getattr(p, attr) for p in self.patches]) 

3424 

3425 def get_slip( 

3426 self, 

3427 time=None, 

3428 scale_slip=True, 

3429 interpolation='nearest_neighbor', 

3430 **kwargs): 

3431 ''' 

3432 Get slip per subfault patch for given time after rupture start. 

3433 

3434 :param time: 

3435 Time after origin [s], for which slip is computed. If not 

3436 given, final static slip is returned. 

3437 :type time: 

3438 optional, float > 0. 

3439 

3440 :param scale_slip: 

3441 If ``True`` and :py:attr:`slip` given, all slip values are scaled 

3442 to fit the given maximum slip. 

3443 :type scale_slip: 

3444 optional, bool 

3445 

3446 :param interpolation: 

3447 Interpolation method to use (choose between ``'nearest_neighbor'`` 

3448 and ``'multilinear'``). 

3449 :type interpolation: 

3450 optional, str 

3451 

3452 :returns: 

3453 Inverted dislocations (:math:`u_{strike}, u_{dip}, u_{tensile}`) 

3454 for each source patch. 

3455 :rtype: 

3456 :py:class:`~numpy.ndarray`: ``(n_sources, 3)`` 

3457 ''' 

3458 

3459 if self.patches is None: 

3460 raise ValueError( 

3461 'Please discretize the source first (discretize_patches())') 

3462 npatches = len(self.patches) 

3463 tractions = self.get_tractions() 

3464 time_patch_max = self.get_patch_attribute('time').max() - self.time 

3465 

3466 time_patch = time 

3467 if time is None: 

3468 time_patch = time_patch_max 

3469 

3470 if self.coef_mat is None: 

3471 self.calc_coef_mat() 

3472 

3473 if tractions.shape != (npatches, 3): 

3474 raise AttributeError( 

3475 'The traction vector is of invalid shape.' 

3476 ' Required shape is (npatches, 3)') 

3477 

3478 patch_mask = num.ones(npatches, dtype=bool) 

3479 if self.patch_mask is not None: 

3480 patch_mask = self.patch_mask 

3481 

3482 times = self.get_patch_attribute('time') - self.time 

3483 times[~patch_mask] = time_patch + 1. # exlcude unmasked patches 

3484 relevant_sources = num.nonzero(times <= time_patch)[0] 

3485 disloc_est = num.zeros_like(tractions) 

3486 

3487 if self.smooth_rupture: 

3488 patch_activation = num.zeros(npatches) 

3489 

3490 nx, ny, times, vr, time_interpolator, vr_interpolator = \ 

3491 self.get_vr_time_interpolators( 

3492 store, interpolation=interpolation) 

3493 

3494 # Getting the native Eikonal grid, bit hackish 

3495 points_x = num.round(time_interpolator.grid[0], decimals=2) 

3496 points_y = num.round(time_interpolator.grid[1], decimals=2) 

3497 times_eikonal = time_interpolator.values 

3498 

3499 time_max = time 

3500 if time is None: 

3501 time_max = times_eikonal.max() 

3502 

3503 for ip, p in enumerate(self.patches): 

3504 ul = num.round((p.ix + p.al1, p.iy + p.aw1), decimals=2) 

3505 lr = num.round((p.ix + p.al2, p.iy + p.aw2), decimals=2) 

3506 

3507 idx_length = num.logical_and( 

3508 points_x >= ul[0], points_x <= lr[0]) 

3509 idx_width = num.logical_and( 

3510 points_y >= ul[1], points_y <= lr[1]) 

3511 

3512 times_patch = times_eikonal[num.ix_(idx_length, idx_width)] 

3513 if times_patch.size == 0: 

3514 raise AttributeError('could not use smooth_rupture') 

3515 

3516 patch_activation[ip] = \ 

3517 (times_patch <= time_max).sum() / times_patch.size 

3518 

3519 if time_patch == 0 and time_patch != time_patch_max: 

3520 patch_activation[ip] = 0. 

3521 

3522 patch_activation[~patch_mask] = 0. # exlcude unmasked patches 

3523 

3524 relevant_sources = num.nonzero(patch_activation > 0.)[0] 

3525 

3526 if relevant_sources.size == 0: 

3527 return disloc_est 

3528 

3529 indices_disl = num.repeat(relevant_sources * 3, 3) 

3530 indices_disl[1::3] += 1 

3531 indices_disl[2::3] += 2 

3532 

3533 disloc_est[relevant_sources] = invert_fault_dislocations_bem( 

3534 stress_field=tractions[relevant_sources, :].ravel(), 

3535 coef_mat=self.coef_mat[indices_disl, :][:, indices_disl], 

3536 pure_shear=self.pure_shear, nthreads=self.nthreads, 

3537 epsilon=None, 

3538 **kwargs) 

3539 

3540 if self.smooth_rupture: 

3541 disloc_est *= patch_activation[:, num.newaxis] 

3542 

3543 if scale_slip and self.slip is not None: 

3544 disloc_tmax = num.zeros(npatches) 

3545 

3546 indices_disl = num.repeat(num.nonzero(patch_mask)[0] * 3, 3) 

3547 indices_disl[1::3] += 1 

3548 indices_disl[2::3] += 2 

3549 

3550 disloc_tmax[patch_mask] = num.linalg.norm( 

3551 invert_fault_dislocations_bem( 

3552 stress_field=tractions[patch_mask, :].ravel(), 

3553 coef_mat=self.coef_mat[indices_disl, :][:, indices_disl], 

3554 pure_shear=self.pure_shear, nthreads=self.nthreads, 

3555 epsilon=None, 

3556 **kwargs), axis=1) 

3557 

3558 disloc_tmax_max = disloc_tmax.max() 

3559 if disloc_tmax_max == 0.: 

3560 logger.warning( 

3561 'slip scaling not performed. Maximum slip is 0.') 

3562 

3563 disloc_est *= self.slip / disloc_tmax_max 

3564 

3565 return disloc_est 

3566 

3567 def get_delta_slip( 

3568 self, 

3569 store=None, 

3570 deltat=None, 

3571 delta=True, 

3572 interpolation='nearest_neighbor', 

3573 **kwargs): 

3574 ''' 

3575 Get slip change snapshots. 

3576 

3577 The time interval, within which the slip changes are computed is 

3578 determined by the sampling rate of the Green's function database or 

3579 ``deltat``. Additional ``**kwargs`` are passed to :py:meth:`get_slip`. 

3580 

3581 :param store: 

3582 Green's function database (needs to cover whole region of of the 

3583 source). Its sampling interval is used as time increment for slip 

3584 difference calculation. Either ``deltat`` or ``store`` should be 

3585 given. 

3586 :type store: 

3587 optional, :py:class:`~pyrocko.gf.store.Store` 

3588 

3589 :param deltat: 

3590 Time interval for slip difference calculation [s]. Either 

3591 ``deltat`` or ``store`` should be given. 

3592 :type deltat: 

3593 optional, float 

3594 

3595 :param delta: 

3596 If ``True``, slip differences between two time steps are given. If 

3597 ``False``, cumulative slip for all time steps. 

3598 :type delta: 

3599 optional, bool 

3600 

3601 :param interpolation: 

3602 Interpolation method to use (choose between ``'nearest_neighbor'`` 

3603 and ``'multilinear'``). 

3604 :type interpolation: 

3605 optional, str 

3606 

3607 :returns: 

3608 Displacement changes(:math:`\\Delta u_{strike}, 

3609 \\Delta u_{dip} , \\Delta u_{tensile}`) for each source patch and 

3610 time; corner times, for which delta slip is computed. The order of 

3611 displacement changes array is: 

3612 

3613 .. math:: 

3614 

3615 &[[\\\\ 

3616 &[\\Delta u_{strike, patch1, t1}, 

3617 \\Delta u_{dip, patch1, t1}, 

3618 \\Delta u_{tensile, patch1, t1}],\\\\ 

3619 &[\\Delta u_{strike, patch1, t2}, 

3620 \\Delta u_{dip, patch1, t2}, 

3621 \\Delta u_{tensile, patch1, t2}]\\\\ 

3622 &], [\\\\ 

3623 &[\\Delta u_{strike, patch2, t1}, ...],\\\\ 

3624 &[\\Delta u_{strike, patch2, t2}, ...]]]\\\\ 

3625 

3626 :rtype: :py:class:`~numpy.ndarray`: ``(n_sources, n_times, 3)``, 

3627 :py:class:`~numpy.ndarray`: ``(n_times, )`` 

3628 ''' 

3629 if store and deltat: 

3630 raise AttributeError( 

3631 'Argument collision. ' 

3632 'Please define only the store or the deltat argument.') 

3633 

3634 if store: 

3635 deltat = store.config.deltat 

3636 

3637 if not deltat: 

3638 raise AttributeError('Please give a GF store or set deltat.') 

3639 

3640 npatches = len(self.patches) 

3641 

3642 _, _, _, _, time_interpolator, _ = self.get_vr_time_interpolators( 

3643 store, interpolation=interpolation) 

3644 tmax = time_interpolator.values.max() 

3645 

3646 calc_times = num.arange(0., tmax + deltat, deltat) 

3647 calc_times[calc_times > tmax] = tmax 

3648 

3649 disloc_est = num.zeros((npatches, calc_times.size, 3)) 

3650 

3651 for itime, t in enumerate(calc_times): 

3652 disloc_est[:, itime, :] = self.get_slip( 

3653 time=t, scale_slip=False, **kwargs) 

3654 

3655 if self.slip: 

3656 disloc_tmax = num.linalg.norm( 

3657 self.get_slip(scale_slip=False, time=tmax), 

3658 axis=1) 

3659 

3660 disloc_tmax_max = disloc_tmax.max() 

3661 if disloc_tmax_max == 0.: 

3662 logger.warning( 

3663 'Slip scaling not performed. Maximum slip is 0.') 

3664 else: 

3665 disloc_est *= self.slip / disloc_tmax_max 

3666 

3667 if not delta: 

3668 return disloc_est, calc_times 

3669 

3670 # if we have only one timestep there is no gradient 

3671 if calc_times.size > 1: 

3672 disloc_init = disloc_est[:, 0, :] 

3673 disloc_est = num.diff(disloc_est, axis=1) 

3674 disloc_est = num.concatenate(( 

3675 disloc_init[:, num.newaxis, :], disloc_est), axis=1) 

3676 

3677 calc_times = calc_times 

3678 

3679 return disloc_est, calc_times 

3680 

3681 def get_slip_rate(self, *args, **kwargs): 

3682 ''' 

3683 Get slip rate inverted from patches. 

3684 

3685 The time interval, within which the slip rates are computed is 

3686 determined by the sampling rate of the Green's function database or 

3687 ``deltat``. Additional ``*args`` and ``**kwargs`` are passed to 

3688 :py:meth:`get_delta_slip`. 

3689 

3690 :returns: 

3691 Slip rates (:math:`\\Delta u_{strike}/\\Delta t`, 

3692 :math:`\\Delta u_{dip}/\\Delta t, \\Delta u_{tensile}/\\Delta t`) 

3693 for each source patch and time; corner times, for which slip rate 

3694 is computed. The order of sliprate array is: 

3695 

3696 .. math:: 

3697 

3698 &[[\\\\ 

3699 &[\\Delta u_{strike, patch1, t1}/\\Delta t, 

3700 \\Delta u_{dip, patch1, t1}/\\Delta t, 

3701 \\Delta u_{tensile, patch1, t1}/\\Delta t],\\\\ 

3702 &[\\Delta u_{strike, patch1, t2}/\\Delta t, 

3703 \\Delta u_{dip, patch1, t2}/\\Delta t, 

3704 \\Delta u_{tensile, patch1, t2}/\\Delta t]], [\\\\ 

3705 &[\\Delta u_{strike, patch2, t1}/\\Delta t, ...],\\\\ 

3706 &[\\Delta u_{strike, patch2, t2}/\\Delta t, ...]]]\\\\ 

3707 

3708 :rtype: :py:class:`~numpy.ndarray`: ``(n_sources, n_times, 3)``, 

3709 :py:class:`~numpy.ndarray`: ``(n_times, )`` 

3710 ''' 

3711 ddisloc_est, calc_times = self.get_delta_slip( 

3712 *args, delta=True, **kwargs) 

3713 

3714 dt = num.concatenate( 

3715 [(num.diff(calc_times)[0], ), num.diff(calc_times)]) 

3716 slip_rate = num.linalg.norm(ddisloc_est, axis=2) / dt 

3717 

3718 return slip_rate, calc_times 

3719 

3720 def get_moment_rate_patches(self, *args, **kwargs): 

3721 ''' 

3722 Get scalar seismic moment rate for each patch individually. 

3723 

3724 Additional ``*args`` and ``**kwargs`` are passed to 

3725 :py:meth:`get_slip_rate`. 

3726 

3727 :returns: 

3728 Seismic moment rate for each source patch and time; corner times, 

3729 for which patch moment rate is computed based on slip rate. The 

3730 order of the moment rate array is: 

3731 

3732 .. math:: 

3733 

3734 &[\\\\ 

3735 &[(\\Delta M / \\Delta t)_{patch1, t1}, 

3736 (\\Delta M / \\Delta t)_{patch1, t2}, ...],\\\\ 

3737 &[(\\Delta M / \\Delta t)_{patch2, t1}, 

3738 (\\Delta M / \\Delta t)_{patch, t2}, ...],\\\\ 

3739 &[...]]\\\\ 

3740 

3741 :rtype: :py:class:`~numpy.ndarray`: ``(n_sources, n_times)``, 

3742 :py:class:`~numpy.ndarray`: ``(n_times, )`` 

3743 ''' 

3744 slip_rate, calc_times = self.get_slip_rate(*args, **kwargs) 

3745 

3746 shear_mod = self.get_patch_attribute('shearmod') 

3747 p_length = self.get_patch_attribute('length') 

3748 p_width = self.get_patch_attribute('width') 

3749 

3750 dA = p_length * p_width 

3751 

3752 mom_rate = shear_mod[:, num.newaxis] * slip_rate * dA[:, num.newaxis] 

3753 

3754 return mom_rate, calc_times 

3755 

3756 def get_moment_rate(self, store, target=None, deltat=None): 

3757 ''' 

3758 Get seismic source moment rate for the total source (STF). 

3759 

3760 :param store: 

3761 Green's function database (needs to cover whole region of of the 

3762 source). Its ``deltat`` [s] is used as time increment for slip 

3763 difference calculation. Either ``deltat`` or ``store`` should be 

3764 given. 

3765 :type store: 

3766 :py:class:`~pyrocko.gf.store.Store` 

3767 

3768 :param target: 

3769 Target information, needed for interpolation method. 

3770 :type target: 

3771 optional, :py:class:`~pyrocko.gf.targets.Target` 

3772 

3773 :param deltat: 

3774 Time increment for slip difference calculation [s]. If not given 

3775 ``store.deltat`` is used. 

3776 :type deltat: 

3777 optional, float 

3778 

3779 :return: 

3780 Seismic moment rate [Nm/s] for each time; corner times, for which 

3781 moment rate is computed. The order of the moment rate array is: 

3782 

3783 .. math:: 

3784 

3785 &[\\\\ 

3786 &(\\Delta M / \\Delta t)_{t1},\\\\ 

3787 &(\\Delta M / \\Delta t)_{t2},\\\\ 

3788 &...]\\\\ 

3789 

3790 :rtype: 

3791 :py:class:`~numpy.ndarray`: ``(n_times, )``, 

3792 :py:class:`~numpy.ndarray`: ``(n_times, )`` 

3793 ''' 

3794 if not deltat: 

3795 deltat = store.config.deltat 

3796 return self.discretize_basesource( 

3797 store, target=target).get_moment_rate(deltat) 

3798 

3799 def get_moment(self, *args, **kwargs): 

3800 ''' 

3801 Get seismic cumulative moment. 

3802 

3803 Additional ``*args`` and ``**kwargs`` are passed to 

3804 :py:meth:`get_magnitude`. 

3805 

3806 :returns: 

3807 Cumulative seismic moment in [Nm]. 

3808 :rtype: 

3809 float 

3810 ''' 

3811 return float(pmt.magnitude_to_moment(self.get_magnitude( 

3812 *args, **kwargs))) 

3813 

3814 def rescale_slip(self, magnitude=None, moment=None, **kwargs): 

3815 ''' 

3816 Rescale source slip based on given target magnitude or seismic moment. 

3817 

3818 Rescale the maximum source slip to fit the source moment magnitude or 

3819 seismic moment to the given target values. Either ``magnitude`` or 

3820 ``moment`` need to be given. Additional ``**kwargs`` are passed to 

3821 :py:meth:`get_moment`. 

3822 

3823 :param magnitude: 

3824 Target moment magnitude :math:`M_\\mathrm{w}` as in 

3825 [Hanks and Kanamori, 1979] 

3826 :type magnitude: 

3827 optional, float 

3828 

3829 :param moment: 

3830 Target seismic moment :math:`M_0` [Nm]. 

3831 :type moment: 

3832 optional, float 

3833 ''' 

3834 if self.slip is None: 

3835 self.slip = 1. 

3836 logger.warning('No slip found for rescaling. ' 

3837 'An initial slip of 1 m is assumed.') 

3838 

3839 if magnitude is None and moment is None: 

3840 raise ValueError( 

3841 'Either target magnitude or moment need to be given.') 

3842 

3843 moment_init = self.get_moment(**kwargs) 

3844 

3845 if magnitude is not None: 

3846 moment = pmt.magnitude_to_moment(magnitude) 

3847 

3848 self.slip *= moment / moment_init 

3849 

    def get_centroid(self, store, *args, **kwargs):
        '''
        Centroid of the pseudo dynamic rupture model.

        The centroid location and time are derived from the locations and times
        of the individual patches weighted with their moment contribution.
        Additional ``**kwargs`` are passed to :py:meth:`pyrocko_moment_tensor`.

        :param store:
            Green's function database (needs to cover whole region of of the
            source). Its ``deltat`` [s] is used as time increment for slip
            difference calculation. Either ``deltat`` or ``store`` should be
            given.
        :type store:
            :py:class:`~pyrocko.gf.store.Store`

        :returns:
            The centroid location and associated moment tensor.
        :rtype:
            :py:class:`pyrocko.model.Event`
        '''
        _, _, _, _, time, _ = self.get_vr_time_interpolators(store)
        t_max = time.values.max()

        # Single time step covering the full rupture duration.
        moment_rate, times = self.get_moment_rate_patches(deltat=t_max)

        # Per-patch moment contribution, used as spatial weights.
        # NOTE(review): weights use moment_rate * times rather than the
        # time increment — presumably equivalent here since a single step
        # of length t_max is used; confirm.
        moment = num.sum(moment_rate * times, axis=1)
        weights = moment / moment.sum()

        norths = self.get_patch_attribute('north_shift')
        easts = self.get_patch_attribute('east_shift')
        depths = self.get_patch_attribute('depth')

        # Moment-weighted average of the patch positions.
        centroid_n = num.sum(weights * norths)
        centroid_e = num.sum(weights * easts)
        centroid_d = num.sum(weights * depths)

        centroid_lat, centroid_lon = ne_to_latlon(
            self.lat, self.lon, centroid_n, centroid_e)

        # Centroid time from the source time function: moment-weighted
        # average of the STF sample times.
        moment_rate_, times = self.get_moment_rate(store)
        delta_times = num.concatenate((
            [times[1] - times[0]],
            num.diff(times)))
        moment_src = delta_times * moment_rate_

        centroid_t = num.sum(
            moment_src / num.sum(moment_src) * times) + self.time

        mt = self.pyrocko_moment_tensor(store, *args, **kwargs)

        return model.Event(
            name=self.name,
            lat=centroid_lat,
            lon=centroid_lon,
            depth=centroid_d,
            time=centroid_t,
            moment_tensor=mt,
            magnitude=mt.magnitude,
            duration=t_max)

3910 

3911 

3912class DoubleDCSource(SourceWithMagnitude): 

3913 ''' 

3914 Two double-couple point sources separated in space and time. 

3915 Moment share between the sub-sources is controlled by the 

3916 parameter mix. 

3917 The position of the subsources is dependent on the moment 

3918 distribution between the two sources. Depth, east and north 

3919 shift are given for the centroid between the two double-couples. 

3920 The subsources will positioned according to their moment shares 

3921 around this centroid position. 

3922 This is done according to their delta parameters, which are 

3923 therefore in relation to that centroid. 

3924 Note that depth of the subsources therefore can be 

3925 depth+/-delta_depth. For shallow earthquakes therefore 

3926 the depth has to be chosen deeper to avoid sampling 

3927 above surface. 

3928 ''' 

3929 

3930 strike1 = Float.T( 

3931 default=0.0, 

3932 help='strike direction in [deg], measured clockwise from north') 

3933 

3934 dip1 = Float.T( 

3935 default=90.0, 

3936 help='dip angle in [deg], measured downward from horizontal') 

3937 

3938 azimuth = Float.T( 

3939 default=0.0, 

3940 help='azimuth to second double-couple [deg], ' 

3941 'measured at first, clockwise from north') 

3942 

3943 rake1 = Float.T( 

3944 default=0.0, 

3945 help='rake angle in [deg], ' 

3946 'measured counter-clockwise from right-horizontal ' 

3947 'in on-plane view') 

3948 

3949 strike2 = Float.T( 

3950 default=0.0, 

3951 help='strike direction in [deg], measured clockwise from north') 

3952 

3953 dip2 = Float.T( 

3954 default=90.0, 

3955 help='dip angle in [deg], measured downward from horizontal') 

3956 

3957 rake2 = Float.T( 

3958 default=0.0, 

3959 help='rake angle in [deg], ' 

3960 'measured counter-clockwise from right-horizontal ' 

3961 'in on-plane view') 

3962 

3963 delta_time = Float.T( 

3964 default=0.0, 

3965 help='separation of double-couples in time (t2-t1) [s]') 

3966 

3967 delta_depth = Float.T( 

3968 default=0.0, 

3969 help='difference in depth (z2-z1) [m]') 

3970 

3971 distance = Float.T( 

3972 default=0.0, 

3973 help='distance between the two double-couples [m]') 

3974 

3975 mix = Float.T( 

3976 default=0.5, 

3977 help='how to distribute the moment to the two doublecouples ' 

3978 'mix=0 -> m1=1 and m2=0; mix=1 -> m1=0, m2=1') 

3979 

3980 stf1 = STF.T( 

3981 optional=True, 

3982 help='Source time function of subsource 1 ' 

3983 '(if given, overrides STF from attribute :py:gattr:`Source.stf`)') 

3984 

3985 stf2 = STF.T( 

3986 optional=True, 

3987 help='Source time function of subsource 2 ' 

3988 '(if given, overrides STF from attribute :py:gattr:`Source.stf`)') 

3989 

3990 discretized_source_class = meta.DiscretizedMTSource 

3991 

    def base_key(self):
        # Tuple of all parameters that determine the pre-stack response;
        # presumably used as a cache key for discretized sources (compare
        # Source.base_key) — confirm. Both sub-source STFs and both
        # double-couple geometries are included.
        return (
            self.time, self.depth, self.lat, self.north_shift,
            self.lon, self.east_shift, type(self).__name__) + \
            self.effective_stf1_pre().base_key() + \
            self.effective_stf2_pre().base_key() + (
            self.strike1, self.dip1, self.rake1,
            self.strike2, self.dip2, self.rake2,
            self.delta_time, self.delta_depth,
            self.azimuth, self.distance, self.mix)

4002 

    def get_factor(self):
        # Scalar moment is the amplitude factor applied to the
        # unit-amplitude discretized source.
        return self.moment

4005 

    def effective_stf1_pre(self):
        # STF for subsource 1: explicit stf1 wins over the shared stf,
        # falling back to a unit pulse.
        return self.stf1 or self.stf or g_unit_pulse

4008 

    def effective_stf2_pre(self):
        # STF for subsource 2: explicit stf2 wins over the shared stf,
        # falling back to a unit pulse.
        return self.stf2 or self.stf or g_unit_pulse

4011 

    def effective_stf_post(self):
        # The sub-source STFs are applied pre-stack (see
        # effective_stf1_pre/effective_stf2_pre), so no additional
        # post-processing STF is applied.
        return g_unit_pulse

4014 

4015 def split(self): 

4016 a1 = 1.0 - self.mix 

4017 a2 = self.mix 

4018 delta_north = math.cos(self.azimuth * d2r) * self.distance 

4019 delta_east = math.sin(self.azimuth * d2r) * self.distance 

4020 

4021 dc1 = DCSource( 

4022 lat=self.lat, 

4023 lon=self.lon, 

4024 time=self.time - self.delta_time * a2, 

4025 north_shift=self.north_shift - delta_north * a2, 

4026 east_shift=self.east_shift - delta_east * a2, 

4027 depth=self.depth - self.delta_depth * a2, 

4028 moment=self.moment * a1, 

4029 strike=self.strike1, 

4030 dip=self.dip1, 

4031 rake=self.rake1, 

4032 stf=self.stf1 or self.stf) 

4033 

4034 dc2 = DCSource( 

4035 lat=self.lat, 

4036 lon=self.lon, 

4037 time=self.time + self.delta_time * a1, 

4038 north_shift=self.north_shift + delta_north * a1, 

4039 east_shift=self.east_shift + delta_east * a1, 

4040 depth=self.depth + self.delta_depth * a1, 

4041 moment=self.moment * a2, 

4042 strike=self.strike2, 

4043 dip=self.dip2, 

4044 rake=self.rake2, 

4045 stf=self.stf2 or self.stf) 

4046 

4047 return [dc1, dc2] 

4048 

    def discretize_basesource(self, store, target=None):
        '''
        Discretize to a :py:class:`~pyrocko.gf.meta.DiscretizedMTSource`.

        Each sub-double-couple is discretized in time with its own
        effective STF; the two resulting point clouds are concatenated.
        The (unit) moment is split ``(1 - mix)`` / ``mix`` between
        sub-source 1 and 2; the absolute moment enters via get_factor().
        '''
        a1 = 1.0 - self.mix
        a2 = self.mix
        mot1 = pmt.MomentTensor(strike=self.strike1, dip=self.dip1,
                                rake=self.rake1, scalar_moment=a1)
        mot2 = pmt.MomentTensor(strike=self.strike2, dip=self.dip2,
                                rake=self.rake2, scalar_moment=a2)

        # Horizontal offset vector between the two sub-sources.
        delta_north = math.cos(self.azimuth * d2r) * self.distance
        delta_east = math.sin(self.azimuth * d2r) * self.distance

        # Sub-source 1 is shifted earlier by a2*delta_time, sub-source 2
        # later by a1*delta_time (consistent with split()).
        times1, amplitudes1 = self.effective_stf1_pre().discretize_t(
            store.config.deltat, self.time - self.delta_time * a2)

        times2, amplitudes2 = self.effective_stf2_pre().discretize_t(
            store.config.deltat, self.time + self.delta_time * a1)

        nt1 = times1.size
        nt2 = times2.size

        ds = meta.DiscretizedMTSource(
            lat=self.lat,
            lon=self.lon,
            times=num.concatenate((times1, times2)),
            north_shifts=num.concatenate((
                num.repeat(self.north_shift - delta_north * a2, nt1),
                num.repeat(self.north_shift + delta_north * a1, nt2))),
            east_shifts=num.concatenate((
                num.repeat(self.east_shift - delta_east * a2, nt1),
                num.repeat(self.east_shift + delta_east * a1, nt2))),
            depths=num.concatenate((
                num.repeat(self.depth - self.delta_depth * a2, nt1),
                num.repeat(self.depth + self.delta_depth * a1, nt2))),
            # Outer product: each m6 vector scaled by its STF amplitudes.
            m6s=num.vstack((
                mot1.m6()[num.newaxis, :] * amplitudes1[:, num.newaxis],
                mot2.m6()[num.newaxis, :] * amplitudes2[:, num.newaxis])))

        return ds

4087 

4088 def pyrocko_moment_tensor(self, store=None, target=None): 

4089 a1 = 1.0 - self.mix 

4090 a2 = self.mix 

4091 mot1 = pmt.MomentTensor(strike=self.strike1, dip=self.dip1, 

4092 rake=self.rake1, 

4093 scalar_moment=a1 * self.moment) 

4094 mot2 = pmt.MomentTensor(strike=self.strike2, dip=self.dip2, 

4095 rake=self.rake2, 

4096 scalar_moment=a2 * self.moment) 

4097 return pmt.MomentTensor(m=mot1.m() + mot2.m()) 

4098 

    def pyrocko_event(self, store=None, target=None, **kwargs):
        # Delegate to SourceWithMagnitude, attaching the combined moment
        # tensor of both sub-sources to the event.
        return SourceWithMagnitude.pyrocko_event(
            self, store, target,
            moment_tensor=self.pyrocko_moment_tensor(store, target),
            **kwargs)

4104 

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        '''
        Build a :py:class:`DoubleDCSource` from a pyrocko event.

        If the event carries a moment tensor, both sub-sources are
        initialized with the same (first) nodal plane and ``mix=0.0``,
        i.e. all moment is assigned to sub-source 1.
        '''
        d = {}
        mt = ev.moment_tensor
        if mt:
            (strike, dip, rake), _ = mt.both_strike_dip_rake()
            d.update(
                strike1=float(strike),
                dip1=float(dip),
                rake1=float(rake),
                strike2=float(strike),
                dip2=float(dip),
                rake2=float(rake),
                mix=0.0,
                magnitude=float(mt.moment_magnitude()))

        # Explicit keyword arguments win over values derived from the event.
        d.update(kwargs)
        source = super(DoubleDCSource, cls).from_pyrocko_event(ev, **d)
        # Move the shared STF onto sub-source 1; sub-source 2 gets a
        # zero-duration pulse (it carries no moment with mix=0.0).
        source.stf1 = source.stf
        source.stf2 = HalfSinusoidSTF(effective_duration=0.)
        source.stf = None
        return source

4127 

4128 

class RingfaultSource(SourceWithMagnitude):
    '''
    A ring fault with vertical doublecouples.

    The fault is discretized into ``npointsources`` vertical
    double-couples equally spaced on a circle of the given ``diameter``,
    tilted according to ``strike`` and ``dip``.
    '''

    diameter = Float.T(
        default=1.0,
        help='diameter of the ring in [m]')

    sign = Float.T(
        default=1.0,
        help='inside of the ring moves up (+1) or down (-1)')

    strike = Float.T(
        default=0.0,
        help='strike direction of the ring plane, clockwise from north,'
             ' in [deg]')

    dip = Float.T(
        default=0.0,
        help='dip angle of the ring plane from horizontal in [deg]')

    npointsources = Int.T(
        default=360,
        help='number of point sources to use')

    discretized_source_class = meta.DiscretizedMTSource

    def base_key(self):
        # sign and moment are applied via get_factor(), so they are not
        # part of the GF base key.
        return Source.base_key(self) + (
            self.strike, self.dip, self.diameter, self.npointsources)

    def get_factor(self):
        return self.sign * self.moment

    def discretize_basesource(self, store=None, target=None):
        n = self.npointsources
        phi = num.linspace(0, 2.0 * num.pi, n, endpoint=False)

        # Points on a circle in the (north, east) plane...
        points = num.zeros((n, 3))
        points[:, 0] = num.cos(phi) * 0.5 * self.diameter
        points[:, 1] = num.sin(phi) * 0.5 * self.diameter

        # ...tilted into the ring plane given by dip and strike.
        rotmat = pmt.euler_to_matrix(self.dip * d2r, self.strike * d2r, 0.0)
        points = num.dot(rotmat.T, points.T).T  # !!! ?

        points[:, 0] += self.north_shift
        points[:, 1] += self.east_shift
        points[:, 2] += self.depth

        # Unit vertical double-couple; total moment is spread equally over
        # all point sources (1/n each).
        m = num.array(pmt.MomentTensor(strike=90., dip=90., rake=-90.,
                                       scalar_moment=1.0 / n).m())

        # Per-point rotation about the vertical axis by the ring angle phi,
        # shaped (n, 3, 3).
        rotmats = num.transpose(
            [[num.cos(phi), num.sin(phi), num.zeros(n)],
             [-num.sin(phi), num.cos(phi), num.zeros(n)],
             [num.zeros(n), num.zeros(n), num.ones(n)]], (2, 0, 1))

        ms = num.zeros((n, 3, 3))
        for i in range(n):
            # Rotate the unit mechanism around the ring, then tilt it into
            # the ring plane.
            mtemp = num.dot(rotmats[i].T, num.dot(m, rotmats[i]))
            ms[i, :, :] = num.dot(rotmat.T, num.dot(mtemp, rotmat))

        # Pack symmetric tensors into m6 = (mnn, mee, mdd, mne, mnd, med).
        m6s = num.vstack((ms[:, 0, 0], ms[:, 1, 1], ms[:, 2, 2],
                          ms[:, 0, 1], ms[:, 0, 2], ms[:, 1, 2])).T

        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)

        nt = times.size

        return meta.DiscretizedMTSource(
            times=num.tile(times, n),
            lat=self.lat,
            lon=self.lon,
            north_shifts=num.repeat(points[:, 0], nt),
            east_shifts=num.repeat(points[:, 1], nt),
            depths=num.repeat(points[:, 2], nt),
            m6s=num.repeat(m6s, nt, axis=0) * num.tile(
                amplitudes, n)[:, num.newaxis])

4209 

4210 

class CombiSource(Source):
    '''
    Composite source model.

    Wraps a list of arbitrary sub-sources. The composite's origin is
    placed at the first sub-source's (lat, lon) with the mean of the
    sub-sources' times, shifts and depths.
    '''

    discretized_source_class = meta.DiscretizedMTSource

    subsources = List.T(Source.T())

    def __init__(self, subsources=None, **kwargs):
        '''
        :param subsources: non-empty list of :py:class:`Source` objects
        :raises BadRequest: if no sub-sources are given
        '''
        # Avoid the mutable default argument; ``None`` means "not given".
        subsources = list(subsources) if subsources else []
        if not subsources:
            raise BadRequest(
                'Need at least one sub-source to create a CombiSource object.')

        lats = num.array(
            [subsource.lat for subsource in subsources], dtype=float)
        lons = num.array(
            [subsource.lon for subsource in subsources], dtype=float)

        lat, lon = lats[0], lons[0]
        # Normalize all sub-sources onto a common geographic origin when
        # they do not already share one. Bug fix: the previous condition
        # `not num.all(lats == lat) and num.all(lons == lon)` bound as
        # `(not A) and B`, so inputs differing only in longitude were
        # never normalized.
        if not (num.all(lats == lat) and num.all(lons == lon)):
            subsources = [s.clone() for s in subsources]
            for subsource in subsources[1:]:
                subsource.set_origin(lat, lon)

        depth = float(num.mean([p.depth for p in subsources]))
        time = float(num.mean([p.time for p in subsources]))
        north_shift = float(num.mean([p.north_shift for p in subsources]))
        east_shift = float(num.mean([p.east_shift for p in subsources]))
        kwargs.update(
            time=time,
            lat=float(lat),
            lon=float(lon),
            north_shift=north_shift,
            east_shift=east_shift,
            depth=depth)

        Source.__init__(self, subsources=subsources, **kwargs)

    def get_factor(self):
        # Sub-sources carry their own factors (applied per sub-source in
        # discretize_basesource), so the composite itself scales by 1.
        return 1.0

    def discretize_basesource(self, store, target=None):
        '''
        Discretize all sub-sources and merge the resulting point clouds.
        '''
        dsources = []
        for sf in self.subsources:
            ds = sf.discretize_basesource(store, target)
            ds.m6s *= sf.get_factor()
            dsources.append(ds)

        return meta.DiscretizedMTSource.combine(dsources)

4261 

4262 

class SFSource(Source):
    '''
    A single force point source.

    Supported GF schemes: `'elastic5'`.
    '''

    discretized_source_class = meta.DiscretizedSFSource

    fn = Float.T(
        default=0.,
        help='northward component of single force [N]')

    fe = Float.T(
        default=0.,
        help='eastward component of single force [N]')

    fd = Float.T(
        default=0.,
        help='downward component of single force [N]')

    def __init__(self, **kwargs):
        # No extra state beyond the declared guts properties.
        Source.__init__(self, **kwargs)

    def base_key(self):
        # The force vector fully determines the radiation pattern.
        return Source.base_key(self) + (self.fn, self.fe, self.fd)

    def get_factor(self):
        return 1.0

    def discretize_basesource(self, store, target=None):
        '''
        Discretize into a :py:class:`~pyrocko.gf.meta.DiscretizedSFSource`
        by scaling the (n, e, d) force vector with the STF amplitudes.
        '''
        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)
        force_ned = num.array([[self.fn, self.fe, self.fd]], dtype=float)
        forces = amplitudes[:, num.newaxis] * force_ned

        return meta.DiscretizedSFSource(
            forces=forces,
            **self._dparams_base_repeated(times))

    def pyrocko_event(self, store=None, target=None, **kwargs):
        return Source.pyrocko_event(self, store, target, **kwargs)

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        d = dict(kwargs)
        return super(SFSource, cls).from_pyrocko_event(ev, **d)

4312 

4313 

class PorePressurePointSource(Source):
    '''
    Excess pore pressure point source.

    For poro-elastic initial value problem where an excess pore pressure is
    brought into a small source volume.
    '''

    discretized_source_class = meta.DiscretizedPorePressureSource

    pp = Float.T(
        default=1.0,
        help='initial excess pore pressure in [Pa]')

    def base_key(self):
        # The pressure amplitude enters via get_factor(), so the plain
        # source base key suffices.
        return Source.base_key(self)

    def get_factor(self):
        return self.pp

    def discretize_basesource(self, store, target=None):
        # Single point with unit pressure; `arr` is a module-level helper
        # defined elsewhere in this file.
        return meta.DiscretizedPorePressureSource(pp=arr(1.0),
                                                  **self._dparams_base())

4337 

4338 

class PorePressureLineSource(Source):
    '''
    Excess pore pressure line source.

    The line source is centered at (north_shift, east_shift, depth).
    '''

    discretized_source_class = meta.DiscretizedPorePressureSource

    pp = Float.T(
        default=1.0,
        help='initial excess pore pressure in [Pa]')

    length = Float.T(
        default=0.0,
        help='length of the line source [m]')

    azimuth = Float.T(
        default=0.0,
        help='azimuth direction, clockwise from north [deg]')

    dip = Float.T(
        default=90.,
        help='dip direction, downward from horizontal [deg]')

    def base_key(self):
        return Source.base_key(self) + (self.azimuth, self.dip, self.length)

    def get_factor(self):
        return self.pp

    def discretize_basesource(self, store, target=None):
        '''
        Sample the line with an odd number of equally spaced points —
        dense enough for the store's spatial grid spacing — sharing the
        total unit pressure equally.
        '''
        npoints = 2 * int(
            math.ceil(self.length / num.min(store.config.deltas))) + 1

        offsets = num.linspace(
            -0.5 * self.length, 0.5 * self.length, npoints)

        azi_rad = self.azimuth * d2r
        dip_rad = self.dip * d2r
        horiz = offsets * math.cos(dip_rad)

        coords = num.zeros((npoints, 3))
        coords[:, 0] = self.north_shift + horiz * math.cos(azi_rad)
        coords[:, 1] = self.east_shift + horiz * math.sin(azi_rad)
        coords[:, 2] = self.depth + offsets * math.sin(dip_rad)

        return meta.DiscretizedPorePressureSource(
            times=util.num_full(npoints, self.time),
            lat=self.lat,
            lon=self.lon,
            north_shifts=coords[:, 0],
            east_shifts=coords[:, 1],
            depths=coords[:, 2],
            pp=num.ones(npoints) / npoints)

4394 

4395 

class Request(Object):
    '''
    Synthetic seismogram computation request.

    ::

        Request(**kwargs)
        Request(sources, targets, **kwargs)
    '''

    sources = List.T(
        Source.T(),
        help='list of sources for which to produce synthetics.')

    targets = List.T(
        Target.T(),
        help='list of targets for which to produce synthetics.')

    @classmethod
    def args2kwargs(cls, args):
        '''
        Map positional constructor arguments to keyword arguments.

        Only the two-argument form ``(sources, targets)`` fills keywords;
        zero (and, historically, three) arguments yield an empty mapping.
        '''
        if len(args) not in (0, 2, 3):
            raise BadRequest('Invalid arguments.')

        if len(args) == 2:
            return dict(sources=args[0], targets=args[1])
        else:
            return {}

    def __init__(self, *args, **kwargs):
        kwargs.update(self.args2kwargs(args))
        sources = kwargs.pop('sources', [])
        targets = kwargs.pop('targets', [])

        # Allow single objects instead of lists for convenience.
        if isinstance(sources, Source):
            sources = [sources]

        if isinstance(targets, (Target, StaticTarget)):
            targets = [targets]

        Object.__init__(self, sources=sources, targets=targets, **kwargs)

    @property
    def targets_dynamic(self):
        '''Waveform (dynamic) targets contained in this request.'''
        return [t for t in self.targets if isinstance(t, Target)]

    @property
    def targets_static(self):
        '''Static displacement targets contained in this request.'''
        return [t for t in self.targets if isinstance(t, StaticTarget)]

    @property
    def has_dynamic(self):
        # bool() replaces the redundant `True if len(...) > 0 else False`.
        return bool(self.targets_dynamic)

    @property
    def has_statics(self):
        return bool(self.targets_static)

    def subsources_map(self):
        '''Group sources by their GF base key.'''
        m = defaultdict(list)
        for source in self.sources:
            m[source.base_key()].append(source)

        return m

    def subtargets_map(self):
        '''Group targets by their base key.'''
        m = defaultdict(list)
        for target in self.targets:
            m[target.base_key()].append(target)

        return m

    def subrequest_map(self):
        '''
        Cartesian product of source groups and target groups, keyed by
        ``(source base key, target base key)``.
        '''
        ms = self.subsources_map()
        mt = self.subtargets_map()
        m = {}
        for (ks, ls) in ms.items():
            for (kt, lt) in mt.items():
                m[ks, kt] = (ls, lt)

        return m

4476 

4477 

class ProcessingStats(Object):
    '''
    Timing and counting statistics collected while processing a request.

    The ``t_perc_*`` fields hold percentages of the total time spent in
    the respective processing stage; ``t_wallclock`` and ``t_cpu`` are
    absolute totals.
    '''

    t_perc_get_store_and_receiver = Float.T(default=0.)
    t_perc_discretize_source = Float.T(default=0.)
    t_perc_make_base_seismogram = Float.T(default=0.)
    t_perc_make_same_span = Float.T(default=0.)
    t_perc_post_process = Float.T(default=0.)
    t_perc_optimize = Float.T(default=0.)
    t_perc_stack = Float.T(default=0.)
    t_perc_static_get_store = Float.T(default=0.)
    t_perc_static_discretize_basesource = Float.T(default=0.)
    t_perc_static_sum_statics = Float.T(default=0.)
    t_perc_static_post_process = Float.T(default=0.)
    t_wallclock = Float.T(default=0.)
    t_cpu = Float.T(default=0.)
    n_read_blocks = Int.T(default=0)
    n_results = Int.T(default=0)
    n_subrequests = Int.T(default=0)
    n_stores = Int.T(default=0)
    n_records_stacked = Int.T(default=0)

4497 

4498 

class Response(Object):
    '''
    Response object to a synthetic seismogram computation request.
    '''

    request = Request.T()
    results_list = List.T(List.T(meta.SeismosizerResult.T()))
    stats = ProcessingStats.T()

    def pyrocko_traces(self):
        '''
        Return a list of requested
        :class:`~pyrocko.trace.Trace` instances.
        '''

        traces = []
        for results in self.results_list:
            for result in results:
                # Skip non-waveform results (e.g. static or error results).
                if not isinstance(result, meta.Result):
                    continue
                traces.append(result.trace.pyrocko_trace())

        return traces

    def kite_scenes(self):
        '''
        Return a list of requested
        :class:`~kite.scenes` instances.
        '''
        kite_scenes = []
        for results in self.results_list:
            for result in results:
                if isinstance(result, meta.KiteSceneResult):
                    sc = result.get_scene()
                    kite_scenes.append(sc)

        return kite_scenes

    def static_results(self):
        '''
        Return a list of requested
        :class:`~pyrocko.gf.meta.StaticResult` instances.
        '''
        statics = []
        for results in self.results_list:
            for result in results:
                if not isinstance(result, meta.StaticResult):
                    continue
                statics.append(result)

        return statics

    def iter_results(self, get='pyrocko_traces'):
        '''
        Generator function to iterate over results of request.

        :param get: ``'pyrocko_traces'`` or ``'results'``; any other value
            silently yields nothing.

        Yields associated :py:class:`Source`,
        :class:`~pyrocko.gf.targets.Target`,
        :class:`~pyrocko.trace.Trace` instances in each iteration.
        '''

        for isource, source in enumerate(self.request.sources):
            for itarget, target in enumerate(self.request.targets):
                result = self.results_list[isource][itarget]
                if get == 'pyrocko_traces':
                    yield source, target, result.trace.pyrocko_trace()
                elif get == 'results':
                    yield source, target, result

    def snuffle(self, **kwargs):
        '''
        Open *snuffler* with requested traces.
        '''

        trace.snuffle(self.pyrocko_traces(), **kwargs)

4574 

4575 

class Engine(Object):
    '''
    Base class for synthetic seismogram calculators.
    '''

    def get_store_ids(self):
        '''
        Get list of available GF store IDs.
        '''

        # Base implementation knows about no stores; subclasses override.
        return []

4587 

4588 

class Rule(object):
    '''
    Base class for rules mapping stored GF components to output channels.
    '''
    pass

4591 

4592 

class VectorRule(Rule):
    '''
    Rule composing a 3-component (north, east, down) vector quantity
    projected onto a target's measurement direction.
    '''

    def __init__(self, quantity, differentiate=0, integrate=0):
        self.components = [quantity + '.' + c for c in 'ned']
        self.differentiate = differentiate
        self.integrate = integrate

    def required_components(self, target):
        '''Return the GF components with non-vanishing projection weight.'''
        north, east, down = self.components
        sa, ca, sd, cd = target.get_sin_cos_factors()

        weighted = [(ca * cd, north), (sa * cd, east), (sd, down)]
        return tuple(comp for factor, comp in weighted if nonzero(factor))

    def apply_(self, target, base_seismogram):
        '''
        Project the base seismogram onto the target direction; optionally
        differentiate the result in time.
        '''
        north, east, down = self.components
        sa, ca, sd, cd = target.get_sin_cos_factors()

        data = 0.0
        deltat = None
        for factor, comp in [(ca * cd, north), (sa * cd, east), (sd, down)]:
            if nonzero(factor):
                component_trace = base_seismogram[comp]
                data = data + component_trace.data * factor
                deltat = component_trace.deltat

        if self.differentiate:
            data = util.diff_fd(self.differentiate, 4, deltat, data)

        if self.integrate:
            raise NotImplementedError('Integration is not implemented yet.')

        return data

4641 

4642 

class HorizontalVectorRule(Rule):
    '''
    Rule composing a horizontal (north, east) vector quantity projected
    onto a target's horizontal measurement direction.
    '''

    def __init__(self, quantity, differentiate=0, integrate=0):
        self.components = [quantity + '.' + c for c in 'ne']
        self.differentiate = differentiate
        self.integrate = integrate

    def required_components(self, target):
        '''Return the GF components with non-vanishing projection weight.'''
        north, east = self.components
        sa, ca, _, _ = target.get_sin_cos_factors()

        weighted = [(ca, north), (sa, east)]
        return tuple(comp for factor, comp in weighted if nonzero(factor))

    def apply_(self, target, base_seismogram):
        '''
        Project the base seismogram onto the target's horizontal
        direction; optionally differentiate the result in time.
        '''
        north, east = self.components
        sa, ca, _, _ = target.get_sin_cos_factors()

        data = 0.0
        for factor, comp in [(ca, north), (sa, east)]:
            if nonzero(factor):
                data = data + base_seismogram[comp].data * factor

        if self.differentiate:
            # Sampling interval taken from the east component, as in the
            # original implementation.
            deltat = base_seismogram[east].deltat
            data = util.diff_fd(self.differentiate, 4, deltat, data)

        if self.integrate:
            raise NotImplementedError('Integration is not implemented yet.')

        return data

4683 

4684 

class ScalarRule(Rule):
    '''
    Rule passing through a scalar quantity (e.g. pore pressure),
    optionally differentiated in time.
    '''

    def __init__(self, quantity, differentiate=0):
        self.c = quantity
        # Bug fix: `differentiate` was accepted but never stored, so
        # apply_() raised AttributeError when it read self.differentiate.
        self.differentiate = differentiate

    def required_components(self, target):
        return (self.c, )

    def apply_(self, target, base_seismogram):
        # Copy so the caller cannot mutate the cached base seismogram.
        data = base_seismogram[self.c].data.copy()
        deltat = base_seismogram[self.c].deltat
        if self.differentiate:
            data = util.diff_fd(self.differentiate, 4, deltat, data)

        return data

4700 

4701 

class StaticDisplacement(Rule):
    '''
    Rule for static displacement targets.

    For satellite targets, additionally computes the line-of-sight
    projection from the (n, e, d) displacement components.
    '''

    def required_components(self, target):
        return tuple(['displacement.%s' % c for c in list('ned')])

    def apply_(self, target, base_statics):
        if isinstance(target, SatelliteTarget):
            # LOS factor columns weight (-down, east, north) in that order;
            # the downward component enters with flipped sign (up positive).
            los_fac = target.get_los_factors()
            base_statics['displacement.los'] =\
                (los_fac[:, 0] * -base_statics['displacement.d'] +
                 los_fac[:, 1] * base_statics['displacement.e'] +
                 los_fac[:, 2] * base_statics['displacement.n'])
        return base_statics

4715 

4716 

# Output quantity -> ordered list of candidate rules. The engine picks the
# first rule whose required GF components are all provided by the store;
# e.g. 'velocity' can be read directly or derived by differentiating
# stored displacements.
channel_rules = {
    'displacement': [VectorRule('displacement')],
    'rotation': [VectorRule('rotation')],
    'velocity': [
        VectorRule('velocity'),
        VectorRule('displacement', differentiate=1)],
    'acceleration': [
        VectorRule('acceleration'),
        VectorRule('velocity', differentiate=1),
        VectorRule('displacement', differentiate=2)],
    'pore_pressure': [ScalarRule('pore_pressure')],
    'vertical_tilt': [HorizontalVectorRule('vertical_tilt')],
    'darcy_velocity': [VectorRule('darcy_velocity')],
}

# Rules available for static (non-waveform) targets.
static_rules = {
    'displacement': [StaticDisplacement()]
}

4735 

4736 

class OutOfBoundsContext(Object):
    '''
    Diagnostic context attached to out-of-bounds errors raised while
    processing a source/target pair.
    '''

    source = Source.T()
    target = Target.T()
    distance = Float.T()
    components = List.T(String.T())

4742 

4743 

def process_dynamic_timeseries(work, psources, ptargets, engine, nthreads=0):
    '''
    Process dynamic targets in batches grouped by store, sample rate and
    interpolation scheme.

    Yields ``(isource, itarget, result)`` tuples together with a list of
    placeholder time counters, mirroring :py:func:`process_dynamic`.
    '''
    dsource_cache = {}
    # Placeholder counters keeping the yielded shape compatible with
    # process_dynamic().
    tcounters = list(range(6))

    targets = set()

    # Remember each target's position in ptargets; used as the yielded
    # target index.
    for itarget, target in enumerate(ptargets):
        target._id = itarget

    # Collect the set of targets referenced by any work unit. (A matching
    # `sources` set was previously collected too but never used.)
    for w in work:
        _, _, isources, itargets = w
        targets.update([ptargets[itarget] for itarget in itargets])

    store_ids = set([t.store_id for t in targets])

    for isource, source in enumerate(psources):

        # Union of GF components needed by any target for this source.
        components = set()
        for target in targets:
            rule = engine.get_rule(source, target)
            components.update(rule.required_components(target))

        for store_id in store_ids:
            store_targets = [t for t in targets if t.store_id == store_id]

            sample_rates = set([t.sample_rate for t in store_targets])
            interpolations = set([t.interpolation for t in store_targets])

            base_seismograms = []
            store_targets_out = []

            for samp_rate in sample_rates:
                for interp in interpolations:
                    engine_targets = [
                        t for t in store_targets if t.sample_rate == samp_rate
                        and t.interpolation == interp]

                    if not engine_targets:
                        continue

                    store_targets_out += engine_targets

                    base_seismograms += engine.base_seismograms(
                        source,
                        engine_targets,
                        components,
                        dsource_cache,
                        nthreads)

            for iseis, seismogram in enumerate(base_seismograms):
                for tr in seismogram.values():
                    if tr.err != store.SeismosizerErrorEnum.SUCCESS:
                        # Bug fix: base_seismograms is ordered like
                        # store_targets_out (grouped by sample rate and
                        # interpolation), not like store_targets, so the
                        # error context must index store_targets_out.
                        e = SeismosizerError(
                            'Seismosizer failed with return code %i\n%s' % (
                                tr.err, str(
                                    OutOfBoundsContext(
                                        source=source,
                                        target=store_targets_out[iseis],
                                        distance=source.distance_to(
                                            store_targets_out[iseis]),
                                        components=components))))
                        raise e

            for seismogram, target in zip(base_seismograms, store_targets_out):

                try:
                    result = engine._post_process_dynamic(
                        seismogram, source, target)
                except SeismosizerError as e:
                    result = e

                yield (isource, target._id, result), tcounters

4820 

4821 

def process_dynamic(work, psources, ptargets, engine, nthreads=0):
    '''
    Process dynamic (waveform) work units, one source-target pair at a
    time.

    Yields ``(isource, itarget, result)`` together with the engine's time
    counters plus one final timestamp taken after post-processing.
    '''
    dsource_cache = {}

    for w in work:
        _, _, isources, itargets = w

        sources = [psources[isource] for isource in isources]
        targets = [ptargets[itarget] for itarget in itargets]

        # The rule is derived from the first source only — presumably all
        # sources in a work unit share a base key; TODO confirm.
        components = set()
        for target in targets:
            rule = engine.get_rule(sources[0], target)
            components.update(rule.required_components(target))

        for isource, source in zip(isources, sources):
            for itarget, target in zip(itargets, targets):

                try:
                    base_seismogram, tcounters = engine.base_seismogram(
                        source, target, components, dsource_cache, nthreads)
                except meta.OutOfBounds as e:
                    # NOTE(review): the context reports the work unit's
                    # first source/target, not necessarily the failing
                    # pair — confirm whether this is intentional.
                    e.context = OutOfBoundsContext(
                        source=sources[0],
                        target=targets[0],
                        distance=sources[0].distance_to(targets[0]),
                        components=components)
                    raise

                # Aggregate per-component stacking statistics.
                n_records_stacked = 0
                t_optimize = 0.0
                t_stack = 0.0

                for _, tr in base_seismogram.items():
                    n_records_stacked += tr.n_records_stacked
                    t_optimize += tr.t_optimize
                    t_stack += tr.t_stack

                try:
                    result = engine._post_process_dynamic(
                        base_seismogram, source, target)
                    result.n_records_stacked = n_records_stacked
                    result.n_shared_stacking = len(sources) *\
                        len(targets)
                    result.t_optimize = t_optimize
                    result.t_stack = t_stack
                except SeismosizerError as e:
                    result = e

                tcounters.append(xtime())
                yield (isource, itarget, result), tcounters

4872 

4873 

def process_static(work, psources, ptargets, engine, nthreads=0):
    '''
    Process static (displacement) work units, one source-target pair at a
    time.

    Yields ``(isource, itarget, result)`` together with the engine's time
    counters plus one final timestamp.
    '''
    for w in work:
        _, _, isources, itargets = w

        sources = [psources[isource] for isource in isources]
        targets = [ptargets[itarget] for itarget in itargets]

        for isource, source in zip(isources, sources):
            for itarget, target in zip(itargets, targets):
                components = engine.get_rule(source, target)\
                    .required_components(target)

                try:
                    base_statics, tcounters = engine.base_statics(
                        source, target, components, nthreads)
                except meta.OutOfBounds as e:
                    # NOTE(review): the context reports the work unit's
                    # first source/target, not necessarily the failing
                    # pair — confirm whether this is intentional.
                    e.context = OutOfBoundsContext(
                        source=sources[0],
                        target=targets[0],
                        distance=float('nan'),
                        components=components)
                    raise
                result = engine._post_process_statics(
                    base_statics, source, target)
                tcounters.append(xtime())

                yield (isource, itarget, result), tcounters

4901 

4902 

class LocalEngine(Engine):
    '''
    Offline synthetic seismogram calculator.

    :param use_env: if ``True``, fill :py:attr:`store_superdirs` and
        :py:attr:`store_dirs` with paths set in environment variables
        GF_STORE_SUPERDIRS and GF_STORE_DIRS.
    :param use_config: if ``True``, fill :py:attr:`store_superdirs` and
        :py:attr:`store_dirs` with paths set in the user's config file.

        The config file can be found at :file:`~/.pyrocko/config.pf`

        .. code-block :: python

            gf_store_dirs: ['/home/pyrocko/gf_stores/ak135/']
            gf_store_superdirs: ['/home/pyrocko/gf_stores/']
    '''

    store_superdirs = List.T(
        String.T(),
        help='directories which are searched for Green\'s function stores')

    store_dirs = List.T(
        String.T(),
        help='additional individual Green\'s function store directories')

    default_store_id = String.T(
        optional=True,
        help='default store ID to be used when a request does not provide '
             'one')

4933 

4934 def __init__(self, **kwargs): 

4935 use_env = kwargs.pop('use_env', False) 

4936 use_config = kwargs.pop('use_config', False) 

4937 Engine.__init__(self, **kwargs) 

4938 if use_env: 

4939 env_store_superdirs = os.environ.get('GF_STORE_SUPERDIRS', '') 

4940 env_store_dirs = os.environ.get('GF_STORE_DIRS', '') 

4941 if env_store_superdirs: 

4942 self.store_superdirs.extend(env_store_superdirs.split(':')) 

4943 

4944 if env_store_dirs: 

4945 self.store_dirs.extend(env_store_dirs.split(':')) 

4946 

4947 if use_config: 

4948 c = config.config() 

4949 self.store_superdirs.extend(c.gf_store_superdirs) 

4950 self.store_dirs.extend(c.gf_store_dirs) 

4951 

4952 self._check_store_dirs_type() 

4953 self._id_to_store_dir = {} 

4954 self._open_stores = {} 

4955 self._effective_default_store_id = None 

4956 

4957 def _check_store_dirs_type(self): 

4958 for sdir in ['store_dirs', 'store_superdirs']: 

4959 if not isinstance(self.__getattribute__(sdir), list): 

4960 raise TypeError("{} of {} is not of type list".format( 

4961 sdir, self.__class__.__name__)) 

4962 

    def _get_store_id(self, store_dir):
        # Open the store just long enough to read its configured ID.
        store_ = store.Store(store_dir)
        store_id = store_.config.id
        store_.close()
        return store_id

4968 

4969 def _looks_like_store_dir(self, store_dir): 

4970 return os.path.isdir(store_dir) and \ 

4971 all(os.path.isfile(pjoin(store_dir, x)) for x in 

4972 ('index', 'traces', 'config')) 

4973 

4974 def iter_store_dirs(self): 

4975 store_dirs = set() 

4976 for d in self.store_superdirs: 

4977 if not os.path.exists(d): 

4978 logger.warning('store_superdir not available: %s' % d) 

4979 continue 

4980 

4981 for entry in os.listdir(d): 

4982 store_dir = os.path.realpath(pjoin(d, entry)) 

4983 if self._looks_like_store_dir(store_dir): 

4984 store_dirs.add(store_dir) 

4985 

4986 for store_dir in self.store_dirs: 

4987 store_dirs.add(os.path.realpath(store_dir)) 

4988 

4989 return store_dirs 

4990 

    def _scan_stores(self):
        # Populate the store-ID -> directory mapping, insisting that each
        # ID is unique across all discovered stores.
        for store_dir in self.iter_store_dirs():
            store_id = self._get_store_id(store_dir)
            if store_id not in self._id_to_store_dir:
                self._id_to_store_dir[store_id] = store_dir
            else:
                if store_dir != self._id_to_store_dir[store_id]:
                    raise DuplicateStoreId(
                        'GF store ID %s is used in (at least) two '
                        'different stores. Locations are: %s and %s' %
                        (store_id, self._id_to_store_dir[store_id], store_dir))

5002 

5003 def get_store_dir(self, store_id): 

5004 ''' 

5005 Lookup directory given a GF store ID. 

5006 ''' 

5007 

5008 if store_id not in self._id_to_store_dir: 

5009 self._scan_stores() 

5010 

5011 if store_id not in self._id_to_store_dir: 

5012 raise NoSuchStore(store_id, self.iter_store_dirs()) 

5013 

5014 return self._id_to_store_dir[store_id] 

5015 

    def get_store_ids(self):
        '''
        Get list of available store IDs.
        '''

        # Rescan on every call so newly added stores are picked up.
        self._scan_stores()
        return sorted(self._id_to_store_dir.keys())

5023 

5024 def effective_default_store_id(self): 

5025 if self._effective_default_store_id is None: 

5026 if self.default_store_id is None: 

5027 store_ids = self.get_store_ids() 

5028 if len(store_ids) == 1: 

5029 self._effective_default_store_id = self.get_store_ids()[0] 

5030 else: 

5031 raise NoDefaultStoreSet() 

5032 else: 

5033 self._effective_default_store_id = self.default_store_id 

5034 

5035 return self._effective_default_store_id 

5036 

    def get_store(self, store_id=None):
        '''
        Get a store from the engine.

        :param store_id: identifier of the store (optional)
        :returns: :py:class:`~pyrocko.gf.store.Store` object

        If no ``store_id`` is provided the store
        associated with the :py:gattr:`default_store_id` is returned.
        Raises :py:exc:`NoDefaultStoreSet` if :py:gattr:`default_store_id` is
        undefined.
        '''

        if store_id is None:
            store_id = self.effective_default_store_id()

        # Stores are opened lazily and kept open for reuse.
        if store_id not in self._open_stores:
            store_dir = self.get_store_dir(store_id)
            self._open_stores[store_id] = store.Store(store_dir)

        return self._open_stores[store_id]

5058 

5059 def get_store_config(self, store_id): 

5060 store = self.get_store(store_id) 

5061 return store.config 

5062 

5063 def get_store_extra(self, store_id, key): 

5064 store = self.get_store(store_id) 

5065 return store.get_extra(key) 

5066 

5067 def close_cashed_stores(self): 

5068 ''' 

5069 Close and remove ids from cashed stores. 

5070 ''' 

5071 store_ids = [] 

5072 for store_id, store_ in self._open_stores.items(): 

5073 store_.close() 

5074 store_ids.append(store_id) 

5075 

5076 for store_id in store_ids: 

5077 self._open_stores.pop(store_id) 

5078 

5079 def get_rule(self, source, target): 

5080 cprovided = self.get_store(target.store_id).get_provided_components() 

5081 

5082 if isinstance(target, StaticTarget): 

5083 quantity = target.quantity 

5084 available_rules = static_rules 

5085 elif isinstance(target, Target): 

5086 quantity = target.effective_quantity() 

5087 available_rules = channel_rules 

5088 

5089 try: 

5090 for rule in available_rules[quantity]: 

5091 cneeded = rule.required_components(target) 

5092 if all(c in cprovided for c in cneeded): 

5093 return rule 

5094 

5095 except KeyError: 

5096 pass 

5097 

5098 raise BadRequest( 

5099 'No rule to calculate "%s" with GFs from store "%s" ' 

5100 'for source model "%s".' % ( 

5101 target.effective_quantity(), 

5102 target.store_id, 

5103 source.__class__.__name__)) 

5104 

5105 def _cached_discretize_basesource(self, source, store, cache, target): 

5106 if (source, store) not in cache: 

5107 cache[source, store] = source.discretize_basesource(store, target) 

5108 

5109 return cache[source, store] 

5110 

5111 def base_seismograms(self, source, targets, components, dsource_cache, 

5112 nthreads=0): 

5113 

5114 target = targets[0] 

5115 

5116 interp = set([t.interpolation for t in targets]) 

5117 if len(interp) > 1: 

5118 raise BadRequest('Targets have different interpolation schemes.') 

5119 

5120 rates = set([t.sample_rate for t in targets]) 

5121 if len(rates) > 1: 

5122 raise BadRequest('Targets have different sample rates.') 

5123 

5124 store_ = self.get_store(target.store_id) 

5125 receivers = [t.receiver(store_) for t in targets] 

5126 

5127 if target.sample_rate is not None: 

5128 deltat = 1. / target.sample_rate 

5129 rate = target.sample_rate 

5130 else: 

5131 deltat = None 

5132 rate = store_.config.sample_rate 

5133 

5134 tmin = num.fromiter( 

5135 (t.tmin for t in targets), dtype=float, count=len(targets)) 

5136 tmax = num.fromiter( 

5137 (t.tmax for t in targets), dtype=float, count=len(targets)) 

5138 

5139 mask = num.logical_and(num.isfinite(tmin), num.isfinite(tmax)) 

5140 

5141 itmin = num.zeros_like(tmin, dtype=num.int64) 

5142 itmax = num.zeros_like(tmin, dtype=num.int64) 

5143 nsamples = num.full_like(tmin, -1, dtype=num.int64) 

5144 

5145 itmin[mask] = num.floor(tmin[mask] * rate).astype(num.int64) 

5146 itmax[mask] = num.ceil(tmax[mask] * rate).astype(num.int64) 

5147 nsamples = itmax - itmin + 1 

5148 nsamples[num.logical_not(mask)] = -1 

5149 

5150 base_source = self._cached_discretize_basesource( 

5151 source, store_, dsource_cache, target) 

5152 

5153 base_seismograms = store_.calc_seismograms( 

5154 base_source, receivers, components, 

5155 deltat=deltat, 

5156 itmin=itmin, nsamples=nsamples, 

5157 interpolation=target.interpolation, 

5158 optimization=target.optimization, 

5159 nthreads=nthreads) 

5160 

5161 for i, base_seismogram in enumerate(base_seismograms): 

5162 base_seismograms[i] = store.make_same_span(base_seismogram) 

5163 

5164 return base_seismograms 

5165 

5166 def base_seismogram(self, source, target, components, dsource_cache, 

5167 nthreads): 

5168 

5169 tcounters = [xtime()] 

5170 

5171 store_ = self.get_store(target.store_id) 

5172 receiver = target.receiver(store_) 

5173 

5174 if target.tmin and target.tmax is not None: 

5175 rate = store_.config.sample_rate 

5176 itmin = int(num.floor(target.tmin * rate)) 

5177 itmax = int(num.ceil(target.tmax * rate)) 

5178 nsamples = itmax - itmin + 1 

5179 else: 

5180 itmin = None 

5181 nsamples = None 

5182 

5183 tcounters.append(xtime()) 

5184 base_source = self._cached_discretize_basesource( 

5185 source, store_, dsource_cache, target) 

5186 

5187 tcounters.append(xtime()) 

5188 

5189 if target.sample_rate is not None: 

5190 deltat = 1. / target.sample_rate 

5191 else: 

5192 deltat = None 

5193 

5194 base_seismogram = store_.seismogram( 

5195 base_source, receiver, components, 

5196 deltat=deltat, 

5197 itmin=itmin, nsamples=nsamples, 

5198 interpolation=target.interpolation, 

5199 optimization=target.optimization, 

5200 nthreads=nthreads) 

5201 

5202 tcounters.append(xtime()) 

5203 

5204 base_seismogram = store.make_same_span(base_seismogram) 

5205 

5206 tcounters.append(xtime()) 

5207 

5208 return base_seismogram, tcounters 

5209 

5210 def base_statics(self, source, target, components, nthreads): 

5211 tcounters = [xtime()] 

5212 store_ = self.get_store(target.store_id) 

5213 

5214 if target.tsnapshot is not None: 

5215 rate = store_.config.sample_rate 

5216 itsnapshot = int(num.floor(target.tsnapshot * rate)) 

5217 else: 

5218 itsnapshot = None 

5219 tcounters.append(xtime()) 

5220 

5221 base_source = source.discretize_basesource(store_, target=target) 

5222 

5223 tcounters.append(xtime()) 

5224 

5225 base_statics = store_.statics( 

5226 base_source, 

5227 target, 

5228 itsnapshot, 

5229 components, 

5230 target.interpolation, 

5231 nthreads) 

5232 

5233 tcounters.append(xtime()) 

5234 

5235 return base_statics, tcounters 

5236 

    def _post_process_dynamic(self, base_seismogram, source, target):
        '''
        Turn a raw base seismogram into a finished trace for ``target``.

        Applies the matching component-combination rule, the combined
        source/target amplitude factor and the source's post-STF
        convolution, then hands the assembled trace to the target for
        final post-processing.
        '''
        # All components share the same sampling; pick any one for it.
        base_any = next(iter(base_seismogram.values()))
        deltat = base_any.deltat
        itmin = base_any.itmin

        rule = self.get_rule(source, target)
        data = rule.apply_(target, base_seismogram)

        factor = source.get_factor() * target.get_factor()
        if factor != 1.0:
            data = data * factor

        stf = source.effective_stf_post()

        times, amplitudes = stf.discretize_t(
            deltat, 0.0)

        # repeat end point to prevent boundary effects
        padded_data = num.empty(data.size + amplitudes.size, dtype=float)
        padded_data[:data.size] = data
        padded_data[data.size:] = data[-1]
        data = num.convolve(amplitudes, padded_data)

        # Shift the trace onset by the STF's time offset; the padding
        # samples are dropped again when the trace is built below.
        tmin = itmin * deltat + times[0]

        tr = meta.SeismosizerTrace(
            codes=target.codes,
            data=data[:-amplitudes.size],
            deltat=deltat,
            tmin=tmin)

        return target.post_process(self, source, tr)

5269 

    def _post_process_statics(self, base_statics, source, starget):
        '''
        Apply rule and source amplitude factor to raw statics, then let
        the static target post-process them.
        '''
        rule = self.get_rule(source, starget)
        data = rule.apply_(starget, base_statics)

        factor = source.get_factor()
        if factor != 1.0:
            # In-place scaling; presumably the arrays in ``data`` are views
            # into ``base_statics`` so the scaling also affects the object
            # passed on below — TODO confirm against rule.apply_.
            for v in data.values():
                v *= factor

        # NOTE(review): ``base_statics`` (not ``data``) is handed to
        # post_process — verify this is intentional.
        return starget.post_process(self, source, base_statics)

5280 

    def process(self, *args, **kwargs):
        '''
        Process a request.

        ::

            process(**kwargs)
            process(request, **kwargs)
            process(sources, targets, **kwargs)

        The request can be given as a :py:class:`Request` object, or such an
        object is created using ``Request(**kwargs)`` for convenience.

        Dynamic and static targets are dispatched to separate worker
        functions; per-stage timing and resource statistics are collected
        into the returned response.

        :returns: :py:class:`Response` object
        '''

        # Accept (), (request,) or (sources, targets) positional forms.
        if len(args) not in (0, 1, 2):
            raise BadRequest('Invalid arguments.')

        if len(args) == 1:
            kwargs['request'] = args[0]

        elif len(args) == 2:
            kwargs.update(Request.args2kwargs(args))

        request = kwargs.pop('request', None)
        status_callback = kwargs.pop('status_callback', None)
        calc_timeseries = kwargs.pop('calc_timeseries', True)

        # ``nprocs`` is honored as an alias for ``nthreads``.
        nprocs = kwargs.pop('nprocs', None)
        nthreads = kwargs.pop('nthreads', 1)
        if nprocs is not None:
            nthreads = nprocs

        if request is None:
            request = Request(**kwargs)

        # ``resource`` is None on platforms without the module (see import
        # guard at top of file); usage stats are skipped there.
        if resource:
            rs0 = resource.getrusage(resource.RUSAGE_SELF)
            rc0 = resource.getrusage(resource.RUSAGE_CHILDREN)
        # NOTE(review): start time uses xtime() but the end time below uses
        # time.time() — presumably aliases of the same clock; confirm.
        tt0 = xtime()

        # make sure stores are open before fork()
        store_ids = set(target.store_id for target in request.targets)
        for store_id in store_ids:
            self.get_store(store_id)

        # Map source/target objects back to their positions in the request
        # so worker results can be filed into results_list.
        source_index = dict((x, i) for (i, x) in
                            enumerate(request.sources))
        target_index = dict((x, i) for (i, x) in
                            enumerate(request.targets))

        m = request.subrequest_map()

        skeys = sorted(m.keys(), key=cmp_to_key(cmp_none_aware))
        results_list = []

        # One result slot per (source, target) pair.
        for i in range(len(request.sources)):
            results_list.append([None] * len(request.targets))

        tcounters_dyn_list = []
        tcounters_static_list = []
        nsub = len(skeys)
        isub = 0

        # Processing dynamic targets through
        # parimap(process_subrequest_dynamic)

        if calc_timeseries:
            _process_dynamic = process_dynamic_timeseries
        else:
            _process_dynamic = process_dynamic

        if request.has_dynamic:
            work_dynamic = [
                (i, nsub,
                 [source_index[source] for source in m[k][0]],
                 [target_index[target] for target in m[k][1]
                  if not isinstance(target, StaticTarget)])
                for (i, k) in enumerate(skeys)]

            for ii_results, tcounters_dyn in _process_dynamic(
                    work_dynamic, request.sources, request.targets, self,
                    nthreads):

                # Stage durations (differences of the checkpoint times).
                tcounters_dyn_list.append(num.diff(tcounters_dyn))
                isource, itarget, result = ii_results
                results_list[isource][itarget] = result

                if status_callback:
                    status_callback(isub, nsub)

                isub += 1

        # Processing static targets through process_static
        if request.has_statics:
            work_static = [
                (i, nsub,
                 [source_index[source] for source in m[k][0]],
                 [target_index[target] for target in m[k][1]
                  if isinstance(target, StaticTarget)])
                for (i, k) in enumerate(skeys)]

            for ii_results, tcounters_static in process_static(
                    work_static, request.sources, request.targets, self,
                    nthreads=nthreads):

                tcounters_static_list.append(num.diff(tcounters_static))
                isource, itarget, result = ii_results
                results_list[isource][itarget] = result

                if status_callback:
                    status_callback(isub, nsub)

                isub += 1

        if status_callback:
            status_callback(nsub, nsub)

        tt1 = time.time()
        if resource:
            rs1 = resource.getrusage(resource.RUSAGE_SELF)
            rc1 = resource.getrusage(resource.RUSAGE_CHILDREN)

        s = ProcessingStats()

        if request.has_dynamic:
            # Accumulate per-stage times over all subrequests and convert
            # to percentages of the total dynamic processing time.
            tcumu_dyn = num.sum(num.vstack(tcounters_dyn_list), axis=0)
            t_dyn = float(num.sum(tcumu_dyn))
            perc_dyn = map(float, tcumu_dyn / t_dyn * 100.)
            (s.t_perc_get_store_and_receiver,
             s.t_perc_discretize_source,
             s.t_perc_make_base_seismogram,
             s.t_perc_make_same_span,
             s.t_perc_post_process) = perc_dyn
        else:
            t_dyn = 0.

        if request.has_statics:
            tcumu_static = num.sum(num.vstack(tcounters_static_list), axis=0)
            t_static = num.sum(tcumu_static)
            perc_static = map(float, tcumu_static / t_static * 100.)
            (s.t_perc_static_get_store,
             s.t_perc_static_discretize_basesource,
             s.t_perc_static_sum_statics,
             s.t_perc_static_post_process) = perc_static

        s.t_wallclock = tt1 - tt0
        if resource:
            s.t_cpu = (
                (rs1.ru_utime + rs1.ru_stime + rc1.ru_utime + rc1.ru_stime) -
                (rs0.ru_utime + rs0.ru_stime + rc0.ru_utime + rc0.ru_stime))
            s.n_read_blocks = (
                (rs1.ru_inblock + rc1.ru_inblock) -
                (rs0.ru_inblock + rc0.ru_inblock))

        # Results shared between several stackings contribute fractionally.
        n_records_stacked = 0.
        for results in results_list:
            for result in results:
                if not isinstance(result, meta.Result):
                    continue
                shr = float(result.n_shared_stacking)
                n_records_stacked += result.n_records_stacked / shr
                s.t_perc_optimize += result.t_optimize / shr
                s.t_perc_stack += result.t_stack / shr
        s.n_records_stacked = int(n_records_stacked)
        if t_dyn != 0.:
            # NOTE(review): ``a /= t_dyn * 100`` divides by (t_dyn * 100);
            # the other percentage computations above use ``/ t * 100.`` —
            # possibly ``*= 100. / t_dyn`` was intended. Confirm before
            # relying on these two statistics.
            s.t_perc_optimize /= t_dyn * 100
            s.t_perc_stack /= t_dyn * 100

        return Response(
            request=request,
            results_list=results_list,
            stats=s)

5455 

5456 

class RemoteEngine(Engine):
    '''
    Client for remote synthetic seismogram calculator.
    '''

    site = String.T(default=ws.g_default_site, optional=True)
    url = String.T(default=ws.g_url, optional=True)

    def process(self, request=None, status_callback=None, **kwargs):
        '''
        Forward a request to the remote seismosizer web service.

        A :py:class:`Request` is built from ``kwargs`` when none is given.
        '''
        req = Request(**kwargs) if request is None else request

        return ws.seismosizer(url=self.url, site=self.site, request=req)

5471 

5472 

5473g_engine = None 

5474 

5475 

def get_engine(store_superdirs=None):
    '''
    Get the shared global :py:class:`LocalEngine` instance, creating it on
    first use.

    :param store_superdirs: optional list of additional store super
        directories to register with the engine
    :returns: the global :py:class:`LocalEngine` object
    '''
    global g_engine
    if g_engine is None:
        g_engine = LocalEngine(use_env=True, use_config=True)

    # Fix: avoid the mutable default argument ``store_superdirs=[]``;
    # ``None`` is mapped to an empty iteration here.
    for d in store_superdirs or []:
        if d not in g_engine.store_superdirs:
            g_engine.store_superdirs.append(d)

    return g_engine

5486 

5487 

class SourceGroup(Object):
    '''
    Abstract base class for collections of sources.

    Attribute access is vectorized: e.g. ``group.depth`` gathers the
    ``depth`` attribute of every contained source into a float array.
    '''

    def __getattr__(self, k):
        values = (getattr(s, k) for s in self)
        return num.fromiter(values, dtype=float)

    def __iter__(self):
        raise NotImplementedError(
            'This method should be implemented in subclass.')

    def __len__(self):
        raise NotImplementedError(
            'This method should be implemented in subclass.')

5501 

5502 

class SourceList(SourceGroup):
    '''
    Collection of sources given as an explicit list.
    '''

    # The Source objects contained in this group.
    sources = List.T(Source.T())

    def append(self, s):
        '''
        Append a source to the collection.
        '''
        self.sources.append(s)

    def __iter__(self):
        return iter(self.sources)

    def __len__(self):
        return len(self.sources)

5514 

5515 

class SourceGrid(SourceGroup):
    '''
    Regular grid of sources, derived from a base source by varying a
    subset of its parameters over given ranges.
    '''

    base = Source.T()
    variables = Dict.T(String.T(), Range.T())
    order = List.T(String.T())

    def __len__(self):
        total = 1
        for _, coords in self.make_coords(self.base):
            total *= len(list(coords))

        return total

    def __iter__(self):
        for combination in permudef(self.make_coords(self.base)):
            source = self.base.clone(**dict(combination))
            source.regularize()
            yield source

    def ordered_params(self):
        '''
        Yield the varied parameter names in their effective order.

        Raises on iteration if a variable name does not correspond to a
        parameter of the base source.
        '''
        remaining = list(self.variables.keys())
        for name in self.order + list(self.base.keys()):
            if name in remaining:
                yield name
                remaining.remove(name)

        if remaining:
            raise Exception('Invalid parameter "%s" for source type "%s".' %
                            (remaining[0], self.base.__class__.__name__))

    def make_coords(self, base):
        '''
        Build ``(parameter, coordinate values)`` pairs spanning the grid.
        '''
        return [(param, self.variables[param].make(base=base[param]))
                for param in self.ordered_params()]

5548 

5549 

# All source model classes exposed by this module; also used to extend
# ``__all__`` below.
source_classes = [
    Source,
    SourceWithMagnitude,
    SourceWithDerivedMagnitude,
    ExplosionSource,
    RectangularExplosionSource,
    DCSource,
    CLVDSource,
    VLVDSource,
    MTSource,
    RectangularSource,
    PseudoDynamicRupture,
    DoubleDCSource,
    RingfaultSource,
    CombiSource,
    SFSource,
    PorePressurePointSource,
    PorePressureLineSource,
]

# All source-time-function classes exposed by this module; also used to
# extend ``__all__`` below.
stf_classes = [
    STF,
    BoxcarSTF,
    TriangularSTF,
    HalfSinusoidSTF,
    ResonatorSTF,
]

5577 

# Public API: error types, all source and STF classes (inserted from the
# lists above), and the request/engine machinery.
__all__ = '''
SeismosizerError
BadRequest
NoSuchStore
DerivedMagnitudeError
STFMode
'''.split() + [S.__name__ for S in source_classes + stf_classes] + '''
Request
ProcessingStats
Response
Engine
LocalEngine
RemoteEngine
source_classes
get_engine
Range
SourceGroup
SourceList
SourceGrid
map_anchor
'''.split()