1# http://pyrocko.org - GPLv3 

2# 

3# The Pyrocko Developers, 21st Century 

4# ---|P------/S----------~Lg---------- 

5 

6''' 

7 

8 

9 

10.. _coordinate-system-names: 

11 

12Coordinate systems 

13.................. 

14 

15Coordinate system names commonly used in source models. 

16 

17================= ============================================ 

18Name Description 

19================= ============================================ 

20``'xyz'`` northing, easting, depth in [m] 

21``'xy'`` northing, easting in [m] 

22``'latlon'`` latitude, longitude in [deg] 

23``'lonlat'`` longitude, latitude in [deg] 

24``'latlondepth'`` latitude, longitude in [deg], depth in [m] 

25================= ============================================ 

26''' 

27 

28from collections import defaultdict 

29from functools import cmp_to_key 

30import time 

31import math 

32import os 

33import re 

34import logging 

35try: 

36 import resource 

37except ImportError: 

38 resource = None 

39from hashlib import sha1 

40 

41import numpy as num 

42from scipy.interpolate import RegularGridInterpolator 

43 

44from pyrocko.guts import (Object, Float, String, StringChoice, List, 

45 Timestamp, Int, SObject, ArgumentError, Dict, 

46 ValidationError, Bool) 

47from pyrocko.guts_array import Array 

48 

49from pyrocko import moment_tensor as pmt 

50from pyrocko import trace, util, config, model, eikonal_ext 

51from pyrocko.orthodrome import ne_to_latlon 

52from pyrocko.model import Location 

53from pyrocko.modelling import OkadaSource, make_okada_coefficient_matrix, \ 

54 okada_ext, invert_fault_dislocations_bem 

55 

56from . import meta, store, ws 

57from .tractions import TractionField, DirectedTractions 

58from .targets import Target, StaticTarget, SatelliteTarget 

59 

# Shortcut for joining filesystem paths.
pjoin = os.path.join

# Prefix for guts YAML type tags of objects defined in this module.
guts_prefix = 'pf'

# Degree <-> radian conversion factors.
d2r = math.pi / 180.
r2d = 180. / math.pi

# Meters per kilometer.
km = 1e3

# Module-level logger.
logger = logging.getLogger('pyrocko.gf.seismosizer')

69 

70 

def cmp_none_aware(a, b):
    '''
    Three-way comparison of ``a`` and ``b``, treating ``None`` as smallest.

    Tuples are compared element-wise and recursively.

    :returns: -1, 0 or 1
    '''

    if isinstance(a, tuple) and isinstance(b, tuple):
        for elem_a, elem_b in zip(a, b):
            result = cmp_none_aware(elem_a, elem_b)
            if result:
                return result

        return 0

    if a is None:
        # None compares equal to None and smaller than anything else.
        return 0 if b is None else -1

    if b is None:
        return 1

    return bool(a > b) - bool(a < b)

93 

94 

def xtime():
    '''
    Get current system time in seconds (wrapper around ``time.time``).
    '''
    return time.time()

97 

98 

class SeismosizerError(Exception):
    '''
    Base class for errors raised by the seismosizer.
    '''
    pass

101 

102 

class BadRequest(SeismosizerError):
    '''
    Raised when an invalid request is made to the seismosizer.
    '''
    pass

105 

106 

class DuplicateStoreId(Exception):
    '''
    Raised when two GF stores with the same id are encountered.
    '''
    pass

109 

110 

class NoDefaultStoreSet(Exception):
    '''
    Raised when a default GF store is needed but none has been configured.
    '''
    pass

113 

114 

class ConversionError(Exception):
    '''
    Raised when a source model conversion fails.
    '''
    pass

117 

118 

class NoSuchStore(BadRequest):
    '''
    Raised when a requested GF store cannot be found.
    '''

    def __init__(self, store_id=None, dirs=None):
        BadRequest.__init__(self)
        self.store_id = store_id
        self.dirs = dirs

    def __str__(self):
        if self.store_id is None:
            message = 'GF store not found.'
        else:
            message = 'no GF store with id "%s" found.' % self.store_id

        if self.dirs is not None:
            # Include the searched directories to aid debugging.
            message += ' Searched folders:\n %s' % '\n '.join(
                sorted(self.dirs))

        return message

135 

136 

def ufloat(s):
    '''
    Convert a string to float, supporting unit suffixes 'k' (1e3) and
    'M' (1e6).

    :param s: string like ``'10k'``, ``'2.5M'`` or ``'100'``
    :returns: value as plain ``float``
    :raises ValueError: if the string consists of a unit suffix only, or
        is not a valid number
    '''

    units = {
        'k': 1e3,
        'M': 1e6,
    }

    factor = 1.0
    number = s
    if s and s[-1] in units:
        factor = units[s[-1]]
        number = s[:-1]
        if not number:
            # Report the original input; previously the stripped (empty)
            # remainder was formatted into the message, which was useless.
            raise ValueError('unit without a number: \'%s\'' % s)

    return float(number) * factor

151 

152 

def ufloat_or_none(s):
    '''
    Like :py:func:`ufloat`, but map empty/falsy input to ``None``.
    '''
    return ufloat(s) if s else None

158 

159 

def int_or_none(s):
    '''
    Convert string to ``int``, mapping empty/falsy input to ``None``.
    '''
    return int(s) if s else None

165 

166 

def nonzero(x, eps=1e-15):
    '''
    Return ``True`` if ``x`` differs from zero by more than ``eps``.
    '''
    return eps < abs(x)

169 

170 

def permudef(ln, j=0):
    '''
    Generate all combinations of the value lists in ``ln``.

    ``ln`` is a list of ``(key, values)`` pairs. For each combination, the
    list is modified in place (the pair at position ``j`` is temporarily
    replaced by ``(key, value)``) and yielded; after the generator is
    exhausted the original pairs are restored.
    '''
    if j >= len(ln):
        yield ln
        return

    key, choices = ln[j]
    for choice in choices:
        ln[j] = key, choice
        yield from permudef(ln, j + 1)

    # Restore the original (key, values) pair.
    ln[j] = key, choices

183 

184 

def arr(x):
    '''
    Coerce ``x`` into a numpy array with at least one dimension.
    '''
    a = num.asarray(x)
    return num.atleast_1d(a)

187 

188 

def discretize_rect_source(deltas, deltat, time, north, east, depth,
                           strike, dip, length, width,
                           anchor, velocity=None, stf=None,
                           nucleation_x=None, nucleation_y=None,
                           decimation_factor=1, pointsonly=False,
                           plane_coords=False,
                           aggressive_oversampling=False):
    '''
    Discretize a rectangular source into a grid of point sub-sources.

    :param deltas: GF store grid spacings, used to choose the sub-source
        spacing
    :param deltat: sampling interval; with ``velocity`` it further limits
        the sub-source spacing in time mode
    :param anchor: anchor position name, looked up in ``map_anchor``
    :param velocity: rupture velocity; required unless ``pointsonly`` is
        set
    :param nucleation_x,nucleation_y: rupture nucleation point in plane
        coordinates; onset times grow with distance from it
    :param decimation_factor: reduce the number of sub-sources by this
        factor
    :param pointsonly: if set, return only the point grid, no times or
        amplitudes
    :param plane_coords: if set, return the points in plane coordinates,
        without rotation and translation

    :returns: ``(points, dl, dw, nl, nw)`` when ``pointsonly`` or
        ``plane_coords`` is set, otherwise
        ``(points2, times2, amplitudes2, dl, dw, nl, nw)``
    '''

    if stf is None:
        stf = STF()

    if not velocity and not pointsonly:
        raise AttributeError('velocity is required in time mode')

    # Sub-source spacing: no coarser than the GF store grid and, in time
    # mode, no coarser than the distance travelled in one sample.
    mindeltagf = float(num.min(deltas))
    if velocity:
        mindeltagf = min(mindeltagf, deltat * velocity)

    ln = length
    wd = width

    if aggressive_oversampling:
        nl = int((2. / decimation_factor) * num.ceil(ln / mindeltagf)) + 1
        nw = int((2. / decimation_factor) * num.ceil(wd / mindeltagf)) + 1
    else:
        nl = int((1. / decimation_factor) * num.ceil(ln / mindeltagf)) + 1
        nw = int((1. / decimation_factor) * num.ceil(wd / mindeltagf)) + 1

    n = int(nl * nw)

    dl = ln / nl
    dw = wd / nw

    # Sub-source centers in plane coordinates, centered on the plane.
    xl = num.linspace(-0.5 * (ln - dl), 0.5 * (ln - dl), nl)
    xw = num.linspace(-0.5 * (wd - dw), 0.5 * (wd - dw), nw)

    points = num.zeros((n, 3))
    points[:, 0] = num.tile(xl, nw)
    points[:, 1] = num.repeat(xw, nl)

    # Distances from the nucleation point, measured before the anchor
    # shift is applied.
    if nucleation_x is not None:
        dist_x = num.abs(nucleation_x - points[:, 0])
    else:
        dist_x = num.zeros(n)

    if nucleation_y is not None:
        dist_y = num.abs(nucleation_y - points[:, 1])
    else:
        dist_y = num.zeros(n)

    dist = num.sqrt(dist_x**2 + dist_y**2)
    if velocity:
        times = dist / velocity
    else:
        # Fix: previously ``dist / velocity`` was computed unconditionally,
        # crashing in points-only mode where ``velocity`` may be None.
        # Onset times are not needed in that mode.
        times = None

    anch_x, anch_y = map_anchor[anchor]

    points[:, 0] -= anch_x * 0.5 * length
    points[:, 1] -= anch_y * 0.5 * width

    if plane_coords:
        return points, dl, dw, nl, nw

    # Rotate into NED coordinates and translate to the source position.
    rotmat = pmt.euler_to_matrix(dip * d2r, strike * d2r, 0.0)
    points = num.dot(rotmat.T, points.T).T

    points[:, 0] += north
    points[:, 1] += east
    points[:, 2] += depth

    if pointsonly:
        return points, dl, dw, nl, nw

    # Convolve each sub-source onset with the discretized STF.
    xtau, amplitudes = stf.discretize_t(deltat, time)
    nt = xtau.size

    points2 = num.repeat(points, nt, axis=0)
    times2 = (times[:, num.newaxis] + xtau[num.newaxis, :]).ravel()
    amplitudes2 = num.tile(amplitudes, n)

    return points2, times2, amplitudes2, dl, dw, nl, nw

268 

269 

def check_rect_source_discretisation(points2, nl, nw, store):
    '''
    Check whether a rectangular-source discretisation is fine enough for
    the sampling rate of ``store``.

    :returns: ``True`` if the sub-source spacing is acceptable (or trivially
        small), ``False`` otherwise
    '''
    # We assume a non-rotated fault plane.
    N_CRITICAL = 8

    points = points2.T.reshape((3, nl, nw))
    if points.size <= N_CRITICAL:
        logger.warning('RectangularSource is defined by only %d sub-sources!'
                       % points.size)
        return True

    # Spacing between neighbouring sub-source rows.
    deltas = points[:, 0, :] - points[:, 1, :]
    distances = num.sqrt((deltas**2).sum(axis=0))

    depths = points[2, 0, :]
    vs_profile = store.config.get_vs(
        lat=0., lon=0.,
        points=num.repeat(depths[:, num.newaxis], 3, axis=1),
        interpolation='multilinear')

    # Require at least two sub-sources per half minimum S-wavelength.
    min_wavelength = vs_profile * (store.config.deltat * 2)
    return bool(num.all(min_wavelength > distances / 2))

294 

295 

def outline_rect_source(strike, dip, length, width, anchor):
    '''
    Get the outline of a rectangular source as a closed polygon.

    :returns: ``(5, 3)`` array of corner points (last repeats the first),
        rotated by strike and dip
    '''
    half_l = 0.5 * length
    half_w = 0.5 * width

    # Corners of the plane, traversed so the polygon closes on itself.
    points = num.array(
        [[-half_l, -half_w, 0.],
         [half_l, -half_w, 0.],
         [half_l, half_w, 0.],
         [-half_l, half_w, 0.],
         [-half_l, -half_w, 0.]])

    anch_x, anch_y = map_anchor[anchor]
    points[:, 0] -= anch_x * half_l
    points[:, 1] -= anch_y * half_w

    rotmat = pmt.euler_to_matrix(dip * d2r, strike * d2r, 0.0)
    return num.dot(rotmat.T, points.T).T

313 

314 

def from_plane_coords(
        strike, dip, length, width, depth, x_plane_coords, y_plane_coords,
        lat=0., lon=0.,
        north_shift=0, east_shift=0,
        anchor='top', cs='xy'):
    '''
    Convert relative plane coordinates to absolute coordinates.

    ``x_plane_coords``/``y_plane_coords`` are fractional positions on the
    fault plane (scalar or list). Returns two lists of coordinates, ordered
    according to ``cs`` (see :ref:`coordinate-system-names`).
    '''

    ln = length
    wd = width
    x_abs = []
    y_abs = []
    # Accept scalars by wrapping them into single-element lists.
    if not isinstance(x_plane_coords, list):
        x_plane_coords = [x_plane_coords]
        y_plane_coords = [y_plane_coords]

    for x_plane, y_plane in zip(x_plane_coords, y_plane_coords):
        # Outline of a rectangle scaled by the requested plane coordinates.
        points = num.array(
            [[-0.5 * ln * x_plane, -0.5 * wd * y_plane, 0.],
             [0.5 * ln * x_plane, -0.5 * wd * y_plane, 0.],
             [0.5 * ln * x_plane, 0.5 * wd * y_plane, 0.],
             [-0.5 * ln * x_plane, 0.5 * wd * y_plane, 0.],
             [-0.5 * ln * x_plane, -0.5 * wd * y_plane, 0.]])

        # Shift according to anchor, then rotate into NED and translate.
        anch_x, anch_y = map_anchor[anchor]
        points[:, 0] -= anch_x * 0.5 * length
        points[:, 1] -= anch_y * 0.5 * width

        rotmat = pmt.euler_to_matrix(dip * d2r, strike * d2r, 0.0)

        points = num.dot(rotmat.T, points.T).T
        points[:, 0] += north_shift
        points[:, 1] += east_shift
        points[:, 2] += depth
        if cs in ('latlon', 'lonlat'):
            latlon = ne_to_latlon(lat, lon,
                                  points[:, 0], points[:, 1])
            latlon = num.array(latlon).T
            # NOTE(review): single corner rows (indices 1 and 2) of the
            # outline are picked as the result — presumably these represent
            # the requested point; confirm against callers.
            x_abs.append(latlon[1:2, 1])
            y_abs.append(latlon[2:3, 0])
        if cs == 'xy':
            x_abs.append(points[1:2, 1])
            y_abs.append(points[2:3, 0])

    # 'lonlat' swaps the output order relative to 'latlon'/'xy'.
    if cs == 'lonlat':
        return y_abs, x_abs
    else:
        return x_abs, y_abs

361 

362 

def points_on_rect_source(
        strike, dip, length, width, anchor,
        discretized_basesource=None, points_x=None, points_y=None):
    '''
    Get points on a rectangular source plane, rotated by strike and dip.

    Either pass a ``discretized_basesource`` (its patch corner grid is
    used) or normalized plane coordinates ``points_x``/``points_y``.

    :returns: array of shape ``(npoints, 3)``
    '''

    ln = length
    wd = width

    # Accept scalars or plain lists for the point coordinates.
    if isinstance(points_x, list) or isinstance(points_x, float):
        points_x = num.array([points_x])
    if isinstance(points_y, list) or isinstance(points_y, float):
        points_y = num.array([points_y])

    if discretized_basesource:
        ds = discretized_basesource

        # Corner grid of the discretized patches: one more node than
        # patches along each direction.
        nl_patches = ds.nl + 1
        nw_patches = ds.nw + 1

        npoints = nl_patches * nw_patches
        points = num.zeros((npoints, 3))
        ln_patches = num.array([il for il in range(nl_patches)])
        wd_patches = num.array([iw for iw in range(nw_patches)])

        # Normalize node indices to the interval [-1, 1].
        points_ln =\
            2 * ((ln_patches - num.min(ln_patches)) / num.ptp(ln_patches)) - 1
        points_wd =\
            2 * ((wd_patches - num.min(wd_patches)) / num.ptp(wd_patches)) - 1

        for il in range(nl_patches):
            for iw in range(nw_patches):
                points[il * nw_patches + iw, :] = num.array([
                    points_ln[il] * ln * 0.5,
                    points_wd[iw] * wd * 0.5, 0.0])

    elif points_x.shape[0] > 0 and points_y.shape[0] > 0:
        points = num.zeros(shape=((len(points_x), 3)))
        for i, (x, y) in enumerate(zip(points_x, points_y)):
            points[i, :] = num.array(
                [x * 0.5 * ln, y * 0.5 * wd, 0.0])

    # NOTE(review): if neither ``discretized_basesource`` nor non-empty
    # point coordinates are given, ``points`` is unbound here and this
    # raises — confirm that callers always supply one of the two.

    anch_x, anch_y = map_anchor[anchor]

    points[:, 0] -= anch_x * 0.5 * ln
    points[:, 1] -= anch_y * 0.5 * wd

    rotmat = pmt.euler_to_matrix(dip * d2r, strike * d2r, 0.0)

    return num.dot(rotmat.T, points.T).T

411 

412 

class InvalidGridDef(Exception):
    '''
    Raised when a grid definition string cannot be parsed.
    '''
    pass

415 

416 

class Range(SObject):
    '''
    Convenient range specification.

    Equivalent ways to specify the range [ 0., 1000., ... 10000. ]::

      Range('0 .. 10k : 1k')
      Range(start=0., stop=10e3, step=1e3)
      Range(0, 10e3, 1e3)
      Range('0 .. 10k @ 11')
      Range(start=0., stop=10*km, n=11)

      Range(0, 10e3, n=11)
      Range(values=[x*1e3 for x in range(11)])

    Depending on the use context, it can be possible to omit any part of the
    specification. E.g. in the context of extracting a subset of an already
    existing range, the existing range's specification values would be filled
    in where missing.

    The values are distributed with equal spacing, unless the ``spacing``
    argument is modified. The values can be created offset or relative to an
    external base value with the ``relative`` argument if the use context
    supports this.

    The range specification can be expressed with a short string
    representation::

        'start .. stop @ num | spacing, relative'
        'start .. stop : step | spacing, relative'

    most parts of the expression can be omitted if not needed. Whitespace is
    allowed for readability but can also be omitted.
    '''

    # All parts optional so that missing pieces can be filled in from the
    # use context (see make()).
    start = Float.T(optional=True)
    stop = Float.T(optional=True)
    step = Float.T(optional=True)
    n = Int.T(optional=True)
    values = Array.T(optional=True, dtype=float, shape=(None,))

    spacing = StringChoice.T(
        choices=['lin', 'log', 'symlog'],
        default='lin',
        optional=True)

    relative = StringChoice.T(
        choices=['', 'add', 'mult'],
        default='',
        optional=True)

    # Matches 'start .. stop @ n | stuff' / 'start .. stop : step | stuff'.
    pattern = re.compile(r'^((?P<start>.*)\.\.(?P<stop>[^@|:]*))?'
                         r'(@(?P<n>[^|]+)|:(?P<step>[^|]+))?'
                         r'(\|(?P<stuff>.+))?$')

    def __init__(self, *args, **kwargs):
        # One positional argument: shorthand string to be parsed.
        # Two or three positional arguments: start, stop[, step].
        d = {}
        if len(args) == 1:
            d = self.parse(args[0])
        elif len(args) in (2, 3):
            d['start'], d['stop'] = [float(x) for x in args[:2]]
            if len(args) == 3:
                d['step'] = float(args[2])

        for k, v in kwargs.items():
            if k in d:
                raise ArgumentError('%s specified more than once' % k)

            d[k] = v

        SObject.__init__(self, **d)

    def __str__(self):
        # Reassemble the short string representation from the attributes.
        def sfloat(x):
            if x is not None:
                return '%g' % x
            else:
                return ''

        # NOTE(review): truth-testing ``self.values`` assumes None or a
        # list-like; a multi-element numpy array would raise here — confirm
        # how guts Array.T stores the attribute.
        if self.values:
            return ','.join('%g' % x for x in self.values)

        if self.start is None and self.stop is None:
            s0 = ''
        else:
            s0 = '%s .. %s' % (sfloat(self.start), sfloat(self.stop))

        s1 = ''
        if self.step is not None:
            s1 = [' : %g', ':%g'][s0 == ''] % self.step
        elif self.n is not None:
            s1 = [' @ %i', '@%i'][s0 == ''] % self.n

        if self.spacing == 'lin' and self.relative == '':
            s2 = ''
        else:
            x = []
            if self.spacing != 'lin':
                x.append(self.spacing)

            if self.relative != '':
                x.append(self.relative)

            s2 = ' | %s' % ','.join(x)

        return s0 + s1 + s2

    @classmethod
    def parse(cls, s):
        '''
        Parse a range shorthand string into a dict of constructor kwargs.

        :raises InvalidGridDef: if the string cannot be interpreted
        '''
        s = re.sub(r'\s+', '', s)
        m = cls.pattern.match(s)
        if not m:
            # Not a range expression; fall back to a plain comma-separated
            # list of values.
            try:
                vals = [ufloat(x) for x in s.split(',')]
            except Exception:
                raise InvalidGridDef(
                    '"%s" is not a valid range specification' % s)

            return dict(values=num.array(vals, dtype=float))

        d = m.groupdict()
        try:
            start = ufloat_or_none(d['start'])
            stop = ufloat_or_none(d['stop'])
            step = ufloat_or_none(d['step'])
            n = int_or_none(d['n'])
        except Exception:
            raise InvalidGridDef(
                '"%s" is not a valid range specification' % s)

        spacing = 'lin'
        relative = ''

        # Trailing '| ...' part selects spacing and/or relative mode.
        if d['stuff'] is not None:
            t = d['stuff'].split(',')
            for x in t:
                if x in cls.spacing.choices:
                    spacing = x
                elif x and x in cls.relative.choices:
                    relative = x
                else:
                    raise InvalidGridDef(
                        '"%s" is not a valid range specification' % s)

        return dict(start=start, stop=stop, step=step, n=n, spacing=spacing,
                    relative=relative)

    def make(self, mi=None, ma=None, inc=None, base=None, eps=1e-5):
        '''
        Materialize the range as a list of float values.

        :param mi: fallback lower bound if ``start`` is unset
        :param ma: fallback upper bound if ``stop`` is unset
        :param inc: fallback step if both ``step`` and ``n`` are unset
        :param base: base value for relative ranges ('add'/'mult')
        :param eps: tolerance when checking whether ``step`` evenly
            divides the interval

        :raises InvalidGridDef: if the specification cannot be completed
            in this context
        '''
        if self.values:
            return self.values

        start = self.start
        stop = self.stop
        step = self.step
        n = self.n

        # A negative step means counting downwards; swap the fallback
        # bounds accordingly.
        swap = step is not None and step < 0.
        if start is None:
            start = [mi, ma][swap]
        if stop is None:
            stop = [ma, mi][swap]
        if step is None and inc is not None:
            step = [inc, -inc][ma < mi]

        if start is None or stop is None:
            raise InvalidGridDef(
                'Cannot use range specification "%s" without start '
                'and stop in this context' % self)

        if step is None and n is None:
            step = stop - start

        if n is None:
            if (step < 0) != (stop - start < 0):
                raise InvalidGridDef(
                    'Range specification "%s" has inconsistent ordering '
                    '(step < 0 => stop > start)' % self)

            n = int(round((stop - start) / step)) + 1
            stop2 = start + (n - 1) * step
            if abs(stop - stop2) > eps:
                # Step does not evenly divide the interval; clip to the
                # last value inside it.
                n = int(math.floor((stop - start) / step)) + 1
                stop = start + (n - 1) * step
            else:
                stop = stop2

        if start == stop:
            n = 1

        if self.spacing == 'lin':
            vals = num.linspace(start, stop, n)

        elif self.spacing in ('log', 'symlog'):
            if start > 0. and stop > 0.:
                vals = num.exp(num.linspace(num.log(start),
                                            num.log(stop), n))
            elif start < 0. and stop < 0.:
                vals = -num.exp(num.linspace(num.log(-start),
                                             num.log(-stop), n))
            else:
                raise InvalidGridDef(
                    'Log ranges should not include or cross zero '
                    '(in range specification "%s").' % self)

            if self.spacing == 'symlog':
                # Mirror the log-spaced values around zero.
                nvals = - vals
                vals = num.concatenate((nvals[::-1], vals))

        if self.relative in ('add', 'mult') and base is None:
            raise InvalidGridDef(
                'Cannot use relative range specification in this context.')

        vals = self.make_relative(base, vals)

        return list(map(float, vals))

    def make_relative(self, base, vals):
        '''
        Apply the 'add'/'mult' relative mode to ``vals`` (in place).
        '''
        if self.relative == 'add':
            vals += base

        if self.relative == 'mult':
            vals *= base

        return vals

641 

642 

class GridDefElement(Object):
    '''
    Single ``parameter = range`` element of a grid definition.
    '''

    param = meta.StringID.T()
    rs = Range.T()

    def __init__(self, shorthand=None, **kwargs):
        if shorthand is not None:
            # Split 'param = range-spec' into its two halves.
            parts = shorthand.split('=')
            if len(parts) != 2:
                raise InvalidGridDef(
                    'Invalid grid specification element: %s' % shorthand)

            kwargs['param'] = parts[0].strip()
            kwargs['rs'] = Range(parts[1].strip())

        Object.__init__(self, **kwargs)

    def shorthand(self):
        return self.param + ' = ' + str(self.rs)

665 

class GridDef(Object):
    '''
    Grid definition: a collection of parameter range elements.

    Can be created from a shorthand string with elements separated by
    ``';'`` or newlines, each of the form ``param = range-spec``.
    '''

    elements = List.T(GridDefElement.T())

    def __init__(self, shorthand=None, **kwargs):
        if shorthand is not None:
            t = shorthand.splitlines()
            tt = []
            for x in t:
                x = x.strip()
                if x:
                    tt.extend(x.split(';'))

            elements = []
            for se in tt:
                # Fix: each sub-specification is a single element; the
                # previous code appended nested ``GridDef`` objects into a
                # list declared as ``List.T(GridDefElement.T())``.
                elements.append(GridDefElement(se))

            kwargs['elements'] = elements

        Object.__init__(self, **kwargs)

    def shorthand(self):
        return '; '.join(str(x) for x in self.elements)

689 

690 

class Cloneable(object):
    '''
    Mix-in providing dict-like access to guts properties and a
    :py:meth:`clone` method.

    Relies on the guts ``T`` class attribute of the final class.
    '''

    def __iter__(self):
        # Iterate over the guts property names.
        return iter(self.T.propnames)

    def __getitem__(self, k):
        if k not in self.keys():
            raise KeyError(k)

        return getattr(self, k)

    def __setitem__(self, k, v):
        if k not in self.keys():
            raise KeyError(k)

        return setattr(self, k, v)

    def clone(self, **kwargs):
        '''
        Make a copy of the object.

        A new object of the same class is created and initialized with the
        parameters of the object on which this method is called on. If
        ``kwargs`` are given, these are used to override any of the
        initialization parameters.
        '''

        d = dict(self)
        for k in d:
            v = d[k]
            if isinstance(v, Cloneable):
                # Recursively clone nested cloneable objects.
                d[k] = v.clone()

        d.update(kwargs)
        return self.__class__(**d)

    @classmethod
    def keys(cls):
        '''
        Get list of the source model's parameter names.
        '''

        return cls.T.propnames

734 

735 

class STF(Object, Cloneable):

    '''
    Base class for source time functions.

    The base implementation represents an instantaneous unit impulse at
    the reference time.
    '''

    def __init__(self, effective_duration=None, **kwargs):
        # Allow construction from "effective" duration by converting it to
        # the subclass' native ``duration`` parameter.
        if effective_duration is not None:
            kwargs['duration'] = \
                effective_duration / self.factor_duration_to_effective()

        Object.__init__(self, **kwargs)

    @classmethod
    def factor_duration_to_effective(cls):
        '''
        Conversion factor from native to effective duration (trivial for
        the base class).
        '''
        return 1.0

    def centroid_time(self, tref):
        '''
        Time of the centroid; identical to ``tref`` for an impulse.
        '''
        return tref

    @property
    def effective_duration(self):
        return self.duration * self.factor_duration_to_effective()

    def discretize_t(self, deltat, tref):
        '''
        Discretize the impulse onto the sampling grid.

        Unit weight is linearly distributed onto the two grid samples
        bracketing ``tref``.
        '''
        t_floor = math.floor(tref / deltat) * deltat
        t_ceil = math.ceil(tref / deltat) * deltat
        if t_floor == t_ceil:
            # ``tref`` falls exactly on a sample.
            return num.array([t_floor], dtype=float), num.ones(1)

        times = num.array([t_floor, t_ceil], dtype=float)
        weights = num.array(
            [t_ceil - tref, tref - t_floor], dtype=float) / deltat
        return times, weights

    def base_key(self):
        return (type(self).__name__,)

772 

773 

# Default source time function: a unit impulse at the source time.
g_unit_pulse = STF()

775 

776 

def sshift(times, amplitudes, tshift, deltat):
    '''
    Time-shift a sampled series by ``tshift``.

    Sub-sample shifts are realized by linearly redistributing each
    amplitude onto the two neighbouring samples, so the output has one
    sample more than the input. A shift that is an exact multiple of
    ``deltat`` returns the inputs unchanged.
    '''
    t_lo = math.floor(tshift / deltat) * deltat
    t_hi = math.ceil(tshift / deltat) * deltat
    if t_lo == t_hi:
        return times, amplitudes

    shifted_amplitudes = num.zeros(amplitudes.size + 1, dtype=float)
    shifted_amplitudes[:-1] += (t_hi - tshift) / deltat * amplitudes
    shifted_amplitudes[1:] += (tshift - t_lo) / deltat * amplitudes

    shifted_times = num.arange(times.size + 1, dtype=float) * \
        deltat + times[0] + t_lo

    return shifted_times, shifted_amplitudes

793 

794 

class BoxcarSTF(STF):

    '''
    Boxcar type source time function.

    .. figure :: /static/stf-BoxcarSTF.svg
        :width: 40%
        :align: center
        :alt: boxcar source time function
    '''

    duration = Float.T(
        default=0.0,
        help='duration of the boxcar')

    anchor = Float.T(
        default=0.0,
        help='anchor point with respect to source.time: ('
             '-1.0: left -> source duration [0, T] ~ hypocenter time, '
             ' 0.0: center -> source duration [-T/2, T/2] ~ centroid time, '
             '+1.0: right -> source duration [-T, 0] ~ rupture end time)')

    @classmethod
    def factor_duration_to_effective(cls):
        # For the boxcar, effective and native duration coincide.
        return 1.0

    def centroid_time(self, tref):
        return tref - 0.5 * self.duration * self.anchor

    def discretize_t(self, deltat, tref):
        '''
        Discretize the boxcar onto the sampling grid.

        Amplitudes are obtained by integrating a piecewise-linear
        representation of the boxcar over each sample interval; the series
        is then sub-sample shifted so that its discrete centroid matches
        ``centroid_time(tref)``.
        '''
        # Extent of the boxcar, positioned according to the anchor.
        tmin_stf = tref - self.duration * (self.anchor + 1.) * 0.5
        tmax_stf = tref + self.duration * (1. - self.anchor) * 0.5
        tmin = round(tmin_stf / deltat) * deltat
        tmax = round(tmax_stf / deltat) * deltat
        nt = int(round((tmax - tmin) / deltat)) + 1
        times = num.linspace(tmin, tmax, nt)
        amplitudes = num.ones_like(times)
        if times.size > 1:
            t_edges = num.linspace(
                tmin - 0.5 * deltat, tmax + 0.5 * deltat, nt + 1)
            t = tmin_stf + self.duration * num.array(
                [0.0, 0.0, 1.0, 1.0], dtype=float)
            f = num.array([0., 1., 1., 0.], dtype=float)
            amplitudes = util.plf_integrate_piecewise(t_edges, t, f)
            amplitudes /= num.sum(amplitudes)

        # Correct for the discretization-induced centroid offset.
        tshift = (num.sum(amplitudes * times) - self.centroid_time(tref))

        return sshift(times, amplitudes, -tshift, deltat)

    def base_key(self):
        return (type(self).__name__, self.duration, self.anchor)

848 

class TriangularSTF(STF):

    '''
    Triangular type source time function.

    .. figure :: /static/stf-TriangularSTF.svg
        :width: 40%
        :align: center
        :alt: triangular source time function
    '''

    duration = Float.T(
        default=0.0,
        help='baseline of the triangle')

    peak_ratio = Float.T(
        default=0.5,
        help='fraction of time compared to duration, '
             'when the maximum amplitude is reached')

    anchor = Float.T(
        default=0.0,
        help='anchor point with respect to source.time: ('
             '-1.0: left -> source duration [0, T] ~ hypocenter time, '
             ' 0.0: center -> source duration [-T/2, T/2] ~ centroid time, '
             '+1.0: right -> source duration [-T, 0] ~ rupture end time)')

    @classmethod
    def factor_duration_to_effective(cls, peak_ratio=None):
        # Conversion factor between baseline and effective duration; falls
        # back to the class default peak ratio if none is given.
        if peak_ratio is None:
            peak_ratio = cls.peak_ratio.default()

        return math.sqrt((peak_ratio**2 - peak_ratio + 1.0) * 2.0 / 3.0)

    def __init__(self, effective_duration=None, **kwargs):
        # Translate effective duration to baseline duration, honoring an
        # explicitly given peak ratio.
        if effective_duration is not None:
            kwargs['duration'] = effective_duration / \
                self.factor_duration_to_effective(
                    kwargs.get('peak_ratio', None))

        STF.__init__(self, **kwargs)

    @property
    def centroid_ratio(self):
        # Fractional position of the triangle's centroid on the baseline.
        ra = self.peak_ratio
        rb = 1.0 - ra
        return self.peak_ratio + (rb**2 / 3. - ra**2 / 3.) / (ra + rb)

    def centroid_time(self, tref):
        ca = self.centroid_ratio
        cb = 1.0 - ca
        if self.anchor <= 0.:
            return tref - ca * self.duration * self.anchor
        else:
            return tref - cb * self.duration * self.anchor

    @property
    def effective_duration(self):
        return self.duration * self.factor_duration_to_effective(
            self.peak_ratio)

    def tminmax_stf(self, tref):
        '''
        Get the start and end time of the triangle for reference time
        ``tref``, depending on the anchor setting.
        '''
        ca = self.centroid_ratio
        cb = 1.0 - ca
        if self.anchor <= 0.:
            tmin_stf = tref - ca * self.duration * (self.anchor + 1.)
            tmax_stf = tmin_stf + self.duration
        else:
            tmax_stf = tref + cb * self.duration * (1. - self.anchor)
            tmin_stf = tmax_stf - self.duration

        return tmin_stf, tmax_stf

    def discretize_t(self, deltat, tref):
        '''
        Discretize the triangle onto the sampling grid by integrating the
        piecewise-linear shape over each sample interval.
        '''
        tmin_stf, tmax_stf = self.tminmax_stf(tref)

        tmin = round(tmin_stf / deltat) * deltat
        tmax = round(tmax_stf / deltat) * deltat
        nt = int(round((tmax - tmin) / deltat)) + 1
        if nt > 1:
            t_edges = num.linspace(
                tmin - 0.5 * deltat, tmax + 0.5 * deltat, nt + 1)
            t = tmin_stf + self.duration * num.array(
                [0.0, self.peak_ratio, 1.0], dtype=float)
            f = num.array([0., 1., 0.], dtype=float)
            amplitudes = util.plf_integrate_piecewise(t_edges, t, f)
            amplitudes /= num.sum(amplitudes)
        else:
            amplitudes = num.ones(1)

        times = num.linspace(tmin, tmax, nt)
        return times, amplitudes

    def base_key(self):
        return (
            type(self).__name__, self.duration, self.peak_ratio, self.anchor)

946 

class HalfSinusoidSTF(STF):

    '''
    Half sinusoid type source time function.

    .. figure :: /static/stf-HalfSinusoidSTF.svg
        :width: 40%
        :align: center
        :alt: half-sinusoid source time function
    '''

    duration = Float.T(
        default=0.0,
        help='duration of the half-sinusoid (baseline)')

    anchor = Float.T(
        default=0.0,
        help='anchor point with respect to source.time: ('
             '-1.0: left -> source duration [0, T] ~ hypocenter time, '
             ' 0.0: center -> source duration [-T/2, T/2] ~ centroid time, '
             '+1.0: right -> source duration [-T, 0] ~ rupture end time)')

    exponent = Int.T(
        default=1,
        help='set to 2 to use square of the half-period sinusoidal function.')

    def __init__(self, effective_duration=None, **kwargs):
        # Translate effective duration to baseline duration, taking the
        # chosen exponent into account.
        if effective_duration is not None:
            kwargs['duration'] = effective_duration / \
                self.factor_duration_to_effective(
                    kwargs.get('exponent', 1))

        STF.__init__(self, **kwargs)

    @classmethod
    def factor_duration_to_effective(cls, exponent):
        # Ratio of effective to baseline duration for exponents 1 and 2.
        if exponent == 1:
            return math.sqrt(3.0 * math.pi**2 - 24.0) / math.pi
        elif exponent == 2:
            return math.sqrt(math.pi**2 - 6) / math.pi
        else:
            raise ValueError('Exponent for HalfSinusoidSTF must be 1 or 2.')

    @property
    def effective_duration(self):
        return self.duration * self.factor_duration_to_effective(self.exponent)

    def centroid_time(self, tref):
        return tref - 0.5 * self.duration * self.anchor

    def discretize_t(self, deltat, tref):
        '''
        Discretize the (optionally squared) half-sinusoid onto the
        sampling grid by evaluating its antiderivative at the sample-
        interval edges.
        '''
        tmin_stf = tref - self.duration * (self.anchor + 1.) * 0.5
        tmax_stf = tref + self.duration * (1. - self.anchor) * 0.5
        tmin = round(tmin_stf / deltat) * deltat
        tmax = round(tmax_stf / deltat) * deltat
        nt = int(round((tmax - tmin) / deltat)) + 1
        if nt > 1:
            # Clip the integration edges to the support of the STF.
            t_edges = num.maximum(tmin_stf, num.minimum(tmax_stf, num.linspace(
                tmin - 0.5 * deltat, tmax + 0.5 * deltat, nt + 1)))

            if self.exponent == 1:
                fint = -num.cos(
                    (t_edges - tmin_stf) * (math.pi / self.duration))

            elif self.exponent == 2:
                fint = (t_edges - tmin_stf) / self.duration \
                    - 1.0 / (2.0 * math.pi) * num.sin(
                        (t_edges - tmin_stf) * (2.0 * math.pi / self.duration))
            else:
                raise ValueError(
                    'Exponent for HalfSinusoidSTF must be 1 or 2.')

            # Per-sample weights from differences of the antiderivative,
            # normalized to unit total.
            amplitudes = fint[1:] - fint[:-1]
            amplitudes /= num.sum(amplitudes)
        else:
            amplitudes = num.ones(1)

        times = num.linspace(tmin, tmax, nt)
        return times, amplitudes

    def base_key(self):
        # NOTE(review): ``exponent`` is not included in the base key —
        # confirm whether this is intentional.
        return (type(self).__name__, self.duration, self.anchor)

1030 

class SmoothRampSTF(STF):
    '''
    Smooth-ramp type source time function for near-field displacement.
    Based on moment function of double-couple point source proposed by Bruestle
    and Mueller (PEPI, 1983).

    .. [1] W. Bruestle, G. Mueller (1983), Moment and duration of shallow
        earthquakes from Love-wave modelling for regional distances, PEPI 32,
        312-324.

    .. figure :: /static/stf-SmoothRampSTF.svg
        :width: 40%
        :alt: smooth ramp source time function
    '''
    duration = Float.T(
        default=0.0,
        help='duration of the ramp (baseline)')

    rise_ratio = Float.T(
        default=0.5,
        help='fraction of time compared to duration, '
             'when the maximum amplitude is reached')

    anchor = Float.T(
        default=0.0,
        help='anchor point with respect to source.time: ('
             '-1.0: left -> source duration ``[0, T]`` ~ hypocenter time, '
             '0.0: center -> source duration ``[-T/2, T/2]`` ~ centroid time, '
             '+1.0: right -> source duration ``[-T, 0]`` ~ rupture end time)')

    def discretize_t(self, deltat, tref):
        '''
        Discretize the smooth ramp onto the sampling grid.
        '''
        tmin_stf = tref - self.duration * (self.anchor + 1.) * 0.5
        tmax_stf = tref + self.duration * (1. - self.anchor) * 0.5
        tmin = round(tmin_stf / deltat) * deltat
        tmax = round(tmax_stf / deltat) * deltat
        D = round((tmax - tmin) / deltat) * deltat
        nt = int(round(D / deltat)) + 1
        times = num.linspace(tmin, tmax, nt)
        if nt > 1:
            rise_time = self.rise_ratio * self.duration
            amplitudes = num.ones_like(times)
            tp = tmin + rise_time
            # Cosine-shaped onset for samples within the rise interval;
            # constant level afterwards.
            ii = num.where(times <= tp)
            t_inc = times[ii]
            a = num.cos(num.pi * (t_inc - tmin_stf) / rise_time)
            b = num.cos(3 * num.pi * (t_inc - tmin_stf) / rise_time) - 1.0
            amplitudes[ii] = (9. / 16.) * (1 - a + (1. / 9.) * b)

            # Normalize to unit total.
            amplitudes /= num.sum(amplitudes)
        else:
            amplitudes = num.ones(1)

        return times, amplitudes

    def base_key(self):
        return (type(self).__name__,
                self.duration, self.rise_ratio, self.anchor)

1089 

class ResonatorSTF(STF):
    '''
    Simple resonator like source time function.

    .. math ::

        f(t) = 0 for t < 0
        f(t) = e^{-t/tau} * sin(2 * pi * f * t)


    .. figure :: /static/stf-SmoothRampSTF.svg
        :width: 40%
        :alt: smooth ramp source time function

    '''
    # NOTE(review): the figure above references the SmoothRampSTF graphic —
    # looks like a copy-paste leftover; confirm the intended image.

    duration = Float.T(
        default=0.0,
        help='decay time')

    frequency = Float.T(
        default=1.0,
        help='resonance frequency')

    def discretize_t(self, deltat, tref):
        '''
        Sample the exponentially damped sinusoid starting at ``tref``;
        the series is truncated after three decay times.
        '''
        tmin_stf = tref
        tmax_stf = tref + self.duration * 3
        tmin = math.floor(tmin_stf / deltat) * deltat
        tmax = math.ceil(tmax_stf / deltat) * deltat
        times = util.arange2(tmin, tmax, deltat)
        amplitudes = num.exp(-(times - tref) / self.duration) \
            * num.sin(2.0 * num.pi * self.frequency * (times - tref))

        return times, amplitudes

    def base_key(self):
        return (type(self).__name__,
                self.duration, self.frequency)

1129 

class STFMode(StringChoice):
    '''
    Whether the source time function is applied before ('pre') or after
    ('post') stacking of the Green's functions.
    '''
    choices = ['pre', 'post']

1132 

1133 

class Source(Location, Cloneable):
    '''
    Base class for all source models.

    Bundles what every source shares: a position (inherited from
    :py:class:`pyrocko.model.Location`), an origin time, and an optional
    source time function together with the mode ('pre'/'post') in which it
    is applied.
    '''

    name = String.T(optional=True, default='')

    time = Timestamp.T(
        default=Timestamp.D('1970-01-01 00:00:00'),
        help='source origin time.')

    stf = STF.T(
        optional=True,
        help='source time function.')

    stf_mode = STFMode.T(
        default='post',
        help='whether to apply source time function in pre or '
             'post-processing.')

    def __init__(self, **kwargs):
        Location.__init__(self, **kwargs)

    def update(self, **kwargs):
        '''
        Change some of the source models parameters.

        Example::

            >>> from pyrocko import gf
            >>> s = gf.DCSource()
            >>> s.update(strike=66., dip=33.)
            >>> print(s)
            --- !pf.DCSource
            depth: 0.0
            time: 1970-01-01 00:00:00
            magnitude: 6.0
            strike: 66.0
            dip: 33.0
            rake: 0.0

        '''

        # Item assignment presumably validates the parameter name via the
        # Cloneable/guts machinery (defined outside this file) -- unknown
        # keys should fail rather than be silently accepted. TODO confirm.
        for (k, v) in kwargs.items():
            self[k] = v

    def grid(self, **variables):
        '''
        Create grid of source model variations.

        :param variables: mapping of parameter name to a range of values
            (e.g. a ``gf.Range``) to iterate over.
        :returns: :py:class:`SourceGrid` instance.

        Example::

            >>> from pyrocko import gf
            >>> base = DCSource()
            >>> R = gf.Range
            >>> for s in base.grid(strike=R(0., 360., 90.)):
            ...     print(s.strike)

        '''
        return SourceGrid(base=self, variables=variables)

    def base_key(self):
        '''
        Get key to decide about source discretization / GF stack sharing.

        When two source models differ only in amplitude and origin time, the
        discretization and the GF stacking can be done only once for a unit
        amplitude and a zero origin time and the amplitude and origin times of
        the seismograms can be applied during post-processing of the synthetic
        seismogram.

        For any derived parameterized source model, this method is called to
        decide if discretization and stacking of the source should be shared.
        When two source models return an equal vector of values discretization
        is shared.
        '''
        return (self.depth, self.lat, self.north_shift,
                self.lon, self.east_shift, self.time, type(self).__name__) + \
            self.effective_stf_pre().base_key()

    def get_factor(self):
        '''
        Get the scaling factor to be applied during post-processing.

        Discretization of the base seismogram is usually done for a unit
        amplitude, because a common factor can be efficiently multiplied to
        final seismograms. This eliminates the need to repeat the stacking
        when creating seismograms for a series of source models only differing
        in amplitude.

        This method should return the scaling factor to apply in the
        post-processing (often this is simply the scalar moment of the source).
        '''

        return 1.0

    def effective_stf_pre(self):
        '''
        Return the STF applied before stacking of the Green's functions.

        This STF is used during discretization of the parameterized source
        models, i.e. to produce a temporal distribution of point sources.

        Handling of the STF before stacking of the GFs is less efficient but
        allows to use different source time functions for different parts of
        the source.
        '''

        # Fall back to a unit pulse when no STF is set or when the STF is to
        # be applied in post-processing instead.
        if self.stf is not None and self.stf_mode == 'pre':
            return self.stf
        else:
            return g_unit_pulse

    def effective_stf_post(self):
        '''
        Return the STF applied after stacking of the Green's functions.

        This STF is used in the post-processing of the synthetic seismograms.

        Handling of the STF after stacking of the GFs is usually more efficient
        but is only possible when a common STF is used for all subsources.
        '''

        if self.stf is not None and self.stf_mode == 'post':
            return self.stf
        else:
            return g_unit_pulse

    def _dparams_base(self):
        # Common keyword arguments for building a discretized representation
        # with a single point in space and time.
        return dict(times=arr(self.time),
                    lat=self.lat, lon=self.lon,
                    north_shifts=arr(self.north_shift),
                    east_shifts=arr(self.east_shift),
                    depths=arr(self.depth))

    def _hash(self):
        # Stable hex digest over base_key(), usable e.g. as a cache key.
        sha = sha1()
        for k in self.base_key():
            sha.update(str(k).encode())
        return sha.hexdigest()

    def _dparams_base_repeated(self, times):
        # Like _dparams_base, but with the spatial coordinates repeated to
        # match the given array of source times (one entry per STF sample).
        if times is None:
            return self._dparams_base()

        nt = times.size
        north_shifts = num.repeat(self.north_shift, nt)
        east_shifts = num.repeat(self.east_shift, nt)
        depths = num.repeat(self.depth, nt)
        return dict(times=times,
                    lat=self.lat, lon=self.lon,
                    north_shifts=north_shifts,
                    east_shifts=east_shifts,
                    depths=depths)

    def pyrocko_event(self, store=None, target=None, **kwargs):
        '''
        Convert the source to a :py:class:`pyrocko.model.Event` object.

        The event duration is taken from the STF's effective duration when
        an STF is set; ``store`` and ``target`` are unused here but kept for
        a uniform signature across subclasses.
        '''
        duration = None
        if self.stf:
            duration = self.stf.effective_duration

        return model.Event(
            lat=self.lat,
            lon=self.lon,
            north_shift=self.north_shift,
            east_shift=self.east_shift,
            time=self.time,
            name=self.name,
            depth=self.depth,
            duration=duration,
            **kwargs)

    def geometry(self, **kwargs):
        '''
        Get the source's geometry. To be implemented in subclasses.
        '''
        raise NotImplementedError

    def outline(self, cs='xyz'):
        '''
        Get the source's outline (a single point for a point source).

        :param cs: output coordinate system, one of the
            :ref:`coordinate system names <coordinate-system-names>`
            ``'xyz'``, ``'xy'``, ``'latlon'`` or ``'lonlat'``.
        '''
        points = num.atleast_2d(num.zeros([1, 3]))

        points[:, 0] += self.north_shift
        points[:, 1] += self.east_shift
        points[:, 2] += self.depth
        if cs == 'xyz':
            return points
        elif cs == 'xy':
            return points[:, :2]
        elif cs in ('latlon', 'lonlat'):
            latlon = ne_to_latlon(
                self.lat, self.lon, points[:, 0], points[:, 1])

            latlon = num.array(latlon).T
            if cs == 'latlon':
                return latlon
            else:
                # 'lonlat': swap the two columns.
                return latlon[:, ::-1]

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        '''
        Create a source from a :py:class:`pyrocko.model.Event` object.

        An event duration, if present, is converted into a
        :py:class:`HalfSinusoidSTF`. Additional keyword arguments override
        the values taken from the event.

        :raises ConversionError: if the event has no depth information.
        '''
        if ev.depth is None:
            raise ConversionError(
                'Cannot convert event object to source object: '
                'no depth information available')

        stf = None
        if ev.duration is not None:
            stf = HalfSinusoidSTF(effective_duration=ev.duration)

        d = dict(
            name=ev.name,
            time=ev.time,
            lat=ev.lat,
            lon=ev.lon,
            north_shift=ev.north_shift,
            east_shift=ev.east_shift,
            depth=ev.depth,
            stf=stf)
        d.update(kwargs)
        return cls(**d)

    def get_magnitude(self):
        '''
        Get the source's magnitude. To be implemented in subclasses.
        '''
        raise NotImplementedError(
            '%s does not implement get_magnitude()'
            % self.__class__.__name__)

1356 

1357 

class SourceWithMagnitude(Source):
    '''
    Base class for sources containing a moment magnitude.
    '''

    magnitude = Float.T(
        default=6.0,
        help='Moment magnitude Mw as in [Hanks and Kanamori, 1979]')

    def __init__(self, **kwargs):
        # Accept a scalar moment as an alternative to the magnitude; an
        # explicitly given magnitude takes precedence over a given moment.
        if 'moment' in kwargs:
            moment = kwargs.pop('moment')
            if 'magnitude' not in kwargs:
                kwargs['magnitude'] = float(pmt.moment_to_magnitude(moment))

        Source.__init__(self, **kwargs)

    @property
    def moment(self):
        '''
        Scalar seismic moment in [Nm], derived from ``magnitude``.
        '''
        return float(pmt.magnitude_to_moment(self.magnitude))

    @moment.setter
    def moment(self, value):
        self.magnitude = float(pmt.moment_to_magnitude(value))

    def pyrocko_event(self, store=None, target=None, **kwargs):
        '''
        Convert to a :py:class:`pyrocko.model.Event`, carrying the magnitude.
        '''
        return Source.pyrocko_event(
            self, store, target, magnitude=self.magnitude, **kwargs)

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        '''
        Create a source from an event object, taking over its magnitude.
        '''
        params = {}
        # NOTE: truthiness test -- an event magnitude of exactly 0.0 (or
        # None) is treated as "not set" and falls back to the default.
        if ev.magnitude:
            params['magnitude'] = ev.magnitude

        params.update(kwargs)
        return super(SourceWithMagnitude, cls).from_pyrocko_event(
            ev, **params)

    def get_magnitude(self):
        return self.magnitude

1400 

1401 

class DerivedMagnitudeError(ValidationError):
    '''
    Raised when a derived magnitude is unavailable or over-determined.
    '''

1404 

1405 

class SourceWithDerivedMagnitude(Source):
    '''
    Base class for sources whose magnitude is derived from other parameters.
    '''

    class __T(Source.T):
        # Guts property-type subclass: hooks the source's own conflict check
        # into object validation (presumably run by the guts framework when
        # the object is validated -- confirm against guts documentation).

        def validate_extra(self, val):
            Source.T.validate_extra(self, val)
            val.check_conflicts()

    def check_conflicts(self):
        '''
        Check for parameter conflicts.

        To be overloaded in subclasses. Raises :py:exc:`DerivedMagnitudeError`
        on conflicts.
        '''
        pass

    def get_magnitude(self, store=None, target=None):
        '''
        Get the derived magnitude. To be overloaded in subclasses.

        :raises DerivedMagnitudeError: if no magnitude can be derived.
        '''
        raise DerivedMagnitudeError('No magnitude set.')

    def get_moment(self, store=None, target=None):
        '''
        Get the scalar moment in [Nm], derived from the magnitude.
        '''
        return float(pmt.magnitude_to_moment(
            self.get_magnitude(store, target)))

    def pyrocko_moment_tensor(self, store=None, target=None):
        '''
        Get the moment tensor. To be implemented in subclasses.
        '''
        raise NotImplementedError(
            '%s does not implement pyrocko_moment_tensor()'
            % self.__class__.__name__)

    def pyrocko_event(self, store=None, target=None, **kwargs):
        '''
        Convert to a :py:class:`pyrocko.model.Event`.

        Moment tensor and magnitude are attached when they can be derived,
        otherwise both are left unset.
        '''
        try:
            mt = self.pyrocko_moment_tensor(store, target)
            magnitude = self.get_magnitude()
        except (DerivedMagnitudeError, NotImplementedError):
            mt = None
            magnitude = None

        return Source.pyrocko_event(
            self, store, target,
            moment_tensor=mt,
            magnitude=magnitude,
            **kwargs)

1448 

1449 

class ExplosionSource(SourceWithDerivedMagnitude):
    '''
    An isotropic explosion point source.

    The source strength can be given either as a moment magnitude
    (``magnitude``, or a scalar ``moment`` at construction time) or as a
    volume change (``volume_change``), but not both at once.
    '''

    magnitude = Float.T(
        optional=True,
        help='moment magnitude Mw as in [Hanks and Kanamori, 1979]')

    volume_change = Float.T(
        optional=True,
        help='volume change of the explosion/implosion or '
             'the contracting/extending magmatic source. [m^3]')

    discretized_source_class = meta.DiscretizedExplosionSource

    def __init__(self, **kwargs):
        # Accept a scalar moment as alternative input; an explicitly given
        # magnitude takes precedence.
        if 'moment' in kwargs:
            mom = kwargs.pop('moment')
            if 'magnitude' not in kwargs:
                kwargs['magnitude'] = float(pmt.moment_to_magnitude(mom))

        SourceWithDerivedMagnitude.__init__(self, **kwargs)

    def base_key(self):
        return SourceWithDerivedMagnitude.base_key(self) + \
            (self.volume_change,)

    def check_conflicts(self):
        '''
        Raise :py:exc:`DerivedMagnitudeError` if both ``magnitude`` and
        ``volume_change`` are given (the strength would be over-determined).
        '''
        if self.magnitude is not None and self.volume_change is not None:
            raise DerivedMagnitudeError(
                'Magnitude and volume_change are both defined.')

    def get_magnitude(self, store=None, target=None):
        '''
        Get the moment magnitude, derived from ``volume_change`` if needed.

        With neither parameter set, the magnitude of a 1 Nm source is
        returned.
        '''
        self.check_conflicts()

        if self.magnitude is not None:
            return self.magnitude

        elif self.volume_change is not None:
            moment = self.volume_change * \
                self.get_moment_to_volume_change_ratio(store, target)

            # abs(): an implosion (negative volume change) still has a
            # positive magnitude; the sign is restored at discretization.
            return float(pmt.moment_to_magnitude(abs(moment)))
        else:
            return float(pmt.moment_to_magnitude(1.0))

    def get_volume_change(self, store=None, target=None):
        '''
        Get the volume change, derived from ``magnitude`` if needed.
        '''
        self.check_conflicts()

        if self.volume_change is not None:
            return self.volume_change

        elif self.magnitude is not None:
            moment = float(pmt.magnitude_to_moment(self.magnitude))
            return moment / self.get_moment_to_volume_change_ratio(
                store, target)

        else:
            # Bugfix: forward ``target`` so its interpolation setting is
            # honoured here too (previously only ``store`` was passed).
            return 1.0 / self.get_moment_to_volume_change_ratio(
                store, target)

    def get_moment_to_volume_change_ratio(self, store, target=None):
        '''
        Get the conversion factor between scalar moment and volume change.

        The factor ``2*mu + kappa`` is evaluated from the shear and bulk
        moduli of the store's earth model at the source position, using the
        target's interpolation method when a target is given.

        :raises DerivedMagnitudeError: if no store is given or the source
            position is outside the earth model.
        '''
        if store is None:
            raise DerivedMagnitudeError(
                'Need earth model to convert between volume change and '
                'magnitude.')

        points = num.array(
            [[self.north_shift, self.east_shift, self.depth]], dtype=float)

        interpolation = target.interpolation if target else 'multilinear'
        try:
            shear_moduli = store.config.get_shear_moduli(
                self.lat, self.lon,
                points=points,
                interpolation=interpolation)[0]

            bulk_moduli = store.config.get_bulk_moduli(
                self.lat, self.lon,
                points=points,
                interpolation=interpolation)[0]
        except meta.OutOfBounds:
            raise DerivedMagnitudeError(
                'Could not get shear modulus at source position.')

        return float(2. * shear_moduli + bulk_moduli)

    def get_factor(self):
        # Amplitudes are fully scaled at discretization time, so no
        # post-processing factor is needed.
        return 1.0

    def discretize_basesource(self, store, target=None):
        '''
        Discretize to a
        :py:class:`pyrocko.gf.meta.DiscretizedExplosionSource`.
        '''
        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)

        amplitudes *= self.get_moment(store, target) * math.sqrt(2. / 3.)

        # get_magnitude() used abs(moment); restore the sign for implosions.
        if self.volume_change is not None:
            if self.volume_change < 0.:
                amplitudes *= -1

        return meta.DiscretizedExplosionSource(
            m0s=amplitudes,
            **self._dparams_base_repeated(times))

    def pyrocko_moment_tensor(self, store=None, target=None):
        a = self.get_moment(store, target) * math.sqrt(2. / 3.)
        return pmt.MomentTensor(m=pmt.symmat6(a, a, a, 0., 0., 0.))

1557 

1558 

class RectangularExplosionSource(ExplosionSource):
    '''
    Rectangular or line explosion source.
    '''

    discretized_source_class = meta.DiscretizedExplosionSource

    strike = Float.T(
        default=0.0,
        help='strike direction in [deg], measured clockwise from north')

    dip = Float.T(
        default=90.0,
        help='dip angle in [deg], measured downward from horizontal')

    length = Float.T(
        default=0.,
        help='length of rectangular source area [m]')

    width = Float.T(
        default=0.,
        help='width of rectangular source area [m]')

    # 'center_left' and 'center_right' added to the choices: they were
    # already promised by the original help text and are supported by
    # ``map_anchor`` used for plane positioning.
    anchor = StringChoice.T(
        choices=['top', 'top_left', 'top_right', 'center', 'center_left',
                 'center_right', 'bottom', 'bottom_left', 'bottom_right'],
        default='center',
        optional=True,
        help='Anchor point for positioning the plane, can be: ``top, '
             'top_left, top_right, center, center_left, center_right, '
             'bottom, bottom_left, bottom_right``.')

    nucleation_x = Float.T(
        optional=True,
        help='horizontal position of rupture nucleation in normalized fault '
             'plane coordinates (-1 = left edge, +1 = right edge)')

    nucleation_y = Float.T(
        optional=True,
        help='down-dip position of rupture nucleation in normalized fault '
             'plane coordinates (-1 = upper edge, +1 = lower edge)')

    velocity = Float.T(
        default=3500.,
        help='speed of explosion front [m/s]')

    aggressive_oversampling = Bool.T(
        default=False,
        help='Aggressive oversampling for basesource discretization. '
             'When using \'multilinear\' interpolation oversampling has'
             ' practically no effect.')

    def base_key(self):
        return Source.base_key(self) + (self.strike, self.dip, self.length,
                                        self.width, self.nucleation_x,
                                        self.nucleation_y, self.velocity,
                                        self.anchor)

    def discretize_basesource(self, store, target=None):
        '''
        Discretize the plane into a cloud of point explosions.

        The total moment is distributed over the sub-sources proportionally
        to the amplitudes returned by ``discretize_rect_source``.
        '''
        # Convert normalized nucleation coordinates (-1..+1) to offsets in
        # [m] from the plane center; None means no nucleation point.
        if self.nucleation_x is not None:
            nucx = self.nucleation_x * 0.5 * self.length
        else:
            nucx = None

        if self.nucleation_y is not None:
            nucy = self.nucleation_y * 0.5 * self.width
        else:
            nucy = None

        stf = self.effective_stf_pre()

        points, times, amplitudes, dl, dw, nl, nw = discretize_rect_source(
            store.config.deltas, store.config.deltat,
            self.time, self.north_shift, self.east_shift, self.depth,
            self.strike, self.dip, self.length, self.width, self.anchor,
            self.velocity, stf=stf, nucleation_x=nucx, nucleation_y=nucy)

        # Normalize, then scale so the sub-source moments sum to the total.
        amplitudes /= num.sum(amplitudes)
        amplitudes *= self.get_moment(store, target)

        return meta.DiscretizedExplosionSource(
            lat=self.lat,
            lon=self.lon,
            times=times,
            north_shifts=points[:, 0],
            east_shifts=points[:, 1],
            depths=points[:, 2],
            m0s=amplitudes)

    def outline(self, cs='xyz'):
        '''
        Get the corner points of the source plane.

        :param cs: output coordinate system, one of the
            :ref:`coordinate system names <coordinate-system-names>`
            ``'xyz'``, ``'xy'``, ``'latlon'`` or ``'lonlat'``.
        '''
        points = outline_rect_source(self.strike, self.dip, self.length,
                                     self.width, self.anchor)

        points[:, 0] += self.north_shift
        points[:, 1] += self.east_shift
        points[:, 2] += self.depth
        if cs == 'xyz':
            return points
        elif cs == 'xy':
            return points[:, :2]
        elif cs in ('latlon', 'lonlat'):
            latlon = ne_to_latlon(
                self.lat, self.lon, points[:, 0], points[:, 1])

            latlon = num.array(latlon).T
            if cs == 'latlon':
                return latlon
            else:
                return latlon[:, ::-1]

    def get_nucleation_abs_coord(self, cs='xy'):
        '''
        Get the nucleation point in absolute coordinates.

        Returns ``(None, None)`` when no nucleation point is set.
        '''
        if self.nucleation_x is None:
            return None, None

        coords = from_plane_coords(self.strike, self.dip, self.length,
                                   self.width, self.depth, self.nucleation_x,
                                   self.nucleation_y, lat=self.lat,
                                   lon=self.lon, north_shift=self.north_shift,
                                   east_shift=self.east_shift, cs=cs)
        return coords

1681 

1682 

class DCSource(SourceWithMagnitude):
    '''
    A double-couple point source.
    '''

    strike = Float.T(
        default=0.0,
        help='strike direction in [deg], measured clockwise from north')

    dip = Float.T(
        default=90.0,
        help='dip angle in [deg], measured downward from horizontal')

    rake = Float.T(
        default=0.0,
        help='rake angle in [deg], '
             'measured counter-clockwise from right-horizontal '
             'in on-plane view')

    discretized_source_class = meta.DiscretizedMTSource

    def base_key(self):
        '''
        GF sharing key: position/time plus the focal mechanism angles.
        '''
        return Source.base_key(self) + (self.strike, self.dip, self.rake)

    def get_factor(self):
        # Discretization uses a unit moment; the scalar moment is applied
        # as a common factor in post-processing.
        return float(pmt.magnitude_to_moment(self.magnitude))

    def discretize_basesource(self, store, target=None):
        '''
        Discretize to a :py:class:`pyrocko.gf.meta.DiscretizedMTSource`
        with one unit-moment tensor sample per STF time sample.
        '''
        tensor = pmt.MomentTensor(
            strike=self.strike, dip=self.dip, rake=self.rake)

        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)

        m6s = tensor.m6()[num.newaxis, :] * amplitudes[:, num.newaxis]
        return meta.DiscretizedMTSource(
            m6s=m6s, **self._dparams_base_repeated(times))

    def pyrocko_moment_tensor(self, store=None, target=None):
        return pmt.MomentTensor(
            strike=self.strike,
            dip=self.dip,
            rake=self.rake,
            scalar_moment=self.moment)

    def pyrocko_event(self, store=None, target=None, **kwargs):
        return SourceWithMagnitude.pyrocko_event(
            self, store, target,
            moment_tensor=self.pyrocko_moment_tensor(store, target),
            **kwargs)

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        '''
        Create a DCSource from an event, using its moment tensor's first
        nodal plane when available.
        '''
        params = {}
        mt = ev.moment_tensor
        if mt:
            (strike, dip, rake), _ = mt.both_strike_dip_rake()
            params.update(
                strike=float(strike),
                dip=float(dip),
                rake=float(rake),
                magnitude=float(mt.moment_magnitude()))

        params.update(kwargs)
        return super(DCSource, cls).from_pyrocko_event(ev, **params)

1747 

1748 

class CLVDSource(SourceWithMagnitude):
    '''
    A pure CLVD point source.
    '''

    discretized_source_class = meta.DiscretizedMTSource

    azimuth = Float.T(
        default=0.0,
        help='azimuth direction of largest dipole, clockwise from north [deg]')

    dip = Float.T(
        default=90.,
        help='dip direction of largest dipole, downward from horizontal [deg]')

    def base_key(self):
        return Source.base_key(self) + (self.azimuth, self.dip)

    def get_factor(self):
        # Scalar moment, applied as common factor in post-processing.
        return float(pmt.magnitude_to_moment(self.magnitude))

    @property
    def m6(self):
        '''
        Moment tensor (6 components) of the oriented, moment-scaled CLVD.
        '''
        # Unrotated CLVD with eigenvalues (-a/2, -a/2, a), principal axis
        # along z, then rotated to the requested azimuth/dip.
        a = math.sqrt(4. / 3.) * self.get_factor()
        m = pmt.symmat6(-0.5 * a, -0.5 * a, a, 0., 0., 0.)
        rotmat1 = pmt.euler_to_matrix(
            d2r * (self.dip - 90.),
            d2r * (self.azimuth - 90.),
            0.)
        m = num.dot(rotmat1.T, num.dot(m, rotmat1))
        return pmt.to6(m)

    @property
    def m6_astuple(self):
        return tuple(self.m6.tolist())

    def discretize_basesource(self, store, target=None):
        '''
        Discretize to a :py:class:`pyrocko.gf.meta.DiscretizedMTSource`,
        normalized to unit moment (the factor is re-applied later).
        '''
        factor = self.get_factor()
        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)
        return meta.DiscretizedMTSource(
            m6s=self.m6[num.newaxis, :] * amplitudes[:, num.newaxis] / factor,
            **self._dparams_base_repeated(times))

    def pyrocko_moment_tensor(self, store=None, target=None):
        return pmt.MomentTensor(m=pmt.symmat6(*self.m6_astuple))

    def pyrocko_event(self, store=None, target=None, **kwargs):
        # Fix: reuse the already-computed moment tensor instead of
        # recomputing it for the ``moment_tensor`` keyword.
        mt = self.pyrocko_moment_tensor(store, target)
        return Source.pyrocko_event(
            self, store, target,
            moment_tensor=mt,
            magnitude=float(mt.moment_magnitude()),
            **kwargs)

1803 

1804 

class VLVDSource(SourceWithDerivedMagnitude):
    '''
    Volumetric linear vector dipole source.

    This source is a parameterization for a restricted moment tensor point
    source, useful to represent dyke or sill like inflation or deflation
    sources. The restriction is such that the moment tensor is rotational
    symmetric. It can be represented by a superposition of a linear vector
    dipole (here we use a CLVD for convenience) and an isotropic component.
    The restricted moment tensor has 4 degrees of freedom: 2 independent
    eigenvalues and 2 rotation angles orienting the symmetry axis.

    In this parameterization, the isotropic component is controlled by
    ``volume_change``. To define the moment tensor, it must be converted to
    the scalar moment of the MT's isotropic component. For the conversion,
    the shear modulus at the source's position must be known. This value is
    extracted from the earth model defined in the GF store in use.

    The CLVD part is controlled by its scalar moment :math:`M_0`:
    ``clvd_moment``. The sign of ``clvd_moment`` is used to switch between a
    positive or negative CLVD (the sign of the largest eigenvalue).
    '''

    discretized_source_class = meta.DiscretizedMTSource

    azimuth = Float.T(
        default=0.0,
        help='azimuth direction of symmetry axis, clockwise from north [deg].')

    dip = Float.T(
        default=90.,
        help='dip direction of symmetry axis, downward from horizontal [deg].')

    volume_change = Float.T(
        default=0.,
        help='volume change of the inflation/deflation [m^3].')

    clvd_moment = Float.T(
        default=0.,
        help='scalar moment :math:`M_0` of the CLVD component [Nm]. The sign '
             'controls the sign of the CLVD (the sign of its largest '
             'eigenvalue).')

    def get_moment_to_volume_change_ratio(self, store, target):
        '''
        Get the conversion factor ``2*mu + kappa`` between scalar moment and
        volume change at the source position.

        :raises DerivedMagnitudeError: if store or target are missing, or
            the source position is outside the earth model.
        '''
        if store is None or target is None:
            raise DerivedMagnitudeError(
                'Need earth model to convert between volume change and '
                'magnitude.')

        points = num.array(
            [[self.north_shift, self.east_shift, self.depth]], dtype=float)

        try:
            shear_moduli = store.config.get_shear_moduli(
                self.lat, self.lon,
                points=points,
                interpolation=target.interpolation)[0]

            bulk_moduli = store.config.get_bulk_moduli(
                self.lat, self.lon,
                points=points,
                interpolation=target.interpolation)[0]
        except meta.OutOfBounds:
            raise DerivedMagnitudeError(
                'Could not get shear modulus at source position.')

        return float(2. * shear_moduli + bulk_moduli)

    def base_key(self):
        return Source.base_key(self) + \
            (self.azimuth, self.dip, self.volume_change, self.clvd_moment)

    def get_magnitude(self, store=None, target=None):
        '''
        Get the magnitude of the combined CLVD + isotropic tensor.
        '''
        mt = self.pyrocko_moment_tensor(store, target)
        return float(pmt.moment_to_magnitude(mt.moment))

    def get_m6(self, store, target):
        '''
        Get the 6-component moment tensor: oriented CLVD plus isotropic part
        derived from ``volume_change``.
        '''
        # CLVD with principal axis along z, rotated to azimuth/dip.
        a = math.sqrt(4. / 3.) * self.clvd_moment
        m_clvd = pmt.symmat6(-0.5 * a, -0.5 * a, a, 0., 0., 0.)

        rotmat1 = pmt.euler_to_matrix(
            d2r * (self.dip - 90.),
            d2r * (self.azimuth - 90.),
            0.)
        m_clvd = num.dot(rotmat1.T, num.dot(m_clvd, rotmat1))

        m_iso = self.volume_change * \
            self.get_moment_to_volume_change_ratio(store, target)

        m_iso = pmt.symmat6(m_iso, m_iso, m_iso, 0.,
                            0., 0.,) * math.sqrt(2. / 3)

        m = pmt.to6(m_clvd) + pmt.to6(m_iso)
        return m

    def get_moment(self, store=None, target=None):
        return float(pmt.magnitude_to_moment(
            self.get_magnitude(store, target)))

    def get_m6_astuple(self, store, target):
        m6 = self.get_m6(store, target)
        return tuple(m6.tolist())

    def discretize_basesource(self, store, target=None):
        '''
        Discretize to a :py:class:`pyrocko.gf.meta.DiscretizedMTSource`.
        '''
        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)

        m6 = self.get_m6(store, target)

        # Fix: build one tensor row per STF sample via an outer product,
        # matching the other MT-based sources. The previous in-place
        # ``m6 *= amplitudes`` broadcast a length-6 against a length-nt
        # array and emitted a single row even when ``times`` had several
        # samples; identical result for the single-sample case.
        m6s = m6[num.newaxis, :] * amplitudes[:, num.newaxis] \
            / self.get_factor()

        return meta.DiscretizedMTSource(
            m6s=m6s,
            **self._dparams_base_repeated(times))

    def pyrocko_moment_tensor(self, store=None, target=None):
        m6_astuple = self.get_m6_astuple(store, target)
        return pmt.MomentTensor(m=pmt.symmat6(*m6_astuple))

1922 

1923 

class MTSource(Source):
    '''
    A moment tensor point source.
    '''

    discretized_source_class = meta.DiscretizedMTSource

    mnn = Float.T(
        default=1.,
        help='north-north component of moment tensor in [Nm]')

    mee = Float.T(
        default=1.,
        help='east-east component of moment tensor in [Nm]')

    mdd = Float.T(
        default=1.,
        help='down-down component of moment tensor in [Nm]')

    mne = Float.T(
        default=0.,
        help='north-east component of moment tensor in [Nm]')

    mnd = Float.T(
        default=0.,
        help='north-down component of moment tensor in [Nm]')

    med = Float.T(
        default=0.,
        help='east-down component of moment tensor in [Nm]')

    def __init__(self, **kwargs):
        # Allow a 6-component vector ``m6`` as construction shortcut for
        # the individual tensor components.
        if 'm6' in kwargs:
            for (k, v) in zip('mnn mee mdd mne mnd med'.split(),
                              kwargs.pop('m6')):
                kwargs[k] = float(v)

        Source.__init__(self, **kwargs)

    @property
    def m6(self):
        '''
        Moment tensor as array ``(mnn, mee, mdd, mne, mnd, med)``.
        '''
        return num.array(self.m6_astuple)

    @property
    def m6_astuple(self):
        return (self.mnn, self.mee, self.mdd, self.mne, self.mnd, self.med)

    @m6.setter
    def m6(self, value):
        self.mnn, self.mee, self.mdd, self.mne, self.mnd, self.med = value

    def base_key(self):
        return Source.base_key(self) + self.m6_astuple

    def discretize_basesource(self, store, target=None):
        '''
        Discretize to a :py:class:`pyrocko.gf.meta.DiscretizedMTSource`
        with one tensor row per STF time sample.
        '''
        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)
        return meta.DiscretizedMTSource(
            m6s=self.m6[num.newaxis, :] * amplitudes[:, num.newaxis],
            **self._dparams_base_repeated(times))

    def get_magnitude(self, store=None, target=None):
        '''
        Get the moment magnitude from the scalar moment, computed as the
        Frobenius norm of the full tensor divided by sqrt(2).
        '''
        m6 = self.m6
        return pmt.moment_to_magnitude(
            math.sqrt(num.sum(m6[0:3]**2) + 2.0 * num.sum(m6[3:6]**2)) /
            math.sqrt(2.))

    def pyrocko_moment_tensor(self, store=None, target=None):
        return pmt.MomentTensor(m=pmt.symmat6(*self.m6_astuple))

    def pyrocko_event(self, store=None, target=None, **kwargs):
        # Fix: reuse the already-computed moment tensor instead of
        # recomputing it for the ``moment_tensor`` keyword.
        mt = self.pyrocko_moment_tensor(store, target)
        return Source.pyrocko_event(
            self, store, target,
            moment_tensor=mt,
            magnitude=float(mt.moment_magnitude()),
            **kwargs)

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        '''
        Create an MTSource from an event; without a tensor, fall back to an
        isotropic tensor matching the event magnitude.
        '''
        d = {}
        mt = ev.moment_tensor
        if mt:
            d.update(m6=tuple(map(float, mt.m6())))
        else:
            if ev.magnitude is not None:
                mom = pmt.magnitude_to_moment(ev.magnitude)
                v = math.sqrt(2. / 3.) * mom
                d.update(m6=(v, v, v, 0., 0., 0.))

        d.update(kwargs)
        return super(MTSource, cls).from_pyrocko_event(ev, **d)

2016 

2017 

# Mapping of symbolic anchor names to normalized plane coordinates (x, y):
# x runs along strike (-1.0 = left edge, +1.0 = right edge), y runs down
# dip (-1.0 = top edge, +1.0 = bottom edge), matching the nucleation_x/y
# convention documented on the rectangular source classes.
map_anchor = {
    'center': (0.0, 0.0),
    'center_left': (-1.0, 0.0),
    'center_right': (1.0, 0.0),
    'top': (0.0, -1.0),
    'top_left': (-1.0, -1.0),
    'top_right': (1.0, -1.0),
    'bottom': (0.0, 1.0),
    'bottom_left': (-1.0, 1.0),
    'bottom_right': (1.0, 1.0)}

2028 

2029 

2030class RectangularSource(SourceWithDerivedMagnitude): 

2031 ''' 

2032 Classical Haskell source model modified for bilateral rupture. 

2033 ''' 

2034 

2035 discretized_source_class = meta.DiscretizedMTSource 

2036 

2037 magnitude = Float.T( 

2038 optional=True, 

2039 help='moment magnitude Mw as in [Hanks and Kanamori, 1979]') 

2040 

2041 strike = Float.T( 

2042 default=0.0, 

2043 help='strike direction in [deg], measured clockwise from north') 

2044 

2045 dip = Float.T( 

2046 default=90.0, 

2047 help='dip angle in [deg], measured downward from horizontal') 

2048 

2049 rake = Float.T( 

2050 default=0.0, 

2051 help='rake angle in [deg], ' 

2052 'measured counter-clockwise from right-horizontal ' 

2053 'in on-plane view') 

2054 

2055 length = Float.T( 

2056 default=0., 

2057 help='length of rectangular source area [m]') 

2058 

2059 width = Float.T( 

2060 default=0., 

2061 help='width of rectangular source area [m]') 

2062 

2063 anchor = StringChoice.T( 

2064 choices=['top', 'top_left', 'top_right', 'center', 'bottom', 

2065 'bottom_left', 'bottom_right'], 

2066 default='center', 

2067 optional=True, 

2068 help='Anchor point for positioning the plane, can be: ``top, center ' 

2069 'bottom, top_left, top_right,bottom_left,' 

2070 'bottom_right, center_left, center right``.') 

2071 

2072 nucleation_x = Float.T( 

2073 optional=True, 

2074 help='horizontal position of rupture nucleation in normalized fault ' 

2075 'plane coordinates (``-1.`` = left edge, ``+1.`` = right edge)') 

2076 

2077 nucleation_y = Float.T( 

2078 optional=True, 

2079 help='down-dip position of rupture nucleation in normalized fault ' 

2080 'plane coordinates (``-1.`` = upper edge, ``+1.`` = lower edge)') 

2081 

2082 velocity = Float.T( 

2083 default=3500., 

2084 help='speed of rupture front [m/s]') 

2085 

2086 slip = Float.T( 

2087 optional=True, 

2088 help='Slip on the rectangular source area [m]') 

2089 

2090 opening_fraction = Float.T( 

2091 default=0., 

2092 help='Determines fraction of slip related to opening. ' 

2093 '(``-1``: pure tensile closing, ' 

2094 '``0``: pure shear, ' 

2095 '``1``: pure tensile opening)') 

2096 

2097 decimation_factor = Int.T( 

2098 optional=True, 

2099 default=1, 

2100 help='Sub-source decimation factor, a larger decimation will' 

2101 ' make the result inaccurate but shorten the necessary' 

2102 ' computation time (use for testing puposes only).') 

2103 

2104 aggressive_oversampling = Bool.T( 

2105 default=False, 

2106 help='Aggressive oversampling for basesource discretization. ' 

2107 'When using \'multilinear\' interpolation oversampling has' 

2108 ' practically no effect.') 

2109 

2110 def __init__(self, **kwargs): 

2111 if 'moment' in kwargs: 

2112 mom = kwargs.pop('moment') 

2113 if 'magnitude' not in kwargs: 

2114 kwargs['magnitude'] = float(pmt.moment_to_magnitude(mom)) 

2115 

2116 SourceWithDerivedMagnitude.__init__(self, **kwargs) 

2117 

2118 def base_key(self): 

2119 return SourceWithDerivedMagnitude.base_key(self) + ( 

2120 self.magnitude, 

2121 self.slip, 

2122 self.strike, 

2123 self.dip, 

2124 self.rake, 

2125 self.length, 

2126 self.width, 

2127 self.nucleation_x, 

2128 self.nucleation_y, 

2129 self.velocity, 

2130 self.decimation_factor, 

2131 self.anchor) 

2132 

2133 def check_conflicts(self): 

2134 if self.magnitude is not None and self.slip is not None: 

2135 raise DerivedMagnitudeError( 

2136 'Magnitude and slip are both defined.') 

2137 

    def get_magnitude(self, store=None, target=None):
        '''
        Get the moment magnitude of this source.

        If :py:attr:`magnitude` is set, it is returned directly. If
        :py:attr:`slip` is set instead, the magnitude is derived from the
        discretized moment amplitudes, which requires both ``store`` and
        ``target`` to be given. With neither attribute set, the magnitude
        of a unit moment (1 Nm) is returned.

        :raises DerivedMagnitudeError:
            if slip is given but ``store`` or ``target`` is missing.
        '''
        self.check_conflicts()
        if self.magnitude is not None:
            return self.magnitude

        elif self.slip is not None:
            if None in (store, target):
                raise DerivedMagnitudeError(
                    'Magnitude for a rectangular source with slip defined '
                    'can only be derived when earth model and target '
                    'interpolation method are available.')

            # third element of _discretize() is the amplitude array
            amplitudes = self._discretize(store, target)[2]
            if amplitudes.ndim == 2:
                # CLVD component has no net moment, leave out
                return float(pmt.moment_to_magnitude(
                    num.sum(num.abs(amplitudes[0:2, :]).sum())))
            else:
                return float(pmt.moment_to_magnitude(num.sum(amplitudes)))

        else:
            return float(pmt.moment_to_magnitude(1.0))

2160 

    def get_factor(self):
        # No global scaling factor: the discretized amplitudes already
        # carry the full moment.
        return 1.0

2163 

2164 def get_slip_tensile(self): 

2165 return self.slip * self.opening_fraction 

2166 

2167 def get_slip_shear(self): 

2168 return self.slip - abs(self.get_slip_tensile) 

2169 

    def _discretize(self, store, target):
        '''
        Discretize the rectangular source into point sub-sources.

        :returns:
            Sub-source coordinates, rupture onset times, moment amplitudes,
            patch dimensions ``dl``/``dw`` and patch counts ``nl``/``nw``.
            The amplitude array is 1-D for pure shear; with opening it is
            2-D with rows (shear, tensile) for moment input or
            (shear, iso, clvd) for slip input.
        '''
        if self.nucleation_x is not None:
            # convert normalized nucleation coordinate to metres on plane
            nucx = self.nucleation_x * 0.5 * self.length
        else:
            nucx = None

        if self.nucleation_y is not None:
            nucy = self.nucleation_y * 0.5 * self.width
        else:
            nucy = None

        stf = self.effective_stf_pre()

        points, times, amplitudes, dl, dw, nl, nw = discretize_rect_source(
            store.config.deltas, store.config.deltat,
            self.time, self.north_shift, self.east_shift, self.depth,
            self.strike, self.dip, self.length, self.width, self.anchor,
            self.velocity, stf=stf, nucleation_x=nucx, nucleation_y=nucy,
            decimation_factor=self.decimation_factor,
            aggressive_oversampling=self.aggressive_oversampling)

        if self.slip is not None:
            # slip given: scale amplitudes with elastic moduli from the
            # earth model to obtain moments
            if target is not None:
                interpolation = target.interpolation
            else:
                interpolation = 'nearest_neighbor'
                logger.warning(
                    'no target information available, will use '
                    '"nearest_neighbor" interpolation when extracting shear '
                    'modulus from earth model')

            shear_moduli = store.config.get_shear_moduli(
                self.lat, self.lon,
                points=points,
                interpolation=interpolation)

            tensile_slip = self.get_slip_tensile()
            shear_slip = self.slip - abs(tensile_slip)

            amplitudes_total = [shear_moduli * shear_slip]
            if tensile_slip != 0:
                bulk_moduli = store.config.get_bulk_moduli(
                    self.lat, self.lon,
                    points=points,
                    interpolation=interpolation)

                tensile_iso = bulk_moduli * tensile_slip
                tensile_clvd = (2. / 3.) * shear_moduli * tensile_slip

                amplitudes_total.extend([tensile_iso, tensile_clvd])

            amplitudes_total = num.vstack(amplitudes_total).squeeze() * \
                amplitudes * dl * dw

        else:
            # normalization to retain total moment
            amplitudes_norm = amplitudes / num.sum(amplitudes)
            moment = self.get_moment(store, target)

            amplitudes_total = [
                amplitudes_norm * moment * (1 - abs(self.opening_fraction))]
            if self.opening_fraction != 0.:
                amplitudes_total.append(
                    amplitudes_norm * self.opening_fraction * moment)

            amplitudes_total = num.vstack(amplitudes_total).squeeze()

        return points, times, num.atleast_1d(amplitudes_total), dl, dw, nl, nw

2238 

2239 def discretize_basesource(self, store, target=None): 

2240 

2241 points, times, amplitudes, dl, dw, nl, nw = self._discretize( 

2242 store, target) 

2243 

2244 mot = pmt.MomentTensor( 

2245 strike=self.strike, dip=self.dip, rake=self.rake) 

2246 m6s = num.repeat(mot.m6()[num.newaxis, :], times.size, axis=0) 

2247 

2248 if amplitudes.ndim == 1: 

2249 m6s[:, :] *= amplitudes[:, num.newaxis] 

2250 elif amplitudes.ndim == 2: 

2251 # shear MT components 

2252 rotmat1 = pmt.euler_to_matrix( 

2253 d2r * self.dip, d2r * self.strike, d2r * -self.rake) 

2254 m6s[:, :] *= amplitudes[0, :][:, num.newaxis] 

2255 

2256 if amplitudes.shape[0] == 2: 

2257 # tensile MT components - moment/magnitude input 

2258 tensile = pmt.symmat6(1., 1., 3., 0., 0., 0.) 

2259 rot_tensile = pmt.to6( 

2260 num.dot(rotmat1.T, num.dot(tensile, rotmat1))) 

2261 

2262 m6s_tensile = rot_tensile[ 

2263 num.newaxis, :] * amplitudes[1, :][:, num.newaxis] 

2264 m6s += m6s_tensile 

2265 

2266 elif amplitudes.shape[0] == 3: 

2267 # tensile MT components - slip input 

2268 iso = pmt.symmat6(1., 1., 1., 0., 0., 0.) 

2269 clvd = pmt.symmat6(-1., -1., 2., 0., 0., 0.) 

2270 

2271 rot_iso = pmt.to6( 

2272 num.dot(rotmat1.T, num.dot(iso, rotmat1))) 

2273 rot_clvd = pmt.to6( 

2274 num.dot(rotmat1.T, num.dot(clvd, rotmat1))) 

2275 

2276 m6s_iso = rot_iso[ 

2277 num.newaxis, :] * amplitudes[1, :][:, num.newaxis] 

2278 m6s_clvd = rot_clvd[ 

2279 num.newaxis, :] * amplitudes[2, :][:, num.newaxis] 

2280 m6s += m6s_iso + m6s_clvd 

2281 else: 

2282 raise ValueError('Unknwown amplitudes shape!') 

2283 else: 

2284 raise ValueError( 

2285 'Unexpected dimension of {}'.format(amplitudes.ndim)) 

2286 

2287 ds = meta.DiscretizedMTSource( 

2288 lat=self.lat, 

2289 lon=self.lon, 

2290 times=times, 

2291 north_shifts=points[:, 0], 

2292 east_shifts=points[:, 1], 

2293 depths=points[:, 2], 

2294 m6s=m6s, 

2295 dl=dl, 

2296 dw=dw, 

2297 nl=nl, 

2298 nw=nw) 

2299 

2300 return ds 

2301 

    def xy_to_coord(self, x, y, cs='xyz'):
        '''
        Convert normalized on-plane coordinates to geographical coordinates.

        :param x:
            Along-strike coordinate(s) in normalized plane coordinates
            (``-1.`` = left edge, ``+1.`` = right edge).
        :param y:
            Down-dip coordinate(s) in normalized plane coordinates
            (``-1.`` = upper edge, ``+1.`` = lower edge).
        :param cs:
            :ref:`Output coordinate system <coordinate-system-names>`.

        :returns:
            Coordinates in the requested system, one row per input point.
        :rtype:
            :py:class:`~numpy.ndarray`
        '''
        ln, wd = self.length, self.width
        strike, dip = self.strike, self.dip

        def array_check(variable):
            # accept scalars and lists by coercing to ndarray
            if not isinstance(variable, num.ndarray):
                return num.array(variable)
            else:
                return variable

        x, y = array_check(x), array_check(y)

        if x.shape[0] != y.shape[0]:
            raise ValueError('Shapes of x and y mismatch')

        # scale normalized coordinates to metres on the plane
        x, y = x * 0.5 * ln, y * 0.5 * wd

        points = num.hstack((
            x.reshape(-1, 1), y.reshape(-1, 1), num.zeros((x.shape[0], 1))))

        # shift relative to the configured anchor point
        anch_x, anch_y = map_anchor[self.anchor]
        points[:, 0] -= anch_x * 0.5 * ln
        points[:, 1] -= anch_y * 0.5 * wd

        # rotate the plane into strike/dip orientation
        rotmat = num.asarray(
            pmt.euler_to_matrix(dip * d2r, strike * d2r, 0.0))

        points_rot = num.dot(rotmat.T, points.T).T

        points_rot[:, 0] += self.north_shift
        points_rot[:, 1] += self.east_shift
        points_rot[:, 2] += self.depth

        if cs == 'xyz':
            return points_rot
        elif cs == 'xy':
            return points_rot[:, :2]
        elif cs in ('latlon', 'lonlat', 'latlondepth'):
            latlon = ne_to_latlon(
                self.lat, self.lon, points_rot[:, 0], points_rot[:, 1])
            latlon = num.array(latlon).T
            if cs == 'latlon':
                return latlon
            elif cs == 'lonlat':
                return latlon[:, ::-1]
            else:
                # 'latlondepth': append the depth column
                return num.concatenate(
                    (latlon, points_rot[:, 2].reshape((len(points_rot), 1))),
                    axis=1)

2351 

2352 def outline(self, cs='xyz'): 

2353 x = num.array([-1., 1., 1., -1., -1.]) 

2354 y = num.array([-1., -1., 1., 1., -1.]) 

2355 

2356 return self.xy_to_coord(x, y, cs=cs) 

2357 

    def points_on_source(self, cs='xyz', **kwargs):
        '''
        Convert relative plane coordinates to geographical coordinates.

        Keyword arguments are passed through to
        ``points_on_rect_source`` (the point coordinates on the plane).

        :param cs:
            :ref:`Output coordinate system <coordinate-system-names>`.
        '''

        points = points_on_rect_source(
            self.strike, self.dip, self.length, self.width,
            self.anchor, **kwargs)

        # shift from anchor-relative to absolute coordinates
        points[:, 0] += self.north_shift
        points[:, 1] += self.east_shift
        points[:, 2] += self.depth
        if cs == 'xyz':
            return points
        elif cs == 'xy':
            return points[:, :2]
        elif cs in ('latlon', 'lonlat', 'latlondepth'):
            latlon = ne_to_latlon(
                self.lat, self.lon, points[:, 0], points[:, 1])

            latlon = num.array(latlon).T
            if cs == 'latlon':
                return latlon
            elif cs == 'lonlat':
                return latlon[:, ::-1]
            else:
                # 'latlondepth': append the depth column
                return num.concatenate(
                    (latlon, points[:, 2].reshape((len(points), 1))),
                    axis=1)

2384 

    def geometry(self, *args, **kwargs):
        '''
        Build a Geometry object for the discretized source.

        Arguments are passed through to :py:meth:`discretize_basesource`
        (``store`` and optionally ``target``).
        '''
        from pyrocko.model import Geometry

        ds = self.discretize_basesource(*args, **kwargs)
        nx, ny = ds.nl, ds.nw

        def patch_outlines_xy(nx, ny):
            # regular grid of corner points in normalized plane coords
            points = num.zeros((nx * ny, 2))
            points[:, 0] = num.tile(num.linspace(-1., 1., nx), ny)
            points[:, 1] = num.repeat(num.linspace(-1., 1., ny), nx)

            return points

        points_ds = patch_outlines_xy(nx + 1, ny + 1)
        npoints = (nx + 1) * (ny + 1)

        # vertex rows: (lat, lon, north, east, depth)
        vertices = num.hstack((
            num.ones((npoints, 1)) * self.lat,
            num.ones((npoints, 1)) * self.lon,
            self.xy_to_coord(points_ds[:, 0], points_ds[:, 1], cs='xyz')))

        # quad faces, closed by repeating the first corner index
        faces = num.array([[
            iy * (nx + 1) + ix,
            iy * (nx + 1) + ix + 1,
            (iy + 1) * (nx + 1) + ix + 1,
            (iy + 1) * (nx + 1) + ix,
            iy * (nx + 1) + ix]
            for iy in range(ny) for ix in range(nx)])

        xyz = self.outline('xyz')
        latlon = num.ones((5, 2)) * num.array([self.lat, self.lon])
        patchverts = num.hstack((latlon, xyz))
        face_outlines = patchverts[:-1, :]  # last vertex double

        geom = Geometry()
        geom.setup(vertices, faces)
        geom.set_outlines([face_outlines])

        if self.stf:
            geom.times = num.unique(ds.times)

        if self.nucleation_x is not None and self.nucleation_y is not None:
            geom.add_property('t_arrival', ds.times)

        geom.add_property(
            'moment', ds.moments().reshape(ds.nl*ds.nw, -1))

        return geom

2433 

2434 def get_nucleation_abs_coord(self, cs='xy'): 

2435 

2436 if self.nucleation_x is None: 

2437 return None, None 

2438 

2439 coords = from_plane_coords(self.strike, self.dip, self.length, 

2440 self.width, self.depth, self.nucleation_x, 

2441 self.nucleation_y, lat=self.lat, 

2442 lon=self.lon, north_shift=self.north_shift, 

2443 east_shift=self.east_shift, cs=cs) 

2444 return coords 

2445 

    def pyrocko_moment_tensor(self, store=None, target=None):
        # Assemble the moment tensor from strike/dip/rake and the (possibly
        # derived) scalar moment.
        return pmt.MomentTensor(
            strike=self.strike,
            dip=self.dip,
            rake=self.rake,
            scalar_moment=self.get_moment(store, target))

2452 

    def pyrocko_event(self, store=None, target=None, **kwargs):
        # Delegate to the parent implementation; extra kwargs are passed
        # through unchanged.
        return SourceWithDerivedMagnitude.pyrocko_event(
            self, store, target,
            **kwargs)

2457 

2458 @classmethod 

2459 def from_pyrocko_event(cls, ev, **kwargs): 

2460 d = {} 

2461 mt = ev.moment_tensor 

2462 if mt: 

2463 (strike, dip, rake), _ = mt.both_strike_dip_rake() 

2464 d.update( 

2465 strike=float(strike), 

2466 dip=float(dip), 

2467 rake=float(rake), 

2468 magnitude=float(mt.moment_magnitude())) 

2469 

2470 d.update(kwargs) 

2471 return super(RectangularSource, cls).from_pyrocko_event(ev, **d) 

2472 

2473 

class PseudoDynamicRupture(SourceWithDerivedMagnitude):
    '''
    Combined Eikonal and Okada quasi-dynamic rupture model.

    Details are described in :doc:`/topics/pseudo-dynamic-rupture`.
    Note: attribute `stf` is not used so far, but kept for future applications.
    '''

    discretized_source_class = meta.DiscretizedMTSource

    strike = Float.T(
        default=0.0,
        help='Strike direction in [deg], measured clockwise from north.')

    dip = Float.T(
        default=0.0,
        help='Dip angle in [deg], measured downward from horizontal.')

    length = Float.T(
        default=10. * km,
        help='Length of rectangular source area in [m].')

    width = Float.T(
        default=5. * km,
        help='Width of rectangular source area in [m].')

    anchor = StringChoice.T(
        choices=['top', 'top_left', 'top_right', 'center', 'bottom',
                 'bottom_left', 'bottom_right'],
        default='center',
        optional=True,
        help='Anchor point for positioning the plane, can be: ``top, '
             'center, bottom, top_left, top_right, bottom_left, '
             'bottom_right``.')

    nucleation_x__ = Array.T(
        default=num.array([0.]),
        dtype=num.float64,
        serialize_as='list',
        help='Horizontal position of rupture nucleation in normalized fault '
             'plane coordinates (``-1.`` = left edge, ``+1.`` = right edge).')

    nucleation_y__ = Array.T(
        default=num.array([0.]),
        dtype=num.float64,
        serialize_as='list',
        help='Down-dip position of rupture nucleation in normalized fault '
             'plane coordinates (``-1.`` = upper edge, ``+1.`` = lower edge).')

    nucleation_time__ = Array.T(
        optional=True,
        help='Time in [s] after origin, when nucleation points defined by '
             '``nucleation_x`` and ``nucleation_y`` rupture.',
        dtype=num.float64,
        serialize_as='list')

    gamma = Float.T(
        default=0.8,
        help='Scaling factor between rupture velocity and S-wave velocity: '
             r':math:`v_r = \gamma * v_s`.')

    nx = Int.T(
        default=2,
        help='Number of discrete source patches in x direction (along '
             'strike).')

    ny = Int.T(
        default=2,
        help='Number of discrete source patches in y direction (down dip).')

    slip = Float.T(
        optional=True,
        help='Maximum slip of the rectangular source [m]. '
             'Setting the slip the tractions/stress field '
             'will be normalized to accommodate the desired maximum slip.')

    rake = Float.T(
        optional=True,
        help='Rake angle in [deg], '
             'measured counter-clockwise from right-horizontal '
             'in on-plane view. Rake is translated into homogeneous tractions '
             'in strike and up-dip direction. ``rake`` is mutually exclusive '
             'with tractions parameter.')

    patches = List.T(
        OkadaSource.T(),
        optional=True,
        help='List of all boundary elements/sub faults/fault patches.')

    patch_mask__ = Array.T(
        dtype=bool,
        serialize_as='list',
        shape=(None,),
        optional=True,
        help='Mask for all boundary elements/sub faults/fault patches. True '
             'leaves the patch in the calculation, False excludes the patch.')

    tractions = TractionField.T(
        optional=True,
        help='Traction field the rupture plane is exposed to. See the '
             ':py:mod:`pyrocko.gf.tractions` module for more details. '
             'If ``tractions=None`` and ``rake`` is given'
             ' :py:class:`~pyrocko.gf.tractions.DirectedTractions` will'
             ' be used.')

    coef_mat = Array.T(
        optional=True,
        help='Coefficient matrix linking traction and dislocation field.',
        dtype=num.float64,
        shape=(None, None))

    eikonal_decimation = Int.T(
        optional=True,
        default=1,
        help='Sub-source eikonal factor, a smaller eikonal factor will'
             ' increase the accuracy of rupture front calculation but'
             ' increases also the computation time.')

    decimation_factor = Int.T(
        optional=True,
        default=1,
        help='Sub-source decimation factor, a larger decimation will'
             ' make the result inaccurate but shorten the necessary'
             ' computation time (use for testing purposes only).')

    nthreads = Int.T(
        optional=True,
        default=1,
        help='Number of threads for Okada forward modelling, '
             'matrix inversion and calculation of point subsources. '
             'Note: for small/medium matrices 1 thread is most efficient.')

    pure_shear = Bool.T(
        optional=True,
        default=False,
        help='Calculate only shear tractions and omit tensile tractions.')

    smooth_rupture = Bool.T(
        default=True,
        help='Smooth the tractions by weighting partially ruptured'
             ' fault patches.')

    aggressive_oversampling = Bool.T(
        default=False,
        help='Aggressive oversampling for basesource discretization. '
             'When using \'multilinear\' interpolation oversampling has'
             ' practically no effect.')

2621 

    def __init__(self, **kwargs):
        '''
        Set up the source and check for conflicting parameters.
        '''
        SourceWithDerivedMagnitude.__init__(self, **kwargs)
        # cache for interpolators, keyed by interpolation method
        self._interpolators = {}
        self.check_conflicts()

2626 

2627 @property 

2628 def nucleation_x(self): 

2629 return self.nucleation_x__ 

2630 

2631 @nucleation_x.setter 

2632 def nucleation_x(self, nucleation_x): 

2633 if isinstance(nucleation_x, list): 

2634 nucleation_x = num.array(nucleation_x) 

2635 

2636 elif not isinstance( 

2637 nucleation_x, num.ndarray) and nucleation_x is not None: 

2638 

2639 nucleation_x = num.array([nucleation_x]) 

2640 self.nucleation_x__ = nucleation_x 

2641 

2642 @property 

2643 def nucleation_y(self): 

2644 return self.nucleation_y__ 

2645 

2646 @nucleation_y.setter 

2647 def nucleation_y(self, nucleation_y): 

2648 if isinstance(nucleation_y, list): 

2649 nucleation_y = num.array(nucleation_y) 

2650 

2651 elif not isinstance(nucleation_y, num.ndarray) \ 

2652 and nucleation_y is not None: 

2653 nucleation_y = num.array([nucleation_y]) 

2654 

2655 self.nucleation_y__ = nucleation_y 

2656 

    @property
    def nucleation(self):
        '''
        Combined nucleation coordinates as ``(n_nucleations, 2)`` array.

        Columns are ``nucleation_x`` and ``nucleation_y``; ``None`` if
        either component is unset.
        '''
        nucl_x, nucl_y = self.nucleation_x, self.nucleation_y

        if (nucl_x is None) or (nucl_y is None):
            return None

        assert nucl_x.shape[0] == nucl_y.shape[0]

        return num.concatenate(
            (nucl_x[:, num.newaxis], nucl_y[:, num.newaxis]), axis=1)

    @nucleation.setter
    def nucleation(self, nucleation):
        # accepts (n, 2) array-likes; columns are dispatched to the x and
        # y component setters
        if isinstance(nucleation, list):
            nucleation = num.array(nucleation)

        assert nucleation.shape[1] == 2

        self.nucleation_x = nucleation[:, 0]
        self.nucleation_y = nucleation[:, 1]

2678 

2679 @property 

2680 def nucleation_time(self): 

2681 return self.nucleation_time__ 

2682 

2683 @nucleation_time.setter 

2684 def nucleation_time(self, nucleation_time): 

2685 if not isinstance(nucleation_time, num.ndarray) \ 

2686 and nucleation_time is not None: 

2687 nucleation_time = num.array([nucleation_time]) 

2688 

2689 self.nucleation_time__ = nucleation_time 

2690 

    @property
    def patch_mask(self):
        '''
        Boolean mask of active patches, shape ``(nx * ny,)``.

        Falls back to an all-``True`` mask when no mask is set or when the
        stored mask does not match the current patch count.
        '''
        if (self.patch_mask__ is not None and
                self.patch_mask__.shape == (self.nx * self.ny,)):

            return self.patch_mask__
        else:
            return num.ones(self.nx * self.ny, dtype=bool)

    @patch_mask.setter
    def patch_mask(self, patch_mask):
        # lists are coerced to ndarray for consistent storage
        if isinstance(patch_mask, list):
            patch_mask = num.array(patch_mask)

        self.patch_mask__ = patch_mask

2706 

    def get_tractions(self):
        '''
        Get source traction vectors.

        If :py:attr:`rake` is given, unit length directed traction vectors
        (:py:class:`~pyrocko.gf.tractions.DirectedTractions`) are returned,
        else the given :py:attr:`tractions` are used.

        :returns:
            Traction vectors per patch.
        :rtype:
            :py:class:`~numpy.ndarray`: ``(n_patches, 3)``.
        '''

        # a given rake takes precedence over an explicit traction field
        if self.rake is not None:
            if num.isnan(self.rake):
                raise ValueError('Rake must be a real number, not NaN.')

            logger.warning(
                'Tractions are derived based on the given source rake.')
            tractions = DirectedTractions(rake=self.rake)
        else:
            tractions = self.tractions
        return tractions.get_tractions(self.nx, self.ny, self.patches)

2731 

2732 def base_key(self): 

2733 return SourceWithDerivedMagnitude.base_key(self) + ( 

2734 self.slip, 

2735 self.strike, 

2736 self.dip, 

2737 self.rake, 

2738 self.length, 

2739 self.width, 

2740 float(self.nucleation_x.mean()), 

2741 float(self.nucleation_y.mean()), 

2742 self.decimation_factor, 

2743 self.anchor, 

2744 self.pure_shear, 

2745 self.gamma, 

2746 tuple(self.patch_mask)) 

2747 

2748 def check_conflicts(self): 

2749 if self.tractions and self.rake: 

2750 raise AttributeError( 

2751 'Tractions and rake are mutually exclusive.') 

2752 if self.tractions is None and self.rake is None: 

2753 self.rake = 0. 

2754 

    def get_magnitude(self, store=None, target=None):
        '''
        Get the moment magnitude of this source.

        With ``slip`` or ``tractions`` set, the magnitude is derived by
        integrating the moment rate of the discretized source, which
        requires ``store`` to be given. Otherwise the magnitude of a unit
        moment (1 Nm) is returned.

        :raises DerivedMagnitudeError:
            if slip/tractions are given but ``store`` is missing.
        '''
        self.check_conflicts()
        if self.slip is not None or self.tractions is not None:
            if store is None:
                raise DerivedMagnitudeError(
                    'Magnitude for a rectangular source with slip or '
                    'tractions defined can only be derived when earth model '
                    'is set.')

            moment_rate, calc_times = self.discretize_basesource(
                store, target=target).get_moment_rate(store.config.deltat)

            # sample intervals for the moment-rate integration; the first
            # interval is duplicated to match the sample count
            deltat = num.concatenate((
                (num.diff(calc_times)[0],),
                num.diff(calc_times)))

            return float(pmt.moment_to_magnitude(
                num.sum(moment_rate * deltat)))

        else:
            return float(pmt.moment_to_magnitude(1.0))

2776 

    def get_factor(self):
        # No global scaling factor: moments are fully determined by the
        # discretized patches.
        return 1.0

2779 

    def outline(self, cs='xyz'):
        '''
        Get source outline corner coordinates.

        :param cs:
            :ref:`Output coordinate system <coordinate-system-names>`.
        :type cs:
            optional, str

        :returns:
            Corner points in desired coordinate system.
        :rtype:
            :py:class:`~numpy.ndarray`: ``(5, [2, 3])``.
        '''
        points = outline_rect_source(self.strike, self.dip, self.length,
                                     self.width, self.anchor)

        # shift from anchor-relative to absolute coordinates
        points[:, 0] += self.north_shift
        points[:, 1] += self.east_shift
        points[:, 2] += self.depth
        if cs == 'xyz':
            return points
        elif cs == 'xy':
            return points[:, :2]
        elif cs in ('latlon', 'lonlat', 'latlondepth'):
            latlon = ne_to_latlon(
                self.lat, self.lon, points[:, 0], points[:, 1])

            latlon = num.array(latlon).T
            if cs == 'latlon':
                return latlon
            elif cs == 'lonlat':
                return latlon[:, ::-1]
            else:
                # 'latlondepth': append the depth column
                return num.concatenate(
                    (latlon, points[:, 2].reshape((len(points), 1))),
                    axis=1)

2817 

    def points_on_source(self, cs='xyz', **kwargs):
        '''
        Convert relative plane coordinates to geographical coordinates.

        Given x and y coordinates (relative source coordinates between -1.
        and 1.) are converted to desired geographical coordinates. Coordinates
        need to be given as :py:class:`~numpy.ndarray` arguments ``points_x``
        and ``points_y``.

        :param cs:
            :ref:`Output coordinate system <coordinate-system-names>`.
        :type cs:
            optional, str

        :returns:
            Point coordinates in desired coordinate system.
        :rtype:
            :py:class:`~numpy.ndarray`: ``(n_points, [2, 3])``.
        '''
        points = points_on_rect_source(
            self.strike, self.dip, self.length, self.width,
            self.anchor, **kwargs)

        # shift from anchor-relative to absolute coordinates
        points[:, 0] += self.north_shift
        points[:, 1] += self.east_shift
        points[:, 2] += self.depth
        if cs == 'xyz':
            return points
        elif cs == 'xy':
            return points[:, :2]
        elif cs in ('latlon', 'lonlat', 'latlondepth'):
            latlon = ne_to_latlon(
                self.lat, self.lon, points[:, 0], points[:, 1])

            latlon = num.array(latlon).T
            if cs == 'latlon':
                return latlon
            elif cs == 'lonlat':
                return latlon[:, ::-1]
            else:
                # 'latlondepth': append the depth column
                return num.concatenate(
                    (latlon, points[:, 2].reshape((len(points), 1))),
                    axis=1)

2861 

    def pyrocko_moment_tensor(self, store=None, target=None):
        '''
        Get the equivalent moment tensor.

        The effective rake is a weighted average over the patches, based on
        patch slip vectors (when a store is given) or on the traction
        vectors otherwise.
        '''
        if store is not None:
            if not self.patches:
                self.discretize_patches(store)

            data = self.get_slip()
        else:
            data = self.get_tractions()

        # weight each patch by the norm of its slip/traction vector
        weights = num.linalg.norm(data, axis=1)
        weights /= weights.sum()

        rakes = num.arctan2(data[:, 1], data[:, 0]) * r2d
        rake = num.average(rakes, weights=weights)

        return pmt.MomentTensor(
            strike=self.strike,
            dip=self.dip,
            rake=rake,
            scalar_moment=self.get_moment(store, target))

2882 

    def pyrocko_event(self, store=None, target=None, **kwargs):
        # Delegate to the parent implementation; extra kwargs are passed
        # through unchanged.
        return SourceWithDerivedMagnitude.pyrocko_event(
            self, store, target,
            **kwargs)

2887 

2888 @classmethod 

2889 def from_pyrocko_event(cls, ev, **kwargs): 

2890 d = {} 

2891 mt = ev.moment_tensor 

2892 if mt: 

2893 (strike, dip, rake), _ = mt.both_strike_dip_rake() 

2894 d.update( 

2895 strike=float(strike), 

2896 dip=float(dip), 

2897 rake=float(rake)) 

2898 

2899 d.update(kwargs) 

2900 return super(PseudoDynamicRupture, cls).from_pyrocko_event(ev, **d) 

2901 

    def _discretize_points(self, store, *args, **kwargs):
        '''
        Discretize source plane with equal vertical and horizontal spacing.

        Additional ``*args`` and ``**kwargs`` are passed to
        :py:meth:`points_on_source`.

        :param store:
            Green's function database (needs to cover whole region of the
            source).
        :type store:
            :py:class:`~pyrocko.gf.store.Store`

        :returns:
            Number of points in strike and dip direction, distance
            between adjacent points, coordinates (latlondepth) and coordinates
            (xy on fault) for discrete points.
        :rtype:
            (int, int, float, :py:class:`~numpy.ndarray`,
            :py:class:`~numpy.ndarray`).
        '''
        anch_x, anch_y = map_anchor[self.anchor]

        # sample a down-dip profile of the plane to find the minimum
        # S-wave velocity along the fault width
        npoints = int(self.width // km) + 1
        points = num.zeros((npoints, 3))
        points[:, 1] = num.linspace(-1., 1., npoints)
        points[:, 1] = (points[:, 1] - anch_y) * self.width/2

        rotmat = pmt.euler_to_matrix(self.dip*d2r, self.strike*d2r, 0.0)
        points = num.dot(rotmat.T, points.T).T
        points[:, 2] += self.depth

        vs_min = store.config.get_vs(
            self.lat, self.lon, points,
            interpolation='nearest_neighbor')
        # lower bound on vr avoids excessive grid refinement in very slow
        # media
        vr_min = max(vs_min.min(), .5*km) * self.gamma

        oversampling = 10.
        delta_l = self.length / (self.nx * oversampling)
        delta_w = self.width / (self.ny * oversampling)

        # grid spacing: finest of travel-distance per sample, patch
        # oversampling and store spacings, scaled by eikonal decimation
        delta = self.eikonal_decimation * num.min([
            store.config.deltat * vr_min / oversampling,
            delta_l, delta_w] + [
            deltas for deltas in store.config.deltas])

        # snap delta so the width divides into an integer number of cells
        delta = delta_w / num.ceil(delta_w / delta)

        nx = int(num.ceil(self.length / delta)) + 1
        ny = int(num.ceil(self.width / delta)) + 1

        # the grid overshoots the fault length; widen the normalized x
        # limits symmetrically to compensate
        rem_l = (nx-1)*delta - self.length
        lim_x = rem_l / self.length

        points_xy = num.zeros((nx * ny, 2))
        points_xy[:, 0] = num.repeat(
            num.linspace(-1.-lim_x, 1.+lim_x, nx), ny)
        points_xy[:, 1] = num.tile(
            num.linspace(-1., 1., ny), nx)

        points = self.points_on_source(
            points_x=points_xy[:, 0],
            points_y=points_xy[:, 1],
            **kwargs)

        return nx, ny, delta, points, points_xy

2968 

    def _discretize_rupture_v(self, store, interpolation='nearest_neighbor',
                              points=None):
        '''
        Get rupture velocity for discrete points on source plane.

        :param store:
            Green's function database (needs to cover the whole region of the
            source)
        :type store:
            optional, :py:class:`~pyrocko.gf.store.Store`

        :param interpolation:
            Interpolation method to use (choose between ``'nearest_neighbor'``
            and ``'multilinear'``).
        :type interpolation:
            optional, str

        :param points:
            Coordinates on fault (-1.:1.) of discrete points.
        :type points:
            optional, :py:class:`~numpy.ndarray`: ``(n_points, 2)``

        :returns:
            Rupture velocity assumed as :math:`v_s * \\gamma` for discrete
            points.
        :rtype:
            :py:class:`~numpy.ndarray`: ``(n_points, )``.
        '''

        if points is None:
            # fall back to the plane's own equidistant discretization
            _, _, _, points, _ = self._discretize_points(store, cs='xyz')

        return store.config.get_vs(
            self.lat, self.lon,
            points=points,
            interpolation=interpolation) * self.gamma

3005 

3006 def discretize_time( 

3007 self, store, interpolation='nearest_neighbor', 

3008 vr=None, times=None, *args, **kwargs): 

3009 ''' 

3010 Get rupture start time for discrete points on source plane. 

3011 

3012 :param store: 

3013 Green's function database (needs to cover whole region of the 

3014 source) 

3015 :type store: 

3016 :py:class:`~pyrocko.gf.store.Store` 

3017 

3018 :param interpolation: 

3019 Interpolation method to use (choose between ``'nearest_neighbor'`` 

3020 and ``'multilinear'``). 

3021 :type interpolation: 

3022 optional, str 

3023 

3024 :param vr: 

3025 Array, containing rupture user defined rupture velocity values. 

3026 :type vr: 

3027 optional, :py:class:`~numpy.ndarray` 

3028 

3029 :param times: 

3030 Array, containing zeros, where rupture is starting, real positive 

3031 numbers at later secondary nucleation points and -1, where time 

3032 will be calculated. If not given, rupture starts at nucleation_x, 

3033 nucleation_y. Times are given for discrete points with equal 

3034 horizontal and vertical spacing. 

3035 :type times: 

3036 optional, :py:class:`~numpy.ndarray` 

3037 

3038 :returns: 

3039 Coordinates (latlondepth), coordinates (xy), rupture velocity, 

3040 rupture propagation time of discrete points. 

3041 :rtype: 

3042 :py:class:`~numpy.ndarray`: ``(n_points, 3)``, 

3043 :py:class:`~numpy.ndarray`: ``(n_points, 2)``, 

3044 :py:class:`~numpy.ndarray`: ``(n_points_dip, n_points_strike)``, 

3045 :py:class:`~numpy.ndarray`: ``(n_points_dip, n_points_strike)``. 

3046 ''' 

3047 nx, ny, delta, points, points_xy = self._discretize_points( 

3048 store, cs='xyz') 

3049 

3050 if vr is None or vr.shape != tuple((nx, ny)): 

3051 if vr: 

3052 logger.warning( 

3053 'Given rupture velocities are not in right shape: ' 

3054 '(%i, %i), but needed is (%i, %i).', *vr.shape + (nx, ny)) 

3055 vr = self._discretize_rupture_v(store, interpolation, points)\ 

3056 .reshape(nx, ny) 

3057 

3058 if vr.shape != tuple((nx, ny)): 

3059 logger.warning( 

3060 'Given rupture velocities are not in right shape. Therefore' 

3061 ' standard rupture velocity array is used.') 

3062 

3063 def initialize_times(): 

3064 nucl_x, nucl_y = self.nucleation_x, self.nucleation_y 

3065 

3066 if nucl_x.shape != nucl_y.shape: 

3067 raise ValueError( 

3068 'Nucleation coordinates have different shape.') 

3069 

3070 dist_points = num.array([ 

3071 num.linalg.norm(points_xy - num.array([x, y]).ravel(), axis=1) 

3072 for x, y in zip(nucl_x, nucl_y)]) 

3073 nucl_indices = num.argmin(dist_points, axis=1) 

3074 

3075 if self.nucleation_time is None: 

3076 nucl_times = num.zeros_like(nucl_indices) 

3077 else: 

3078 if self.nucleation_time.shape == nucl_x.shape: 

3079 nucl_times = self.nucleation_time 

3080 else: 

3081 raise ValueError( 

3082 'Nucleation coordinates and times have different ' 

3083 'shapes') 

3084 

3085 t = num.full(nx * ny, -1.) 

3086 t[nucl_indices] = nucl_times 

3087 return t.reshape(nx, ny) 

3088 

3089 if times is None: 

3090 times = initialize_times() 

3091 elif times.shape != tuple((nx, ny)): 

3092 times = initialize_times() 

3093 logger.warning( 

3094 'Given times are not in right shape. Therefore standard time' 

3095 ' array is used.') 

3096 

3097 eikonal_ext.eikonal_solver_fmm_cartesian( 

3098 speeds=vr, times=times, delta=delta) 

3099 

3100 return points, points_xy, vr, times 

3101 

3102 def get_vr_time_interpolators( 

3103 self, store, interpolation='nearest_neighbor', force=False, 

3104 **kwargs): 

3105 ''' 

3106 Get interpolators for rupture velocity and rupture time. 

3107 

3108 Additional ``**kwargs`` are passed to :py:meth:`discretize_time`. 

3109 

3110 :param store: 

3111 Green's function database (needs to cover whole region of the 

3112 source). 

3113 :type store: 

3114 :py:class:`~pyrocko.gf.store.Store` 

3115 

3116 :param interpolation: 

3117 Interpolation method to use (choose between ``'nearest_neighbor'`` 

3118 and ``'multilinear'``). 

3119 :type interpolation: 

3120 optional, str 

3121 

3122 :param force: 

3123 Force recalculation of the interpolators (e.g. after change of 

3124 nucleation point locations/times). Default is ``False``. 

3125 :type force: 

3126 optional, bool 

3127 ''' 

3128 interp_map = {'multilinear': 'linear', 'nearest_neighbor': 'nearest'} 

3129 if interpolation not in interp_map: 

3130 raise TypeError( 

3131 'Interpolation method %s not available' % interpolation) 

3132 

3133 if not self._interpolators.get(interpolation, False) or force: 

3134 _, points_xy, vr, times = self.discretize_time( 

3135 store, **kwargs) 

3136 

3137 if self.length <= 0.: 

3138 raise ValueError( 

3139 'length must be larger then 0. not %g' % self.length) 

3140 

3141 if self.width <= 0.: 

3142 raise ValueError( 

3143 'width must be larger then 0. not %g' % self.width) 

3144 

3145 nx, ny = times.shape 

3146 anch_x, anch_y = map_anchor[self.anchor] 

3147 

3148 points_xy[:, 0] = (points_xy[:, 0] - anch_x) * self.length / 2. 

3149 points_xy[:, 1] = (points_xy[:, 1] - anch_y) * self.width / 2. 

3150 

3151 ascont = num.ascontiguousarray 

3152 

3153 self._interpolators[interpolation] = ( 

3154 nx, ny, times, vr, 

3155 RegularGridInterpolator( 

3156 (ascont(points_xy[::ny, 0]), ascont(points_xy[:ny, 1])), 

3157 times, 

3158 method=interp_map[interpolation]), 

3159 RegularGridInterpolator( 

3160 (ascont(points_xy[::ny, 0]), ascont(points_xy[:ny, 1])), 

3161 vr, 

3162 method=interp_map[interpolation])) 

3163 

3164 return self._interpolators[interpolation] 

3165 

    def discretize_patches(
            self, store, interpolation='nearest_neighbor', force=False,
            grid_shape=(),
            **kwargs):
        '''
        Get rupture start time and OkadaSource elements for points on rupture.

        All source elements and their corresponding center points are
        calculated and stored in the :py:attr:`patches` attribute.

        Additional ``**kwargs`` are passed to :py:meth:`discretize_time`.

        :param store:
            Green's function database (needs to cover whole region of the
            source).
        :type store:
            :py:class:`~pyrocko.gf.store.Store`

        :param interpolation:
            Interpolation method to use (choose between ``'nearest_neighbor'``
            and ``'multilinear'``).
        :type interpolation:
            optional, str

        :param force:
            Force recalculation of the vr and time interpolators ( e.g. after
            change of nucleation point locations/times). Default is ``False``.
        :type force:
            optional, bool

        :param grid_shape:
            Desired sub fault patch grid size (nlength, nwidth). Either factor
            or grid_shape should be set.
        :type grid_shape:
            optional, tuple of int
        '''
        nx, ny, times, vr, time_interpolator, vr_interpolator = \
            self.get_vr_time_interpolators(
                store,
                interpolation=interpolation, force=force, **kwargs)
        anch_x, anch_y = map_anchor[self.anchor]

        # Fault-plane edge coordinates relative to the anchor point:
        # al1/al2 along strike, aw1/aw2 along dip.
        al = self.length / 2.
        aw = self.width / 2.
        al1 = -(al + anch_x * al)
        al2 = al - anch_x * al
        aw1 = -aw + anch_y * aw
        aw2 = aw + anch_y * aw
        assert num.abs([al1, al2]).sum() == self.length
        assert num.abs([aw1, aw2]).sum() == self.width

        def get_lame(*a, **kw):
            # Returns (shear modulus, Poisson ratio) from the store's
            # elastic model: lambda = vp**2 * rho - 2*mu,
            # nu = lambda / (2*(lambda + mu)).
            shear_mod = store.config.get_shear_moduli(*a, **kw)
            lamb = store.config.get_vp(*a, **kw)**2 \
                * store.config.get_rho(*a, **kw) - 2. * shear_mod
            return shear_mod, lamb / (2. * (lamb + shear_mod))

        # Elastic parameters at the source reference point, used for the
        # single enclosing OkadaSource below.
        shear_mod, poisson = get_lame(
            self.lat, self.lon,
            num.array([[self.north_shift, self.east_shift, self.depth]]),
            interpolation=interpolation)

        okada_src = OkadaSource(
            lat=self.lat, lon=self.lon,
            strike=self.strike, dip=self.dip,
            north_shift=self.north_shift, east_shift=self.east_shift,
            depth=self.depth,
            al1=al1, al2=al2, aw1=aw1, aw2=aw2,
            poisson=poisson.mean(),
            shearmod=shear_mod.mean(),
            opening=kwargs.get('opening', 0.))

        # Patch grid size: explicit grid_shape wins, otherwise fall back to
        # the eikonal grid dimensions. Existing self.nx/self.ny are kept.
        if not (self.nx and self.ny):
            if grid_shape:
                self.nx, self.ny = grid_shape
            else:
                self.nx = nx
                self.ny = ny

        source_disc, source_points = okada_src.discretize(self.nx, self.ny)

        # NOTE(review): patch-wise shear_mod/poisson are recomputed here but
        # not assigned to the discretized patches — appears unused; confirm.
        shear_mod, poisson = get_lame(
            self.lat, self.lon,
            num.array([src.source_patch()[:3] for src in source_disc]),
            interpolation=interpolation)

        # If the patch grid differs from the eikonal grid, interpolate
        # rupture times/velocities onto the patch centers; otherwise reuse
        # the eikonal grids directly (transposed to patch ordering).
        if (self.nx, self.ny) != (nx, ny):
            times_interp = time_interpolator(
                num.ascontiguousarray(source_points[:, :2]))
            vr_interp = vr_interpolator(
                num.ascontiguousarray(source_points[:, :2]))
        else:
            times_interp = times.T.ravel()
            vr_interp = vr.T.ravel()

        # Attach rupture velocity and absolute rupture onset time to each
        # patch.
        for isrc, src in enumerate(source_disc):
            src.vr = vr_interp[isrc]
            src.time = times_interp[isrc] + self.time

        self.patches = source_disc

3266 

3267 def discretize_basesource(self, store, target=None): 

3268 ''' 

3269 Prepare source for synthetic waveform calculation. 

3270 

3271 :param store: 

3272 Green's function database (needs to cover whole region of the 

3273 source). 

3274 :type store: 

3275 :py:class:`~pyrocko.gf.store.Store` 

3276 

3277 :param target: 

3278 Target information. 

3279 :type target: 

3280 optional, :py:class:`~pyrocko.gf.targets.Target` 

3281 

3282 :returns: 

3283 Source discretized by a set of moment tensors and times. 

3284 :rtype: 

3285 :py:class:`~pyrocko.gf.meta.DiscretizedMTSource` 

3286 ''' 

3287 if not target: 

3288 interpolation = 'nearest_neighbor' 

3289 else: 

3290 interpolation = target.interpolation 

3291 

3292 if not self.patches: 

3293 self.discretize_patches(store, interpolation) 

3294 

3295 if self.coef_mat is None: 

3296 self.calc_coef_mat() 

3297 

3298 delta_slip, slip_times = self.get_delta_slip(store) 

3299 npatches = self.nx * self.ny 

3300 ntimes = slip_times.size 

3301 

3302 anch_x, anch_y = map_anchor[self.anchor] 

3303 

3304 pln = self.length / self.nx 

3305 pwd = self.width / self.ny 

3306 

3307 patch_coords = num.array([ 

3308 (p.ix, p.iy) 

3309 for p in self.patches]).reshape(self.nx, self.ny, 2) 

3310 

3311 # boundary condition is zero-slip 

3312 # is not valid to avoid unwished interpolation effects 

3313 slip_grid = num.zeros((self.nx + 2, self.ny + 2, ntimes, 3)) 

3314 slip_grid[1:-1, 1:-1, :, :] = \ 

3315 delta_slip.reshape(self.nx, self.ny, ntimes, 3) 

3316 

3317 slip_grid[0, 0, :, :] = slip_grid[1, 1, :, :] 

3318 slip_grid[0, -1, :, :] = slip_grid[1, -2, :, :] 

3319 slip_grid[-1, 0, :, :] = slip_grid[-2, 1, :, :] 

3320 slip_grid[-1, -1, :, :] = slip_grid[-2, -2, :, :] 

3321 

3322 slip_grid[1:-1, 0, :, :] = slip_grid[1:-1, 1, :, :] 

3323 slip_grid[1:-1, -1, :, :] = slip_grid[1:-1, -2, :, :] 

3324 slip_grid[0, 1:-1, :, :] = slip_grid[1, 1:-1, :, :] 

3325 slip_grid[-1, 1:-1, :, :] = slip_grid[-2, 1:-1, :, :] 

3326 

3327 def make_grid(patch_parameter): 

3328 grid = num.zeros((self.nx + 2, self.ny + 2)) 

3329 grid[1:-1, 1:-1] = patch_parameter.reshape(self.nx, self.ny) 

3330 

3331 grid[0, 0] = grid[1, 1] 

3332 grid[0, -1] = grid[1, -2] 

3333 grid[-1, 0] = grid[-2, 1] 

3334 grid[-1, -1] = grid[-2, -2] 

3335 

3336 grid[1:-1, 0] = grid[1:-1, 1] 

3337 grid[1:-1, -1] = grid[1:-1, -2] 

3338 grid[0, 1:-1] = grid[1, 1:-1] 

3339 grid[-1, 1:-1] = grid[-2, 1:-1] 

3340 

3341 return grid 

3342 

3343 lamb = self.get_patch_attribute('lamb') 

3344 mu = self.get_patch_attribute('shearmod') 

3345 

3346 lamb_grid = make_grid(lamb) 

3347 mu_grid = make_grid(mu) 

3348 

3349 coords_x = num.zeros(self.nx + 2) 

3350 coords_x[1:-1] = patch_coords[:, 0, 0] 

3351 coords_x[0] = coords_x[1] - pln / 2 

3352 coords_x[-1] = coords_x[-2] + pln / 2 

3353 

3354 coords_y = num.zeros(self.ny + 2) 

3355 coords_y[1:-1] = patch_coords[0, :, 1] 

3356 coords_y[0] = coords_y[1] - pwd / 2 

3357 coords_y[-1] = coords_y[-2] + pwd / 2 

3358 

3359 slip_interp = RegularGridInterpolator( 

3360 (coords_x, coords_y, slip_times), 

3361 slip_grid, method='nearest') 

3362 

3363 lamb_interp = RegularGridInterpolator( 

3364 (coords_x, coords_y), 

3365 lamb_grid, method='nearest') 

3366 

3367 mu_interp = RegularGridInterpolator( 

3368 (coords_x, coords_y), 

3369 mu_grid, method='nearest') 

3370 

3371 # discretize basesources 

3372 mindeltagf = min(tuple( 

3373 (self.length / self.nx, self.width / self.ny) + 

3374 tuple(store.config.deltas))) 

3375 

3376 nl = int((1. / self.decimation_factor) * 

3377 num.ceil(pln / mindeltagf)) + 1 

3378 nw = int((1. / self.decimation_factor) * 

3379 num.ceil(pwd / mindeltagf)) + 1 

3380 nsrc_patch = int(nl * nw) 

3381 dl = pln / nl 

3382 dw = pwd / nw 

3383 

3384 patch_area = dl * dw 

3385 

3386 xl = num.linspace(-0.5 * (pln - dl), 0.5 * (pln - dl), nl) 

3387 xw = num.linspace(-0.5 * (pwd - dw), 0.5 * (pwd - dw), nw) 

3388 

3389 base_coords = num.zeros((nsrc_patch, 3)) 

3390 base_coords[:, 0] = num.tile(xl, nw) 

3391 base_coords[:, 1] = num.repeat(xw, nl) 

3392 base_coords = num.tile(base_coords, (npatches, 1)) 

3393 

3394 center_coords = num.zeros((npatches, 3)) 

3395 center_coords[:, 0] = num.repeat( 

3396 num.arange(self.nx) * pln + pln / 2, self.ny) - self.length / 2 

3397 center_coords[:, 1] = num.tile( 

3398 num.arange(self.ny) * pwd + pwd / 2, self.nx) - self.width / 2 

3399 

3400 base_coords -= center_coords.repeat(nsrc_patch, axis=0) 

3401 nbaselocs = base_coords.shape[0] 

3402 

3403 base_interp = base_coords.repeat(ntimes, axis=0) 

3404 

3405 base_times = num.tile(slip_times, nbaselocs) 

3406 base_interp[:, 0] -= anch_x * self.length / 2 

3407 base_interp[:, 1] -= anch_y * self.width / 2 

3408 base_interp[:, 2] = base_times 

3409 

3410 _, _, _, _, time_interpolator, _ = self.get_vr_time_interpolators( 

3411 store, interpolation=interpolation) 

3412 

3413 time_eikonal_max = time_interpolator.values.max() 

3414 

3415 nbasesrcs = base_interp.shape[0] 

3416 delta_slip = slip_interp(base_interp).reshape(nbaselocs, ntimes, 3) 

3417 lamb = lamb_interp(base_interp[:, :2]).ravel() 

3418 mu = mu_interp(base_interp[:, :2]).ravel() 

3419 

3420 if False: 

3421 try: 

3422 import matplotlib.pyplot as plt 

3423 coords = base_coords.copy() 

3424 norm = num.sum(num.linalg.norm(delta_slip, axis=2), axis=1) 

3425 plt.scatter(coords[:, 0], coords[:, 1], c=norm) 

3426 plt.show() 

3427 except AttributeError: 

3428 pass 

3429 

3430 base_interp[:, 2] = 0. 

3431 rotmat = pmt.euler_to_matrix(self.dip * d2r, self.strike * d2r, 0.0) 

3432 base_interp = num.dot(rotmat.T, base_interp.T).T 

3433 base_interp[:, 0] += self.north_shift 

3434 base_interp[:, 1] += self.east_shift 

3435 base_interp[:, 2] += self.depth 

3436 

3437 slip_strike = delta_slip[:, :, 0].ravel() 

3438 slip_dip = delta_slip[:, :, 1].ravel() 

3439 slip_norm = delta_slip[:, :, 2].ravel() 

3440 

3441 slip_shear = num.linalg.norm([slip_strike, slip_dip], axis=0) 

3442 slip_rake = r2d * num.arctan2(slip_dip, slip_strike) 

3443 

3444 m6s = okada_ext.patch2m6( 

3445 strikes=num.full(nbasesrcs, self.strike, dtype=float), 

3446 dips=num.full(nbasesrcs, self.dip, dtype=float), 

3447 rakes=slip_rake, 

3448 disl_shear=slip_shear, 

3449 disl_norm=slip_norm, 

3450 lamb=lamb, 

3451 mu=mu, 

3452 nthreads=self.nthreads) 

3453 

3454 m6s *= patch_area 

3455 

3456 dl = -self.patches[0].al1 + self.patches[0].al2 

3457 dw = -self.patches[0].aw1 + self.patches[0].aw2 

3458 

3459 base_times[base_times > time_eikonal_max] = time_eikonal_max 

3460 

3461 ds = meta.DiscretizedMTSource( 

3462 lat=self.lat, 

3463 lon=self.lon, 

3464 times=base_times + self.time, 

3465 north_shifts=base_interp[:, 0], 

3466 east_shifts=base_interp[:, 1], 

3467 depths=base_interp[:, 2], 

3468 m6s=m6s, 

3469 dl=dl, 

3470 dw=dw, 

3471 nl=self.nx, 

3472 nw=self.ny) 

3473 

3474 return ds 

3475 

3476 def calc_coef_mat(self): 

3477 ''' 

3478 Calculate coefficients connecting tractions and dislocations. 

3479 ''' 

3480 if not self.patches: 

3481 raise ValueError( 

3482 'Patches are needed. Please calculate them first.') 

3483 

3484 self.coef_mat = make_okada_coefficient_matrix( 

3485 self.patches, nthreads=self.nthreads, pure_shear=self.pure_shear) 

3486 

3487 def get_patch_attribute(self, attr): 

3488 ''' 

3489 Get patch attributes. 

3490 

3491 :param attr: 

3492 Name of selected attribute (see 

3493 :py:class`pyrocko.modelling.okada.OkadaSource`). 

3494 :type attr: 

3495 str 

3496 

3497 :returns: 

3498 Array with attribute value for each fault patch. 

3499 :rtype: 

3500 :py:class:`~numpy.ndarray` 

3501 

3502 ''' 

3503 if not self.patches: 

3504 raise ValueError( 

3505 'Patches are needed. Please calculate them first.') 

3506 return num.array([getattr(p, attr) for p in self.patches]) 

3507 

    def get_slip(
            self,
            time=None,
            scale_slip=True,
            interpolation='nearest_neighbor',
            **kwargs):
        '''
        Get slip per subfault patch for given time after rupture start.

        :param time:
            Time after origin [s], for which slip is computed. If not
            given, final static slip is returned.
        :type time:
            optional, float > 0.

        :param scale_slip:
            If ``True`` and :py:attr:`slip` given, all slip values are scaled
            to fit the given maximum slip.
        :type scale_slip:
            optional, bool

        :param interpolation:
            Interpolation method to use (choose between ``'nearest_neighbor'``
            and ``'multilinear'``).
        :type interpolation:
            optional, str

        :returns:
            Inverted dislocations (:math:`u_{strike}, u_{dip}, u_{tensile}`)
            for each source patch.
        :rtype:
            :py:class:`~numpy.ndarray`: ``(n_sources, 3)``
        '''

        if self.patches is None:
            raise ValueError(
                'Please discretize the source first (discretize_patches())')
        npatches = len(self.patches)
        tractions = self.get_tractions()
        # Rupture onset times relative to the origin time.
        time_patch_max = self.get_patch_attribute('time').max() - self.time

        time_patch = time
        if time is None:
            time_patch = time_patch_max

        if self.coef_mat is None:
            self.calc_coef_mat()

        if tractions.shape != (npatches, 3):
            raise AttributeError(
                'The traction vector is of invalid shape.'
                ' Required shape is (npatches, 3)')

        patch_mask = num.ones(npatches, dtype=bool)
        if self.patch_mask is not None:
            patch_mask = self.patch_mask

        # A patch is "relevant" once its rupture onset is reached; masked
        # patches get a time beyond time_patch so they never qualify.
        times = self.get_patch_attribute('time') - self.time
        times[~patch_mask] = time_patch + 1.  # exclude masked-out patches
        relevant_sources = num.nonzero(times <= time_patch)[0]
        disloc_est = num.zeros_like(tractions)

        if self.smooth_rupture:
            patch_activation = num.zeros(npatches)

            # NOTE(review): ``store`` is not a parameter of this method —
            # this resolves to the module-level ``store`` import and only
            # works when the interpolators are already cached. TODO confirm.
            nx, ny, times, vr, time_interpolator, vr_interpolator = \
                self.get_vr_time_interpolators(
                    store, interpolation=interpolation)

            # Getting the native Eikonal grid, bit hackish
            points_x = num.round(time_interpolator.grid[0], decimals=2)
            points_y = num.round(time_interpolator.grid[1], decimals=2)
            times_eikonal = time_interpolator.values

            time_max = time
            if time is None:
                time_max = times_eikonal.max()

            # Fraction of each patch's eikonal grid nodes already ruptured
            # at time_max gives a smooth activation factor in [0, 1].
            for ip, p in enumerate(self.patches):
                ul = num.round((p.ix + p.al1, p.iy + p.aw1), decimals=2)
                lr = num.round((p.ix + p.al2, p.iy + p.aw2), decimals=2)

                idx_length = num.logical_and(
                    points_x >= ul[0], points_x <= lr[0])
                idx_width = num.logical_and(
                    points_y >= ul[1], points_y <= lr[1])

                times_patch = times_eikonal[num.ix_(idx_length, idx_width)]
                if times_patch.size == 0:
                    raise AttributeError('could not use smooth_rupture')

                patch_activation[ip] = \
                    (times_patch <= time_max).sum() / times_patch.size

                # At t=0 nothing has slipped yet (unless the rupture is
                # instantaneous).
                if time_patch == 0 and time_patch != time_patch_max:
                    patch_activation[ip] = 0.

            patch_activation[~patch_mask] = 0.  # exclude masked-out patches

            relevant_sources = num.nonzero(patch_activation > 0.)[0]

        if relevant_sources.size == 0:
            return disloc_est

        # Expand patch indices to the 3 dislocation components per patch
        # for indexing into the (3*npatches)^2 coefficient matrix.
        indices_disl = num.repeat(relevant_sources * 3, 3)
        indices_disl[1::3] += 1
        indices_disl[2::3] += 2

        disloc_est[relevant_sources] = invert_fault_dislocations_bem(
            stress_field=tractions[relevant_sources, :].ravel(),
            coef_mat=self.coef_mat[indices_disl, :][:, indices_disl],
            pure_shear=self.pure_shear, nthreads=self.nthreads,
            epsilon=None,
            **kwargs)

        if self.smooth_rupture:
            disloc_est *= patch_activation[:, num.newaxis]

        if scale_slip and self.slip is not None:
            # Invert the final (t = infinity) dislocations of all unmasked
            # patches to obtain the reference maximum slip for scaling.
            disloc_tmax = num.zeros(npatches)

            indices_disl = num.repeat(num.nonzero(patch_mask)[0] * 3, 3)
            indices_disl[1::3] += 1
            indices_disl[2::3] += 2

            disloc_tmax[patch_mask] = num.linalg.norm(
                invert_fault_dislocations_bem(
                    stress_field=tractions[patch_mask, :].ravel(),
                    coef_mat=self.coef_mat[indices_disl, :][:, indices_disl],
                    pure_shear=self.pure_shear, nthreads=self.nthreads,
                    epsilon=None,
                    **kwargs), axis=1)

            disloc_tmax_max = disloc_tmax.max()
            if disloc_tmax_max == 0.:
                logger.warning(
                    'slip scaling not performed. Maximum slip is 0.')

            # NOTE(review): when disloc_tmax_max is 0. this still divides
            # by zero right after the warning — confirm intended.
            disloc_est *= self.slip / disloc_tmax_max

        return disloc_est

3649 

3650 def get_delta_slip( 

3651 self, 

3652 store=None, 

3653 deltat=None, 

3654 delta=True, 

3655 interpolation='nearest_neighbor', 

3656 **kwargs): 

3657 ''' 

3658 Get slip change snapshots. 

3659 

3660 The time interval, within which the slip changes are computed is 

3661 determined by the sampling rate of the Green's function database or 

3662 ``deltat``. Additional ``**kwargs`` are passed to :py:meth:`get_slip`. 

3663 

3664 :param store: 

3665 Green's function database (needs to cover whole region of of the 

3666 source). Its sampling interval is used as time increment for slip 

3667 difference calculation. Either ``deltat`` or ``store`` should be 

3668 given. 

3669 :type store: 

3670 optional, :py:class:`~pyrocko.gf.store.Store` 

3671 

3672 :param deltat: 

3673 Time interval for slip difference calculation [s]. Either 

3674 ``deltat`` or ``store`` should be given. 

3675 :type deltat: 

3676 optional, float 

3677 

3678 :param delta: 

3679 If ``True``, slip differences between two time steps are given. If 

3680 ``False``, cumulative slip for all time steps. 

3681 :type delta: 

3682 optional, bool 

3683 

3684 :param interpolation: 

3685 Interpolation method to use (choose between ``'nearest_neighbor'`` 

3686 and ``'multilinear'``). 

3687 :type interpolation: 

3688 optional, str 

3689 

3690 :returns: 

3691 Displacement changes(:math:`\\Delta u_{strike}, 

3692 \\Delta u_{dip} , \\Delta u_{tensile}`) for each source patch and 

3693 time; corner times, for which delta slip is computed. The order of 

3694 displacement changes array is: 

3695 

3696 .. math:: 

3697 

3698 &[[\\\\ 

3699 &[\\Delta u_{strike, patch1, t1}, 

3700 \\Delta u_{dip, patch1, t1}, 

3701 \\Delta u_{tensile, patch1, t1}],\\\\ 

3702 &[\\Delta u_{strike, patch1, t2}, 

3703 \\Delta u_{dip, patch1, t2}, 

3704 \\Delta u_{tensile, patch1, t2}]\\\\ 

3705 &], [\\\\ 

3706 &[\\Delta u_{strike, patch2, t1}, ...],\\\\ 

3707 &[\\Delta u_{strike, patch2, t2}, ...]]]\\\\ 

3708 

3709 :rtype: :py:class:`~numpy.ndarray`: ``(n_sources, n_times, 3)``, 

3710 :py:class:`~numpy.ndarray`: ``(n_times, )`` 

3711 ''' 

3712 if store and deltat: 

3713 raise AttributeError( 

3714 'Argument collision. ' 

3715 'Please define only the store or the deltat argument.') 

3716 

3717 if store: 

3718 deltat = store.config.deltat 

3719 

3720 if not deltat: 

3721 raise AttributeError('Please give a GF store or set deltat.') 

3722 

3723 npatches = len(self.patches) 

3724 

3725 _, _, _, _, time_interpolator, _ = self.get_vr_time_interpolators( 

3726 store, interpolation=interpolation) 

3727 tmax = time_interpolator.values.max() 

3728 

3729 calc_times = num.arange(0., tmax + deltat, deltat) 

3730 calc_times[calc_times > tmax] = tmax 

3731 

3732 disloc_est = num.zeros((npatches, calc_times.size, 3)) 

3733 

3734 for itime, t in enumerate(calc_times): 

3735 disloc_est[:, itime, :] = self.get_slip( 

3736 time=t, scale_slip=False, **kwargs) 

3737 

3738 if self.slip: 

3739 disloc_tmax = num.linalg.norm( 

3740 self.get_slip(scale_slip=False, time=tmax), 

3741 axis=1) 

3742 

3743 disloc_tmax_max = disloc_tmax.max() 

3744 if disloc_tmax_max == 0.: 

3745 logger.warning( 

3746 'Slip scaling not performed. Maximum slip is 0.') 

3747 else: 

3748 disloc_est *= self.slip / disloc_tmax_max 

3749 

3750 if not delta: 

3751 return disloc_est, calc_times 

3752 

3753 # if we have only one timestep there is no gradient 

3754 if calc_times.size > 1: 

3755 disloc_init = disloc_est[:, 0, :] 

3756 disloc_est = num.diff(disloc_est, axis=1) 

3757 disloc_est = num.concatenate(( 

3758 disloc_init[:, num.newaxis, :], disloc_est), axis=1) 

3759 

3760 calc_times = calc_times 

3761 

3762 return disloc_est, calc_times 

3763 

3764 def get_slip_rate(self, *args, **kwargs): 

3765 ''' 

3766 Get slip rate inverted from patches. 

3767 

3768 The time interval, within which the slip rates are computed is 

3769 determined by the sampling rate of the Green's function database or 

3770 ``deltat``. Additional ``*args`` and ``**kwargs`` are passed to 

3771 :py:meth:`get_delta_slip`. 

3772 

3773 :returns: 

3774 Slip rates (:math:`\\Delta u_{strike}/\\Delta t`, 

3775 :math:`\\Delta u_{dip}/\\Delta t, \\Delta u_{tensile}/\\Delta t`) 

3776 for each source patch and time; corner times, for which slip rate 

3777 is computed. The order of sliprate array is: 

3778 

3779 .. math:: 

3780 

3781 &[[\\\\ 

3782 &[\\Delta u_{strike, patch1, t1}/\\Delta t, 

3783 \\Delta u_{dip, patch1, t1}/\\Delta t, 

3784 \\Delta u_{tensile, patch1, t1}/\\Delta t],\\\\ 

3785 &[\\Delta u_{strike, patch1, t2}/\\Delta t, 

3786 \\Delta u_{dip, patch1, t2}/\\Delta t, 

3787 \\Delta u_{tensile, patch1, t2}/\\Delta t]], [\\\\ 

3788 &[\\Delta u_{strike, patch2, t1}/\\Delta t, ...],\\\\ 

3789 &[\\Delta u_{strike, patch2, t2}/\\Delta t, ...]]]\\\\ 

3790 

3791 :rtype: :py:class:`~numpy.ndarray`: ``(n_sources, n_times, 3)``, 

3792 :py:class:`~numpy.ndarray`: ``(n_times, )`` 

3793 ''' 

3794 ddisloc_est, calc_times = self.get_delta_slip( 

3795 *args, delta=True, **kwargs) 

3796 

3797 dt = num.concatenate( 

3798 [(num.diff(calc_times)[0], ), num.diff(calc_times)]) 

3799 slip_rate = num.linalg.norm(ddisloc_est, axis=2) / dt 

3800 

3801 return slip_rate, calc_times 

3802 

3803 def get_moment_rate_patches(self, *args, **kwargs): 

3804 ''' 

3805 Get scalar seismic moment rate for each patch individually. 

3806 

3807 Additional ``*args`` and ``**kwargs`` are passed to 

3808 :py:meth:`get_slip_rate`. 

3809 

3810 :returns: 

3811 Seismic moment rate for each source patch and time; corner times, 

3812 for which patch moment rate is computed based on slip rate. The 

3813 order of the moment rate array is: 

3814 

3815 .. math:: 

3816 

3817 &[\\\\ 

3818 &[(\\Delta M / \\Delta t)_{patch1, t1}, 

3819 (\\Delta M / \\Delta t)_{patch1, t2}, ...],\\\\ 

3820 &[(\\Delta M / \\Delta t)_{patch2, t1}, 

3821 (\\Delta M / \\Delta t)_{patch, t2}, ...],\\\\ 

3822 &[...]]\\\\ 

3823 

3824 :rtype: :py:class:`~numpy.ndarray`: ``(n_sources, n_times)``, 

3825 :py:class:`~numpy.ndarray`: ``(n_times, )`` 

3826 ''' 

3827 slip_rate, calc_times = self.get_slip_rate(*args, **kwargs) 

3828 

3829 shear_mod = self.get_patch_attribute('shearmod') 

3830 p_length = self.get_patch_attribute('length') 

3831 p_width = self.get_patch_attribute('width') 

3832 

3833 dA = p_length * p_width 

3834 

3835 mom_rate = shear_mod[:, num.newaxis] * slip_rate * dA[:, num.newaxis] 

3836 

3837 return mom_rate, calc_times 

3838 

3839 def get_moment_rate(self, store, target=None, deltat=None): 

3840 ''' 

3841 Get seismic source moment rate for the total source (STF). 

3842 

3843 :param store: 

3844 Green's function database (needs to cover whole region of of the 

3845 source). Its ``deltat`` [s] is used as time increment for slip 

3846 difference calculation. Either ``deltat`` or ``store`` should be 

3847 given. 

3848 :type store: 

3849 :py:class:`~pyrocko.gf.store.Store` 

3850 

3851 :param target: 

3852 Target information, needed for interpolation method. 

3853 :type target: 

3854 optional, :py:class:`~pyrocko.gf.targets.Target` 

3855 

3856 :param deltat: 

3857 Time increment for slip difference calculation [s]. If not given 

3858 ``store.deltat`` is used. 

3859 :type deltat: 

3860 optional, float 

3861 

3862 :return: 

3863 Seismic moment rate [Nm/s] for each time; corner times, for which 

3864 moment rate is computed. The order of the moment rate array is: 

3865 

3866 .. math:: 

3867 

3868 &[\\\\ 

3869 &(\\Delta M / \\Delta t)_{t1},\\\\ 

3870 &(\\Delta M / \\Delta t)_{t2},\\\\ 

3871 &...]\\\\ 

3872 

3873 :rtype: 

3874 :py:class:`~numpy.ndarray`: ``(n_times, )``, 

3875 :py:class:`~numpy.ndarray`: ``(n_times, )`` 

3876 ''' 

3877 if not deltat: 

3878 deltat = store.config.deltat 

3879 return self.discretize_basesource( 

3880 store, target=target).get_moment_rate(deltat) 

3881 

3882 def get_moment(self, *args, **kwargs): 

3883 ''' 

3884 Get seismic cumulative moment. 

3885 

3886 Additional ``*args`` and ``**kwargs`` are passed to 

3887 :py:meth:`get_magnitude`. 

3888 

3889 :returns: 

3890 Cumulative seismic moment in [Nm]. 

3891 :rtype: 

3892 float 

3893 ''' 

3894 return float(pmt.magnitude_to_moment(self.get_magnitude( 

3895 *args, **kwargs))) 

3896 

3897 def rescale_slip(self, magnitude=None, moment=None, **kwargs): 

3898 ''' 

3899 Rescale source slip based on given target magnitude or seismic moment. 

3900 

3901 Rescale the maximum source slip to fit the source moment magnitude or 

3902 seismic moment to the given target values. Either ``magnitude`` or 

3903 ``moment`` need to be given. Additional ``**kwargs`` are passed to 

3904 :py:meth:`get_moment`. 

3905 

3906 :param magnitude: 

3907 Target moment magnitude :math:`M_\\mathrm{w}` as in 

3908 [Hanks and Kanamori, 1979] 

3909 :type magnitude: 

3910 optional, float 

3911 

3912 :param moment: 

3913 Target seismic moment :math:`M_0` [Nm]. 

3914 :type moment: 

3915 optional, float 

3916 ''' 

3917 if self.slip is None: 

3918 self.slip = 1. 

3919 logger.warning('No slip found for rescaling. ' 

3920 'An initial slip of 1 m is assumed.') 

3921 

3922 if magnitude is None and moment is None: 

3923 raise ValueError( 

3924 'Either target magnitude or moment need to be given.') 

3925 

3926 moment_init = self.get_moment(**kwargs) 

3927 

3928 if magnitude is not None: 

3929 moment = pmt.magnitude_to_moment(magnitude) 

3930 

3931 self.slip *= moment / moment_init 

3932 

3933 def get_centroid(self, store, *args, **kwargs): 

3934 ''' 

3935 Centroid of the pseudo dynamic rupture model. 

3936 

3937 The centroid location and time are derived from the locations and times 

3938 of the individual patches weighted with their moment contribution. 

3939 Additional ``**kwargs`` are passed to :py:meth:`pyrocko_moment_tensor`. 

3940 

3941 :param store: 

3942 Green's function database (needs to cover whole region of of the 

3943 source). Its ``deltat`` [s] is used as time increment for slip 

3944 difference calculation. Either ``deltat`` or ``store`` should be 

3945 given. 

3946 :type store: 

3947 :py:class:`~pyrocko.gf.store.Store` 

3948 

3949 :returns: 

3950 The centroid location and associated moment tensor. 

3951 :rtype: 

3952 :py:class:`pyrocko.model.Event` 

3953 ''' 

3954 _, _, _, _, time, _ = self.get_vr_time_interpolators(store) 

3955 t_max = time.values.max() 

3956 

3957 moment_rate, times = self.get_moment_rate_patches(deltat=t_max) 

3958 

3959 moment = num.sum(moment_rate * times, axis=1) 

3960 weights = moment / moment.sum() 

3961 

3962 norths = self.get_patch_attribute('north_shift') 

3963 easts = self.get_patch_attribute('east_shift') 

3964 depths = self.get_patch_attribute('depth') 

3965 

3966 centroid_n = num.sum(weights * norths) 

3967 centroid_e = num.sum(weights * easts) 

3968 centroid_d = num.sum(weights * depths) 

3969 

3970 centroid_lat, centroid_lon = ne_to_latlon( 

3971 self.lat, self.lon, centroid_n, centroid_e) 

3972 

3973 moment_rate_, times = self.get_moment_rate(store) 

3974 delta_times = num.concatenate(( 

3975 [times[1] - times[0]], 

3976 num.diff(times))) 

3977 moment_src = delta_times * moment_rate 

3978 

3979 centroid_t = num.sum( 

3980 moment_src / num.sum(moment_src) * times) + self.time 

3981 

3982 mt = self.pyrocko_moment_tensor(store, *args, **kwargs) 

3983 

3984 return model.Event( 

3985 lat=centroid_lat, 

3986 lon=centroid_lon, 

3987 depth=centroid_d, 

3988 time=centroid_t, 

3989 moment_tensor=mt, 

3990 magnitude=mt.magnitude, 

3991 duration=t_max) 

3992 

3993 

class DoubleDCSource(SourceWithMagnitude):
    '''
    Two double-couple point sources separated in space and time.
    Moment share between the sub-sources is controlled by the
    parameter mix.
    The position of the subsources is dependent on the moment
    distribution between the two sources. Depth, east and north
    shift are given for the centroid between the two double-couples.
    The subsources will positioned according to their moment shares
    around this centroid position.
    This is done according to their delta parameters, which are
    therefore in relation to that centroid.
    Note that depth of the subsources therefore can be
    depth+/-delta_depth. For shallow earthquakes therefore
    the depth has to be chosen deeper to avoid sampling
    above surface.
    '''

    strike1 = Float.T(
        default=0.0,
        help='strike direction in [deg], measured clockwise from north')

    dip1 = Float.T(
        default=90.0,
        help='dip angle in [deg], measured downward from horizontal')

    azimuth = Float.T(
        default=0.0,
        help='azimuth to second double-couple [deg], '
             'measured at first, clockwise from north')

    rake1 = Float.T(
        default=0.0,
        help='rake angle in [deg], '
             'measured counter-clockwise from right-horizontal '
             'in on-plane view')

    strike2 = Float.T(
        default=0.0,
        help='strike direction in [deg], measured clockwise from north')

    dip2 = Float.T(
        default=90.0,
        help='dip angle in [deg], measured downward from horizontal')

    rake2 = Float.T(
        default=0.0,
        help='rake angle in [deg], '
             'measured counter-clockwise from right-horizontal '
             'in on-plane view')

    delta_time = Float.T(
        default=0.0,
        help='separation of double-couples in time (t2-t1) [s]')

    delta_depth = Float.T(
        default=0.0,
        help='difference in depth (z2-z1) [m]')

    distance = Float.T(
        default=0.0,
        help='distance between the two double-couples [m]')

    mix = Float.T(
        default=0.5,
        help='how to distribute the moment to the two doublecouples '
             'mix=0 -> m1=1 and m2=0; mix=1 -> m1=0, m2=1')

    stf1 = STF.T(
        optional=True,
        help='Source time function of subsource 1 '
             '(if given, overrides STF from attribute :py:gattr:`Source.stf`)')

    stf2 = STF.T(
        optional=True,
        help='Source time function of subsource 2 '
             '(if given, overrides STF from attribute :py:gattr:`Source.stf`)')

    discretized_source_class = meta.DiscretizedMTSource

    def base_key(self):
        # Tuple of all GF-relevant parameters; sources with equal base keys
        # may share cached Green's function work.
        return (
            self.time, self.depth, self.lat, self.north_shift,
            self.lon, self.east_shift, type(self).__name__) + \
            self.effective_stf1_pre().base_key() + \
            self.effective_stf2_pre().base_key() + (
            self.strike1, self.dip1, self.rake1,
            self.strike2, self.dip2, self.rake2,
            self.delta_time, self.delta_depth,
            self.azimuth, self.distance, self.mix)

    def get_factor(self):
        # Total scalar moment; the split between subsources is handled via
        # ``mix`` in discretize_basesource / split.
        return self.moment

    def effective_stf1_pre(self):
        # STF of subsource 1: ``stf1`` overrides the common ``stf``; a unit
        # pulse is the fallback.
        return self.stf1 or self.stf or g_unit_pulse

    def effective_stf2_pre(self):
        # STF of subsource 2, analogous to effective_stf1_pre.
        return self.stf2 or self.stf or g_unit_pulse

    def effective_stf_post(self):
        # No post-stacking STF; all time-function convolution happens
        # pre-stack on the two subsources.
        return g_unit_pulse

    def split(self):
        '''
        Decompose into the two constituent :py:class:`DCSource` objects.

        Each subsource is offset from the centroid in time, horizontal
        position and depth according to its moment share.
        '''
        a1 = 1.0 - self.mix
        a2 = self.mix
        # Horizontal offset vector from subsource 1 to subsource 2.
        delta_north = math.cos(self.azimuth * d2r) * self.distance
        delta_east = math.sin(self.azimuth * d2r) * self.distance

        dc1 = DCSource(
            lat=self.lat,
            lon=self.lon,
            time=self.time - self.delta_time * a2,
            north_shift=self.north_shift - delta_north * a2,
            east_shift=self.east_shift - delta_east * a2,
            depth=self.depth - self.delta_depth * a2,
            moment=self.moment * a1,
            strike=self.strike1,
            dip=self.dip1,
            rake=self.rake1,
            stf=self.stf1 or self.stf)

        dc2 = DCSource(
            lat=self.lat,
            lon=self.lon,
            time=self.time + self.delta_time * a1,
            north_shift=self.north_shift + delta_north * a1,
            east_shift=self.east_shift + delta_east * a1,
            depth=self.depth + self.delta_depth * a1,
            moment=self.moment * a2,
            strike=self.strike2,
            dip=self.dip2,
            rake=self.rake2,
            stf=self.stf2 or self.stf)

        return [dc1, dc2]

    def discretize_basesource(self, store, target=None):
        '''
        Discretize both subsources in time and combine them into one
        :py:class:`~pyrocko.gf.meta.DiscretizedMTSource`.
        '''
        a1 = 1.0 - self.mix
        a2 = self.mix
        # Unit moment tensors scaled by the respective moment shares; the
        # total moment enters through get_factor().
        mot1 = pmt.MomentTensor(strike=self.strike1, dip=self.dip1,
                                rake=self.rake1, scalar_moment=a1)
        mot2 = pmt.MomentTensor(strike=self.strike2, dip=self.dip2,
                                rake=self.rake2, scalar_moment=a2)

        delta_north = math.cos(self.azimuth * d2r) * self.distance
        delta_east = math.sin(self.azimuth * d2r) * self.distance

        times1, amplitudes1 = self.effective_stf1_pre().discretize_t(
            store.config.deltat, self.time - self.delta_time * a2)

        times2, amplitudes2 = self.effective_stf2_pre().discretize_t(
            store.config.deltat, self.time + self.delta_time * a1)

        nt1 = times1.size
        nt2 = times2.size

        # Concatenate the time samples of both subsources; positions are
        # repeated per time sample of the respective subsource.
        ds = meta.DiscretizedMTSource(
            lat=self.lat,
            lon=self.lon,
            times=num.concatenate((times1, times2)),
            north_shifts=num.concatenate((
                num.repeat(self.north_shift - delta_north * a2, nt1),
                num.repeat(self.north_shift + delta_north * a1, nt2))),
            east_shifts=num.concatenate((
                num.repeat(self.east_shift - delta_east * a2, nt1),
                num.repeat(self.east_shift + delta_east * a1, nt2))),
            depths=num.concatenate((
                num.repeat(self.depth - self.delta_depth * a2, nt1),
                num.repeat(self.depth + self.delta_depth * a1, nt2))),
            m6s=num.vstack((
                mot1.m6()[num.newaxis, :] * amplitudes1[:, num.newaxis],
                mot2.m6()[num.newaxis, :] * amplitudes2[:, num.newaxis])))

        return ds

    def pyrocko_moment_tensor(self, store=None, target=None):
        '''
        Combined moment tensor: sum of both subsource tensors.
        '''
        a1 = 1.0 - self.mix
        a2 = self.mix
        mot1 = pmt.MomentTensor(strike=self.strike1, dip=self.dip1,
                                rake=self.rake1,
                                scalar_moment=a1 * self.moment)
        mot2 = pmt.MomentTensor(strike=self.strike2, dip=self.dip2,
                                rake=self.rake2,
                                scalar_moment=a2 * self.moment)
        return pmt.MomentTensor(m=mot1.m() + mot2.m())

    def pyrocko_event(self, store=None, target=None, **kwargs):
        return SourceWithMagnitude.pyrocko_event(
            self, store, target,
            moment_tensor=self.pyrocko_moment_tensor(store, target),
            **kwargs)

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        d = {}
        mt = ev.moment_tensor
        if mt:
            # Initialize both subsources with the same mechanism; mix=0.0
            # puts the whole moment into subsource 1.
            (strike, dip, rake), _ = mt.both_strike_dip_rake()
            d.update(
                strike1=float(strike),
                dip1=float(dip),
                rake1=float(rake),
                strike2=float(strike),
                dip2=float(dip),
                rake2=float(rake),
                mix=0.0,
                magnitude=float(mt.moment_magnitude()))

        d.update(kwargs)
        source = super(DoubleDCSource, cls).from_pyrocko_event(ev, **d)
        # Move the common STF to subsource 1; subsource 2 gets a
        # zero-duration STF since it carries no moment (mix=0.0).
        source.stf1 = source.stf
        source.stf2 = HalfSinusoidSTF(effective_duration=0.)
        source.stf = None
        return source

4209 

4210 

class RingfaultSource(SourceWithMagnitude):
    '''
    A ring fault with vertical doublecouples.
    '''

    diameter = Float.T(
        default=1.0,
        help='diameter of the ring in [m]')

    sign = Float.T(
        default=1.0,
        help='inside of the ring moves up (+1) or down (-1)')

    strike = Float.T(
        default=0.0,
        help='strike direction of the ring plane, clockwise from north,'
             ' in [deg]')

    dip = Float.T(
        default=0.0,
        help='dip angle of the ring plane from horizontal in [deg]')

    npointsources = Int.T(
        default=360,
        help='number of point sources to use')

    discretized_source_class = meta.DiscretizedMTSource

    def base_key(self):
        # GF-relevant parameters: ring geometry and discretization density.
        return Source.base_key(self) + (
            self.strike, self.dip, self.diameter, self.npointsources)

    def get_factor(self):
        # ``sign`` selects uplift (+1) or subsidence (-1) of the ring
        # interior.
        return self.sign * self.moment

    def discretize_basesource(self, store=None, target=None):
        '''
        Discretize the ring into ``npointsources`` double-couple point
        sources distributed evenly along the circle.
        '''
        n = self.npointsources
        phi = num.linspace(0, 2.0 * num.pi, n, endpoint=False)

        # Points on a horizontal circle of the given diameter ...
        points = num.zeros((n, 3))
        points[:, 0] = num.cos(phi) * 0.5 * self.diameter
        points[:, 1] = num.sin(phi) * 0.5 * self.diameter

        # ... rotated into the ring plane given by dip and strike.
        rotmat = pmt.euler_to_matrix(self.dip * d2r, self.strike * d2r, 0.0)
        # NOTE(review): the rotation is applied with the transposed matrix;
        # the original code carried a '!!! ?' marker here -- verify the
        # orientation convention.
        points = num.dot(rotmat.T, points.T).T

        points[:, 0] += self.north_shift
        points[:, 1] += self.east_shift
        points[:, 2] += self.depth

        # Moment tensor of one vertical double-couple; the unit moment is
        # split evenly over the n point sources.
        m = num.array(pmt.MomentTensor(strike=90., dip=90., rake=-90.,
                                       scalar_moment=1.0 / n).m())

        # Per-point rotation about the vertical axis by the ring angle phi.
        rotmats = num.transpose(
            [[num.cos(phi), num.sin(phi), num.zeros(n)],
             [-num.sin(phi), num.cos(phi), num.zeros(n)],
             [num.zeros(n), num.zeros(n), num.ones(n)]], (2, 0, 1))

        ms = num.zeros((n, 3, 3))
        for i in range(n):
            # Rotate the double-couple around the ring, then into the ring
            # plane.
            mtemp = num.dot(rotmats[i].T, num.dot(m, rotmats[i]))
            ms[i, :, :] = num.dot(rotmat.T, num.dot(mtemp, rotmat))

        # Pack the symmetric 3x3 tensors into 6-component (m6) form.
        m6s = num.vstack((ms[:, 0, 0], ms[:, 1, 1], ms[:, 2, 2],
                          ms[:, 0, 1], ms[:, 0, 2], ms[:, 1, 2])).T

        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)

        nt = times.size

        # Repeat each point for every STF time sample and scale the moment
        # tensors by the STF amplitudes.
        return meta.DiscretizedMTSource(
            times=num.tile(times, n),
            lat=self.lat,
            lon=self.lon,
            north_shifts=num.repeat(points[:, 0], nt),
            east_shifts=num.repeat(points[:, 1], nt),
            depths=num.repeat(points[:, 2], nt),
            m6s=num.repeat(m6s, nt, axis=0) * num.tile(
                amplitudes, n)[:, num.newaxis])

4291 

4292 

class CombiSource(Source):
    '''
    Composite source model.

    Aggregates several sub-sources into a single source object. All
    sub-sources are moved to a common origin (the origin of the first
    sub-source) and the combined source position and time are set to the
    mean of the sub-source positions and times.

    :param subsources: list of :py:class:`Source` objects (at least one).
    :raises BadRequest: if ``subsources`` is empty.
    '''

    discretized_source_class = meta.DiscretizedMTSource

    subsources = List.T(Source.T())

    def __init__(self, subsources=[], **kwargs):
        # NOTE: the mutable default is harmless here: ``subsources`` is
        # never mutated in place (it is rebound when cloning below).
        if not subsources:
            raise BadRequest(
                'Need at least one sub-source to create a CombiSource object.')

        lats = num.array(
            [subsource.lat for subsource in subsources], dtype=float)
        lons = num.array(
            [subsource.lon for subsource in subsources], dtype=float)

        lat, lon = lats[0], lons[0]

        # Move all sub-sources to a common origin if their origins differ.
        # Bug fix: the condition was previously
        # ``not num.all(lats == lat) and num.all(lons == lon)`` which, due
        # to operator precedence (``not`` binds before ``and``), skipped
        # the adjustment whenever the longitudes differed. The intended
        # check is "not (all lats equal and all lons equal)".
        if not (num.all(lats == lat) and num.all(lons == lon)):
            subsources = [s.clone() for s in subsources]
            # The first sub-source already sits at the reference origin.
            for subsource in subsources[1:]:
                subsource.set_origin(lat, lon)

        depth = float(num.mean([p.depth for p in subsources]))
        time = float(num.mean([p.time for p in subsources]))
        north_shift = float(num.mean([p.north_shift for p in subsources]))
        east_shift = float(num.mean([p.east_shift for p in subsources]))
        kwargs.update(
            time=time,
            lat=float(lat),
            lon=float(lon),
            north_shift=north_shift,
            east_shift=east_shift,
            depth=depth)

        Source.__init__(self, subsources=subsources, **kwargs)

    def get_factor(self):
        # Sub-source factors are applied individually in
        # discretize_basesource, so the combined factor is unity.
        return 1.0

    def discretize_basesource(self, store, target=None):
        '''
        Combine the discretized representations of all sub-sources,
        scaling each by its own factor.
        '''
        dsources = []
        for sf in self.subsources:
            ds = sf.discretize_basesource(store, target)
            ds.m6s *= sf.get_factor()
            dsources.append(ds)

        return meta.DiscretizedMTSource.combine(dsources)

4343 

4344 

class SFSource(Source):
    '''
    A single force point source.

    Supported GF schemes: `'elastic5'`.
    '''

    discretized_source_class = meta.DiscretizedSFSource

    fn = Float.T(
        default=0.,
        help='northward component of single force [N]')

    fe = Float.T(
        default=0.,
        help='eastward component of single force [N]')

    fd = Float.T(
        default=0.,
        help='downward component of single force [N]')

    def __init__(self, **kwargs):
        # No extra initialization needed; kept for symmetry with other
        # source classes.
        Source.__init__(self, **kwargs)

    def base_key(self):
        # GF-relevant parameters: position/time plus the force vector.
        return Source.base_key(self) + (self.fn, self.fe, self.fd)

    def get_factor(self):
        # Force amplitudes are applied directly in discretize_basesource.
        return 1.0

    def discretize_basesource(self, store, target=None):
        '''
        Discretize the source in time using the effective pre-stack STF.
        '''
        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)
        # Scale the constant force vector by the STF amplitude of each
        # time sample.
        forces = amplitudes[:, num.newaxis] * num.array(
            [[self.fn, self.fe, self.fd]], dtype=float)

        return meta.DiscretizedSFSource(forces=forces,
                                        **self._dparams_base_repeated(times))

    def pyrocko_event(self, store=None, target=None, **kwargs):
        # A single force has no moment tensor; delegate directly.
        return Source.pyrocko_event(
            self, store, target,
            **kwargs)

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        d = {}
        d.update(kwargs)
        return super(SFSource, cls).from_pyrocko_event(ev, **d)

4394 

4395 

class PorePressurePointSource(Source):
    '''
    Excess pore pressure point source.

    For poro-elastic initial value problem where an excess pore pressure is
    brought into a small source volume.
    '''

    discretized_source_class = meta.DiscretizedPorePressureSource

    pp = Float.T(
        default=1.0,
        help='initial excess pore pressure in [Pa]')

    def base_key(self):
        # Position/time only; the pressure amplitude enters via get_factor.
        return Source.base_key(self)

    def get_factor(self):
        return self.pp

    def discretize_basesource(self, store, target=None):
        # ``arr`` is presumably a module-level helper converting its
        # argument to a numpy array -- defined elsewhere in this file.
        return meta.DiscretizedPorePressureSource(pp=arr(1.0),
                                                  **self._dparams_base())

4419 

4420 

class PorePressureLineSource(Source):
    '''
    Excess pore pressure line source.

    The line source is centered at (north_shift, east_shift, depth).
    '''

    discretized_source_class = meta.DiscretizedPorePressureSource

    pp = Float.T(
        default=1.0,
        help='initial excess pore pressure in [Pa]')

    length = Float.T(
        default=0.0,
        help='length of the line source [m]')

    azimuth = Float.T(
        default=0.0,
        help='azimuth direction, clockwise from north [deg]')

    dip = Float.T(
        default=90.,
        help='dip direction, downward from horizontal [deg]')

    def base_key(self):
        # GF-relevant parameters: position/time plus line orientation and
        # extent.
        return Source.base_key(self) + (self.azimuth, self.dip, self.length)

    def get_factor(self):
        return self.pp

    def discretize_basesource(self, store, target=None):

        # Odd number of points so the line center is sampled; spacing is at
        # most half the smallest GF grid spacing.
        n = 2 * int(math.ceil(self.length / num.min(store.config.deltas))) + 1

        # Signed distances along the line, centered at zero.
        a = num.linspace(-0.5 * self.length, 0.5 * self.length, n)

        sa = math.sin(self.azimuth * d2r)
        ca = math.cos(self.azimuth * d2r)
        sd = math.sin(self.dip * d2r)
        cd = math.cos(self.dip * d2r)

        # Project the line direction into NED coordinates.
        points = num.zeros((n, 3))
        points[:, 0] = self.north_shift + a * ca * cd
        points[:, 1] = self.east_shift + a * sa * cd
        points[:, 2] = self.depth + a * sd

        # Total pressure is distributed evenly over the n points.
        return meta.DiscretizedPorePressureSource(
            times=util.num_full(n, self.time),
            lat=self.lat,
            lon=self.lon,
            north_shifts=points[:, 0],
            east_shifts=points[:, 1],
            depths=points[:, 2],
            pp=num.ones(n) / n)

4476 

4477 

class Request(Object):
    '''
    Synthetic seismogram computation request.

    ::

        Request(**kwargs)
        Request(sources, targets, **kwargs)
    '''

    sources = List.T(
        Source.T(),
        help='list of sources for which to produce synthetics.')

    targets = List.T(
        Target.T(),
        help='list of targets for which to produce synthetics.')

    @classmethod
    def args2kwargs(cls, args):
        '''
        Map positional constructor arguments to keyword arguments.

        :raises BadRequest: if the number of positional arguments is not
            0, 2 or 3.
        '''
        # NOTE(review): three positional arguments are accepted but
        # silently ignored -- presumably for backward compatibility;
        # confirm before relying on it.
        if len(args) not in (0, 2, 3):
            raise BadRequest('Invalid arguments.')

        if len(args) == 2:
            return dict(sources=args[0], targets=args[1])
        else:
            return {}

    def __init__(self, *args, **kwargs):
        kwargs.update(self.args2kwargs(args))
        sources = kwargs.pop('sources', [])
        targets = kwargs.pop('targets', [])

        # Allow single objects as shorthand for one-element lists.
        if isinstance(sources, Source):
            sources = [sources]

        if isinstance(targets, (Target, StaticTarget)):
            targets = [targets]

        Object.__init__(self, sources=sources, targets=targets, **kwargs)

    @property
    def targets_dynamic(self):
        # Waveform targets only.
        return [t for t in self.targets if isinstance(t, Target)]

    @property
    def targets_static(self):
        # Static (displacement) targets only.
        return [t for t in self.targets if isinstance(t, StaticTarget)]

    @property
    def has_dynamic(self):
        return bool(self.targets_dynamic)

    @property
    def has_statics(self):
        return bool(self.targets_static)

    def subsources_map(self):
        '''
        Group sources by their base key.

        Sources sharing a base key can be processed together.
        '''
        m = defaultdict(list)
        for source in self.sources:
            m[source.base_key()].append(source)

        return m

    def subtargets_map(self):
        '''
        Group targets by their base key.
        '''
        m = defaultdict(list)
        for target in self.targets:
            m[target.base_key()].append(target)

        return m

    def subrequest_map(self):
        '''
        Cartesian product of source groups and target groups.

        :returns: dict mapping ``(source_key, target_key)`` to
            ``(sources, targets)`` lists.
        '''
        ms = self.subsources_map()
        mt = self.subtargets_map()
        m = {}
        for (ks, ls) in ms.items():
            for (kt, lt) in mt.items():
                m[ks, kt] = (ls, lt)

        return m

4558 

4559 

class ProcessingStats(Object):
    '''
    Timing and counting statistics collected while processing a request.

    The ``t_perc_*`` attributes presumably hold percentages of the total
    processing time spent in each stage -- confirm against the code
    filling this object.
    '''
    t_perc_get_store_and_receiver = Float.T(default=0.)
    t_perc_discretize_source = Float.T(default=0.)
    t_perc_make_base_seismogram = Float.T(default=0.)
    t_perc_make_same_span = Float.T(default=0.)
    t_perc_post_process = Float.T(default=0.)
    t_perc_optimize = Float.T(default=0.)
    t_perc_stack = Float.T(default=0.)
    t_perc_static_get_store = Float.T(default=0.)
    t_perc_static_discretize_basesource = Float.T(default=0.)
    t_perc_static_sum_statics = Float.T(default=0.)
    t_perc_static_post_process = Float.T(default=0.)
    t_wallclock = Float.T(default=0.)
    t_cpu = Float.T(default=0.)
    n_read_blocks = Int.T(default=0)
    n_results = Int.T(default=0)
    n_subrequests = Int.T(default=0)
    n_stores = Int.T(default=0)
    n_records_stacked = Int.T(default=0)

4579 

4580 

class Response(Object):
    '''
    Response object to a synthetic seismogram computation request.
    '''

    request = Request.T()
    results_list = List.T(List.T(meta.SeismosizerResult.T()))
    stats = ProcessingStats.T()

    def pyrocko_traces(self):
        '''
        Return a list of requested
        :class:`~pyrocko.trace.Trace` instances.
        '''

        traces = []
        for results in self.results_list:
            for result in results:
                # Skip non-trace results (e.g. static or error results).
                if not isinstance(result, meta.Result):
                    continue
                traces.append(result.trace.pyrocko_trace())

        return traces

    def kite_scenes(self):
        '''
        Return a list of requested
        :class:`~kite.scenes` instances.
        '''
        kite_scenes = []
        for results in self.results_list:
            for result in results:
                if isinstance(result, meta.KiteSceneResult):
                    sc = result.get_scene()
                    kite_scenes.append(sc)

        return kite_scenes

    def static_results(self):
        '''
        Return a list of requested
        :class:`~pyrocko.gf.meta.StaticResult` instances.
        '''
        statics = []
        for results in self.results_list:
            for result in results:
                if not isinstance(result, meta.StaticResult):
                    continue
                statics.append(result)

        return statics

    def iter_results(self, get='pyrocko_traces'):
        '''
        Generator function to iterate over results of request.

        :param get: ``'pyrocko_traces'`` to yield converted traces,
            ``'results'`` to yield the raw result objects.

        Yields associated :py:class:`Source`,
        :class:`~pyrocko.gf.targets.Target`,
        :class:`~pyrocko.trace.Trace` instances in each iteration.
        '''

        for isource, source in enumerate(self.request.sources):
            for itarget, target in enumerate(self.request.targets):
                result = self.results_list[isource][itarget]
                if get == 'pyrocko_traces':
                    yield source, target, result.trace.pyrocko_trace()
                elif get == 'results':
                    yield source, target, result

    def snuffle(self, **kwargs):
        '''
        Open *snuffler* with requested traces.
        '''

        trace.snuffle(self.pyrocko_traces(), **kwargs)

4656 

4657 

class Engine(Object):
    '''
    Base class for synthetic seismogram calculators.
    '''

    def get_store_ids(self):
        '''
        Get list of available GF store IDs.

        The base implementation knows no stores; subclasses override this.
        '''

        return []

4669 

4670 

class Rule(object):
    '''
    Base class for rules mapping base seismogram components to a
    requested quantity.
    '''
    pass

4673 

4674 

class VectorRule(Rule):
    '''
    Rule projecting a three-component (north, east, down) vector quantity
    onto a target's receiver orientation.
    '''

    def __init__(self, quantity, differentiate=0, integrate=0):
        self.components = [quantity + '.' + c for c in 'ned']
        self.differentiate = differentiate
        self.integrate = integrate

    def _projection_factors(self, target):
        # Projection coefficients for the north, east and down components.
        sa, ca, sd, cd = target.get_sin_cos_factors()
        return (ca * cd, sa * cd, sd)

    def required_components(self, target):
        # Only components with a non-vanishing projection coefficient are
        # needed from the GF store.
        return tuple(
            comp for comp, fac in zip(
                self.components, self._projection_factors(target))
            if nonzero(fac))

    def apply_(self, target, base_seismogram):
        # Accumulate the weighted sum of the available components. The
        # sample interval is taken from the last component used.
        data = 0.0
        for comp, fac in zip(
                self.components, self._projection_factors(target)):
            if nonzero(fac):
                tr = base_seismogram[comp]
                data = data + tr.data * fac
                deltat = tr.deltat

        if self.differentiate:
            data = util.diff_fd(self.differentiate, 4, deltat, data)

        if self.integrate:
            raise NotImplementedError('Integration is not implemented yet.')

        return data

4723 

4724 

class HorizontalVectorRule(Rule):
    '''
    Rule projecting a two-component (north, east) horizontal vector
    quantity onto a target's receiver azimuth.
    '''

    def __init__(self, quantity, differentiate=0, integrate=0):
        self.components = [quantity + '.' + c for c in 'ne']
        self.differentiate = differentiate
        self.integrate = integrate

    def required_components(self, target):
        '''
        Components with non-vanishing projection coefficient.
        '''
        n, e = self.components
        sa, ca, _, _ = target.get_sin_cos_factors()

        comps = []
        if nonzero(ca):
            comps.append(n)

        if nonzero(sa):
            comps.append(e)

        return tuple(comps)

    def apply_(self, target, base_seismogram):
        '''
        Project the horizontal components onto the target azimuth.
        '''
        n, e = self.components
        sa, ca, _, _ = target.get_sin_cos_factors()

        data = 0.0
        deltat = None
        if nonzero(ca):
            data = base_seismogram[n].data * ca
            deltat = base_seismogram[n].deltat

        if nonzero(sa):
            data = data + base_seismogram[e].data * sa
            deltat = base_seismogram[e].deltat

        if self.differentiate:
            # Bug fix: previously the sample interval was always read from
            # the east component, which may not be present in
            # base_seismogram when its projection factor is zero (it is
            # then excluded by required_components), causing a KeyError.
            # Take deltat from a component actually used, consistent with
            # VectorRule.
            data = util.diff_fd(self.differentiate, 4, deltat, data)

        if self.integrate:
            raise NotImplementedError('Integration is not implemented yet.')

        return data

4765 

4766 

class ScalarRule(Rule):
    '''
    Rule for a scalar quantity stored as a single GF component.
    '''

    def __init__(self, quantity, differentiate=0):
        self.c = quantity
        # Bug fix: ``differentiate`` was previously not stored, so every
        # call to apply_ raised AttributeError when reading
        # ``self.differentiate``.
        self.differentiate = differentiate

    def required_components(self, target):
        return (self.c, )

    def apply_(self, target, base_seismogram):
        # Copy so that differentiation does not modify the base seismogram.
        data = base_seismogram[self.c].data.copy()
        deltat = base_seismogram[self.c].deltat
        if self.differentiate:
            data = util.diff_fd(self.differentiate, 4, deltat, data)

        return data

4782 

4783 

class StaticDisplacement(Rule):
    '''
    Rule for static NED displacements; adds the line-of-sight projection
    for satellite targets.
    '''

    def required_components(self, target):
        # All three displacement components are always needed.
        return tuple('displacement.' + axis for axis in 'ned')

    def apply_(self, target, base_statics):
        if isinstance(target, SatelliteTarget):
            # Project NED displacements onto the satellite line of sight;
            # the down component enters with a flipped sign.
            los_fac = target.get_los_factors()
            base_statics['displacement.los'] = (
                los_fac[:, 0] * -base_statics['displacement.d']
                + los_fac[:, 1] * base_statics['displacement.e']
                + los_fac[:, 2] * base_statics['displacement.n'])
        return base_statics

4797 

4798 

# Mapping of requested dynamic quantity to the list of rules which can
# produce it from available GF components. Rules appear in order of
# preference; later entries derive the quantity by differentiating a
# lower-order quantity.
channel_rules = {
    'displacement': [VectorRule('displacement')],
    'rotation': [VectorRule('rotation')],
    'velocity': [
        VectorRule('velocity'),
        VectorRule('displacement', differentiate=1)],
    'acceleration': [
        VectorRule('acceleration'),
        VectorRule('velocity', differentiate=1),
        VectorRule('displacement', differentiate=2)],
    'pore_pressure': [ScalarRule('pore_pressure')],
    'vertical_tilt': [HorizontalVectorRule('vertical_tilt')],
    'darcy_velocity': [VectorRule('darcy_velocity')],
}

# Rules for static (non-time-dependent) quantities.
static_rules = {
    'displacement': [StaticDisplacement()]
}

4817 

4818 

class OutOfBoundsContext(Object):
    '''
    Context information attached to out-of-bounds errors raised during
    seismogram processing.
    '''
    source = Source.T()
    target = Target.T()
    distance = Float.T()
    components = List.T(String.T())

4824 

4825 

def process_dynamic_timeseries(work, psources, ptargets, engine, nthreads=0):
    '''
    Process dynamic subrequests, batching targets per store, sample rate
    and interpolation scheme.

    Yields ``((isource, itarget, result), tcounters)`` tuples, where
    ``tcounters`` is a fixed placeholder list (detailed per-stage timing is
    not collected on this code path).
    '''
    dsource_cache = {}
    tcounters = list(range(6))

    targets = set()

    # Remember each target's position in ptargets so results can be
    # attributed after batching.
    for itarget, target in enumerate(ptargets):
        target._id = itarget

    for w in work:
        _, _, isources, itargets = w
        targets.update([ptargets[itarget] for itarget in itargets])

    store_ids = set([t.store_id for t in targets])

    for isource, source in enumerate(psources):

        # Union of GF components needed by any target for this source.
        components = set()
        for target in targets:
            rule = engine.get_rule(source, target)
            components.update(rule.required_components(target))

        for store_id in store_ids:
            store_targets = [t for t in targets if t.store_id == store_id]

            sample_rates = set([t.sample_rate for t in store_targets])
            interpolations = set([t.interpolation for t in store_targets])

            base_seismograms = []
            store_targets_out = []

            # One engine call per (sample rate, interpolation) batch;
            # base_seismograms stays aligned with store_targets_out.
            for samp_rate in sample_rates:
                for interp in interpolations:
                    engine_targets = [
                        t for t in store_targets if t.sample_rate == samp_rate
                        and t.interpolation == interp]

                    if not engine_targets:
                        continue

                    store_targets_out += engine_targets

                    base_seismograms += engine.base_seismograms(
                        source,
                        engine_targets,
                        components,
                        dsource_cache,
                        nthreads)

            for iseis, seismogram in enumerate(base_seismograms):
                for tr in seismogram.values():
                    if tr.err != store.SeismosizerErrorEnum.SUCCESS:
                        # Bug fix: seismograms are aligned with
                        # store_targets_out (the batched order), not with
                        # store_targets, so the error context must index
                        # store_targets_out.
                        e = SeismosizerError(
                            'Seismosizer failed with return code %i\n%s' % (
                                tr.err, str(
                                    OutOfBoundsContext(
                                        source=source,
                                        target=store_targets_out[iseis],
                                        distance=source.distance_to(
                                            store_targets_out[iseis]),
                                        components=components))))
                        raise e

            for seismogram, target in zip(base_seismograms, store_targets_out):

                try:
                    result = engine._post_process_dynamic(
                        seismogram, source, target)
                except SeismosizerError as e:
                    result = e

                yield (isource, target._id, result), tcounters

4902 

4903 

def process_dynamic(work, psources, ptargets, engine, nthreads=0):
    '''
    Process dynamic (waveform) subrequests one source/target pair at a
    time.

    Yields ``((isource, itarget, result), tcounters)`` tuples; ``tcounters``
    holds wallclock timestamps for profiling (``xtime`` is defined
    elsewhere in this module).
    '''
    dsource_cache = {}

    for w in work:
        _, _, isources, itargets = w

        sources = [psources[isource] for isource in isources]
        targets = [ptargets[itarget] for itarget in itargets]

        # Union of GF components needed by any target. Work items group
        # sources by base key (see Request.subrequest_map), so the rule for
        # the first source is used for all of them.
        components = set()
        for target in targets:
            rule = engine.get_rule(sources[0], target)
            components.update(rule.required_components(target))

        for isource, source in zip(isources, sources):
            for itarget, target in zip(itargets, targets):

                try:
                    base_seismogram, tcounters = engine.base_seismogram(
                        source, target, components, dsource_cache, nthreads)
                except meta.OutOfBounds as e:
                    # NOTE(review): the context references sources[0] and
                    # targets[0] rather than the failing source/target
                    # pair -- confirm this is intentional.
                    e.context = OutOfBoundsContext(
                        source=sources[0],
                        target=targets[0],
                        distance=sources[0].distance_to(targets[0]),
                        components=components)
                    raise

                # Aggregate stacking statistics over all components.
                n_records_stacked = 0
                t_optimize = 0.0
                t_stack = 0.0

                for _, tr in base_seismogram.items():
                    n_records_stacked += tr.n_records_stacked
                    t_optimize += tr.t_optimize
                    t_stack += tr.t_stack

                try:
                    result = engine._post_process_dynamic(
                        base_seismogram, source, target)
                    result.n_records_stacked = n_records_stacked
                    result.n_shared_stacking = len(sources) *\
                        len(targets)
                    result.t_optimize = t_optimize
                    result.t_stack = t_stack
                except SeismosizerError as e:
                    result = e

                tcounters.append(xtime())
                yield (isource, itarget, result), tcounters

4954 

4955 

def process_static(work, psources, ptargets, engine, nthreads=0):
    '''
    Process static (non-time-dependent) subrequests one source/target pair
    at a time.

    Yields ``((isource, itarget, result), tcounters)`` tuples; ``tcounters``
    holds wallclock timestamps for profiling (``xtime`` is defined
    elsewhere in this module).
    '''
    for w in work:
        _, _, isources, itargets = w

        sources = [psources[isource] for isource in isources]
        targets = [ptargets[itarget] for itarget in itargets]

        for isource, source in zip(isources, sources):
            for itarget, target in zip(itargets, targets):
                components = engine.get_rule(source, target)\
                    .required_components(target)

                try:
                    base_statics, tcounters = engine.base_statics(
                        source, target, components, nthreads)
                except meta.OutOfBounds as e:
                    # NOTE(review): the context references sources[0] and
                    # targets[0] rather than the failing pair -- confirm
                    # this is intentional.
                    e.context = OutOfBoundsContext(
                        source=sources[0],
                        target=targets[0],
                        distance=float('nan'),
                        components=components)
                    raise
                result = engine._post_process_statics(
                    base_statics, source, target)
                tcounters.append(xtime())

                yield (isource, itarget, result), tcounters

4983 

4984 

class LocalEngine(Engine):
    '''
    Offline synthetic seismogram calculator.

    :param use_env: if ``True``, fill :py:attr:`store_superdirs` and
        :py:attr:`store_dirs` with paths set in environment variables
        GF_STORE_SUPERDIRS and GF_STORE_DIRS.
    :param use_config: if ``True``, fill :py:attr:`store_superdirs` and
        :py:attr:`store_dirs` with paths set in the user's config file.

        The config file can be found at :file:`~/.pyrocko/config.pf`

        .. code-block :: python

            gf_store_dirs: ['/home/pyrocko/gf_stores/ak135/']
            gf_store_superdirs: ['/home/pyrocko/gf_stores/']
    '''

    store_superdirs = List.T(
        String.T(),
        help='directories which are searched for Green\'s function stores')

    store_dirs = List.T(
        String.T(),
        help='additional individual Green\'s function store directories')

    default_store_id = String.T(
        optional=True,
        help='default store ID to be used when a request does not provide '
             'one')

5015 

5016 def __init__(self, **kwargs): 

5017 use_env = kwargs.pop('use_env', False) 

5018 use_config = kwargs.pop('use_config', False) 

5019 Engine.__init__(self, **kwargs) 

5020 if use_env: 

5021 env_store_superdirs = os.environ.get('GF_STORE_SUPERDIRS', '') 

5022 env_store_dirs = os.environ.get('GF_STORE_DIRS', '') 

5023 if env_store_superdirs: 

5024 self.store_superdirs.extend(env_store_superdirs.split(':')) 

5025 

5026 if env_store_dirs: 

5027 self.store_dirs.extend(env_store_dirs.split(':')) 

5028 

5029 if use_config: 

5030 c = config.config() 

5031 self.store_superdirs.extend(c.gf_store_superdirs) 

5032 self.store_dirs.extend(c.gf_store_dirs) 

5033 

5034 self._check_store_dirs_type() 

5035 self._id_to_store_dir = {} 

5036 self._open_stores = {} 

5037 self._effective_default_store_id = None 

5038 

5039 def _check_store_dirs_type(self): 

5040 for sdir in ['store_dirs', 'store_superdirs']: 

5041 if not isinstance(self.__getattribute__(sdir), list): 

5042 raise TypeError("{} of {} is not of type list".format( 

5043 sdir, self.__class__.__name__)) 

5044 

5045 def _get_store_id(self, store_dir): 

5046 store_ = store.Store(store_dir) 

5047 store_id = store_.config.id 

5048 store_.close() 

5049 return store_id 

5050 

5051 def _looks_like_store_dir(self, store_dir): 

5052 return os.path.isdir(store_dir) and \ 

5053 all(os.path.isfile(pjoin(store_dir, x)) for x in 

5054 ('index', 'traces', 'config')) 

5055 

5056 def iter_store_dirs(self): 

5057 store_dirs = set() 

5058 for d in self.store_superdirs: 

5059 if not os.path.exists(d): 

5060 logger.warning('store_superdir not available: %s' % d) 

5061 continue 

5062 

5063 for entry in os.listdir(d): 

5064 store_dir = os.path.realpath(pjoin(d, entry)) 

5065 if self._looks_like_store_dir(store_dir): 

5066 store_dirs.add(store_dir) 

5067 

5068 for store_dir in self.store_dirs: 

5069 store_dirs.add(os.path.realpath(store_dir)) 

5070 

5071 return store_dirs 

5072 

5073 def _scan_stores(self): 

5074 for store_dir in self.iter_store_dirs(): 

5075 store_id = self._get_store_id(store_dir) 

5076 if store_id not in self._id_to_store_dir: 

5077 self._id_to_store_dir[store_id] = store_dir 

5078 else: 

5079 if store_dir != self._id_to_store_dir[store_id]: 

5080 raise DuplicateStoreId( 

5081 'GF store ID %s is used in (at least) two ' 

5082 'different stores. Locations are: %s and %s' % 

5083 (store_id, self._id_to_store_dir[store_id], store_dir)) 

5084 

5085 def get_store_dir(self, store_id): 

5086 ''' 

5087 Lookup directory given a GF store ID. 

5088 ''' 

5089 

5090 if store_id not in self._id_to_store_dir: 

5091 self._scan_stores() 

5092 

5093 if store_id not in self._id_to_store_dir: 

5094 raise NoSuchStore(store_id, self.iter_store_dirs()) 

5095 

5096 return self._id_to_store_dir[store_id] 

5097 

5098 def get_store_ids(self): 

5099 ''' 

5100 Get list of available store IDs. 

5101 ''' 

5102 

5103 self._scan_stores() 

5104 return sorted(self._id_to_store_dir.keys()) 

5105 

5106 def effective_default_store_id(self): 

5107 if self._effective_default_store_id is None: 

5108 if self.default_store_id is None: 

5109 store_ids = self.get_store_ids() 

5110 if len(store_ids) == 1: 

5111 self._effective_default_store_id = self.get_store_ids()[0] 

5112 else: 

5113 raise NoDefaultStoreSet() 

5114 else: 

5115 self._effective_default_store_id = self.default_store_id 

5116 

5117 return self._effective_default_store_id 

5118 

5119 def get_store(self, store_id=None): 

5120 ''' 

5121 Get a store from the engine. 

5122 

5123 :param store_id: identifier of the store (optional) 

5124 :returns: :py:class:`~pyrocko.gf.store.Store` object 

5125 

5126 If no ``store_id`` is provided the store 

5127 associated with the :py:gattr:`default_store_id` is returned. 

5128 Raises :py:exc:`NoDefaultStoreSet` if :py:gattr:`default_store_id` is 

5129 undefined. 

5130 ''' 

5131 

5132 if store_id is None: 

5133 store_id = self.effective_default_store_id() 

5134 

5135 if store_id not in self._open_stores: 

5136 store_dir = self.get_store_dir(store_id) 

5137 self._open_stores[store_id] = store.Store(store_dir) 

5138 

5139 return self._open_stores[store_id] 

5140 

5141 def get_store_config(self, store_id): 

5142 store = self.get_store(store_id) 

5143 return store.config 

5144 

5145 def get_store_extra(self, store_id, key): 

5146 store = self.get_store(store_id) 

5147 return store.get_extra(key) 

5148 

5149 def close_cashed_stores(self): 

5150 ''' 

5151 Close and remove ids from cashed stores. 

5152 ''' 

5153 store_ids = [] 

5154 for store_id, store_ in self._open_stores.items(): 

5155 store_.close() 

5156 store_ids.append(store_id) 

5157 

5158 for store_id in store_ids: 

5159 self._open_stores.pop(store_id) 

5160 

5161 def get_rule(self, source, target): 

5162 cprovided = self.get_store(target.store_id).get_provided_components() 

5163 

5164 if isinstance(target, StaticTarget): 

5165 quantity = target.quantity 

5166 available_rules = static_rules 

5167 elif isinstance(target, Target): 

5168 quantity = target.effective_quantity() 

5169 available_rules = channel_rules 

5170 

5171 try: 

5172 for rule in available_rules[quantity]: 

5173 cneeded = rule.required_components(target) 

5174 if all(c in cprovided for c in cneeded): 

5175 return rule 

5176 

5177 except KeyError: 

5178 pass 

5179 

5180 raise BadRequest( 

5181 'No rule to calculate "%s" with GFs from store "%s" ' 

5182 'for source model "%s".' % ( 

5183 target.effective_quantity(), 

5184 target.store_id, 

5185 source.__class__.__name__)) 

5186 

5187 def _cached_discretize_basesource(self, source, store, cache, target): 

5188 if (source, store) not in cache: 

5189 cache[source, store] = source.discretize_basesource(store, target) 

5190 

5191 return cache[source, store] 

5192 

5193 def base_seismograms(self, source, targets, components, dsource_cache, 

5194 nthreads=0): 

5195 

5196 target = targets[0] 

5197 

5198 interp = set([t.interpolation for t in targets]) 

5199 if len(interp) > 1: 

5200 raise BadRequest('Targets have different interpolation schemes.') 

5201 

5202 rates = set([t.sample_rate for t in targets]) 

5203 if len(rates) > 1: 

5204 raise BadRequest('Targets have different sample rates.') 

5205 

5206 store_ = self.get_store(target.store_id) 

5207 receivers = [t.receiver(store_) for t in targets] 

5208 

5209 if target.sample_rate is not None: 

5210 deltat = 1. / target.sample_rate 

5211 rate = target.sample_rate 

5212 else: 

5213 deltat = None 

5214 rate = store_.config.sample_rate 

5215 

5216 tmin = num.fromiter( 

5217 (t.tmin for t in targets), dtype=float, count=len(targets)) 

5218 tmax = num.fromiter( 

5219 (t.tmax for t in targets), dtype=float, count=len(targets)) 

5220 

5221 mask = num.logical_and(num.isfinite(tmin), num.isfinite(tmax)) 

5222 

5223 itmin = num.zeros_like(tmin, dtype=num.int64) 

5224 itmax = num.zeros_like(tmin, dtype=num.int64) 

5225 nsamples = num.full_like(tmin, -1, dtype=num.int64) 

5226 

5227 itmin[mask] = num.floor(tmin[mask] * rate).astype(num.int64) 

5228 itmax[mask] = num.ceil(tmax[mask] * rate).astype(num.int64) 

5229 nsamples = itmax - itmin + 1 

5230 nsamples[num.logical_not(mask)] = -1 

5231 

5232 base_source = self._cached_discretize_basesource( 

5233 source, store_, dsource_cache, target) 

5234 

5235 base_seismograms = store_.calc_seismograms( 

5236 base_source, receivers, components, 

5237 deltat=deltat, 

5238 itmin=itmin, nsamples=nsamples, 

5239 interpolation=target.interpolation, 

5240 optimization=target.optimization, 

5241 nthreads=nthreads) 

5242 

5243 for i, base_seismogram in enumerate(base_seismograms): 

5244 base_seismograms[i] = store.make_same_span(base_seismogram) 

5245 

5246 return base_seismograms 

5247 

5248 def base_seismogram(self, source, target, components, dsource_cache, 

5249 nthreads): 

5250 

5251 tcounters = [xtime()] 

5252 

5253 store_ = self.get_store(target.store_id) 

5254 receiver = target.receiver(store_) 

5255 

5256 if target.tmin and target.tmax is not None: 

5257 rate = store_.config.sample_rate 

5258 itmin = int(num.floor(target.tmin * rate)) 

5259 itmax = int(num.ceil(target.tmax * rate)) 

5260 nsamples = itmax - itmin + 1 

5261 else: 

5262 itmin = None 

5263 nsamples = None 

5264 

5265 tcounters.append(xtime()) 

5266 base_source = self._cached_discretize_basesource( 

5267 source, store_, dsource_cache, target) 

5268 

5269 tcounters.append(xtime()) 

5270 

5271 if target.sample_rate is not None: 

5272 deltat = 1. / target.sample_rate 

5273 else: 

5274 deltat = None 

5275 

5276 base_seismogram = store_.seismogram( 

5277 base_source, receiver, components, 

5278 deltat=deltat, 

5279 itmin=itmin, nsamples=nsamples, 

5280 interpolation=target.interpolation, 

5281 optimization=target.optimization, 

5282 nthreads=nthreads) 

5283 

5284 tcounters.append(xtime()) 

5285 

5286 base_seismogram = store.make_same_span(base_seismogram) 

5287 

5288 tcounters.append(xtime()) 

5289 

5290 return base_seismogram, tcounters 

5291 

5292 def base_statics(self, source, target, components, nthreads): 

5293 tcounters = [xtime()] 

5294 store_ = self.get_store(target.store_id) 

5295 

5296 if target.tsnapshot is not None: 

5297 rate = store_.config.sample_rate 

5298 itsnapshot = int(num.floor(target.tsnapshot * rate)) 

5299 else: 

5300 itsnapshot = None 

5301 tcounters.append(xtime()) 

5302 

5303 base_source = source.discretize_basesource(store_, target=target) 

5304 

5305 tcounters.append(xtime()) 

5306 

5307 base_statics = store_.statics( 

5308 base_source, 

5309 target, 

5310 itsnapshot, 

5311 components, 

5312 target.interpolation, 

5313 nthreads) 

5314 

5315 tcounters.append(xtime()) 

5316 

5317 return base_statics, tcounters 

5318 

    def _post_process_dynamic(self, base_seismogram, source, target):
        '''
        Convert a base seismogram into the trace requested by ``target``.

        Applies the component conversion rule, source/target amplitude
        factors and the source's post-processing STF convolution, then
        hands the assembled trace to the target's own post-processing.
        '''
        # deltat/itmin are taken from an arbitrary component; all
        # components share the same span after make_same_span()
        base_any = next(iter(base_seismogram.values()))
        deltat = base_any.deltat
        itmin = base_any.itmin

        rule = self.get_rule(source, target)
        data = rule.apply_(target, base_seismogram)

        factor = source.get_factor() * target.get_factor()
        if factor != 1.0:
            data = data * factor

        stf = source.effective_stf_post()

        times, amplitudes = stf.discretize_t(
            deltat, 0.0)

        # repeat end point to prevent boundary effects
        padded_data = num.empty(data.size + amplitudes.size, dtype=float)
        padded_data[:data.size] = data
        padded_data[data.size:] = data[-1]
        data = num.convolve(amplitudes, padded_data)

        # shift the trace onset by the STF's leading time
        tmin = itmin * deltat + times[0]

        tr = meta.SeismosizerTrace(
            codes=target.codes,
            # drop the padding samples appended above
            data=data[:-amplitudes.size],
            deltat=deltat,
            tmin=tmin)

        return target.post_process(self, source, tr)

5351 

    def _post_process_statics(self, base_statics, source, starget):
        '''
        Convert base static displacements for a static target.

        Applies the component conversion rule and the source's amplitude
        factor, then hands the result to the target's post-processing.
        '''
        rule = self.get_rule(source, starget)
        data = rule.apply_(starget, base_statics)

        factor = source.get_factor()
        if factor != 1.0:
            # scales the arrays in place
            for v in data.values():
                v *= factor

        # NOTE(review): `base_statics` (not `data`) is passed on here.
        # This only applies the scaling above if rule.apply_ returns
        # views into base_statics - confirm, otherwise `data` is the
        # intended argument.
        return starget.post_process(self, source, base_statics)

5362 

    def process(self, *args, **kwargs):
        '''
        Process a request.

        ::

            process(**kwargs)
            process(request, **kwargs)
            process(sources, targets, **kwargs)

        The request can be given as a :py:class:`Request` object, or such
        an object is created using ``Request(**kwargs)`` for convenience.

        :returns: :py:class:`Response` object
        '''

        if len(args) not in (0, 1, 2):
            raise BadRequest('Invalid arguments.')

        if len(args) == 1:
            kwargs['request'] = args[0]

        elif len(args) == 2:
            kwargs.update(Request.args2kwargs(args))

        request = kwargs.pop('request', None)
        status_callback = kwargs.pop('status_callback', None)
        calc_timeseries = kwargs.pop('calc_timeseries', True)

        # `nprocs` is accepted as a legacy alias for `nthreads`
        nprocs = kwargs.pop('nprocs', None)
        nthreads = kwargs.pop('nthreads', 1)
        if nprocs is not None:
            nthreads = nprocs

        if request is None:
            request = Request(**kwargs)

        if resource:
            # snapshot CPU and I/O usage for the statistics below
            # (the `resource` module is unavailable on some platforms)
            rs0 = resource.getrusage(resource.RUSAGE_SELF)
            rc0 = resource.getrusage(resource.RUSAGE_CHILDREN)
        tt0 = xtime()

        # make sure stores are open before fork()
        store_ids = set(target.store_id for target in request.targets)
        for store_id in store_ids:
            self.get_store(store_id)

        source_index = dict((x, i) for (i, x) in
                            enumerate(request.sources))
        target_index = dict((x, i) for (i, x) in
                            enumerate(request.targets))

        m = request.subrequest_map()

        skeys = sorted(m.keys(), key=cmp_to_key(cmp_none_aware))
        results_list = []

        # results_list[isource][itarget] collects per-pair results
        for i in range(len(request.sources)):
            results_list.append([None] * len(request.targets))

        tcounters_dyn_list = []
        tcounters_static_list = []
        nsub = len(skeys)
        isub = 0

        # Processing dynamic targets through
        # parimap(process_subrequest_dynamic)

        if calc_timeseries:
            _process_dynamic = process_dynamic_timeseries
        else:
            _process_dynamic = process_dynamic

        if request.has_dynamic:
            # work items: (index, total, source indices, dynamic target
            # indices) per subrequest
            work_dynamic = [
                (i, nsub,
                 [source_index[source] for source in m[k][0]],
                 [target_index[target] for target in m[k][1]
                  if not isinstance(target, StaticTarget)])
                for (i, k) in enumerate(skeys)]

            for ii_results, tcounters_dyn in _process_dynamic(
                    work_dynamic, request.sources, request.targets, self,
                    nthreads):

                # keep per-stage time deltas for the statistics
                tcounters_dyn_list.append(num.diff(tcounters_dyn))
                isource, itarget, result = ii_results
                results_list[isource][itarget] = result

                if status_callback:
                    status_callback(isub, nsub)

                isub += 1

        # Processing static targets through process_static
        if request.has_statics:
            work_static = [
                (i, nsub,
                 [source_index[source] for source in m[k][0]],
                 [target_index[target] for target in m[k][1]
                  if isinstance(target, StaticTarget)])
                for (i, k) in enumerate(skeys)]

            for ii_results, tcounters_static in process_static(
                    work_static, request.sources, request.targets, self,
                    nthreads=nthreads):

                tcounters_static_list.append(num.diff(tcounters_static))
                isource, itarget, result = ii_results
                results_list[isource][itarget] = result

                if status_callback:
                    status_callback(isub, nsub)

                isub += 1

        if status_callback:
            status_callback(nsub, nsub)

        tt1 = time.time()
        if resource:
            rs1 = resource.getrusage(resource.RUSAGE_SELF)
            rc1 = resource.getrusage(resource.RUSAGE_CHILDREN)

        s = ProcessingStats()

        if request.has_dynamic:
            # accumulate stage timings over all subrequests, convert to
            # percentages of the total dynamic processing time
            tcumu_dyn = num.sum(num.vstack(tcounters_dyn_list), axis=0)
            t_dyn = float(num.sum(tcumu_dyn))
            perc_dyn = map(float, tcumu_dyn / t_dyn * 100.)
            (s.t_perc_get_store_and_receiver,
             s.t_perc_discretize_source,
             s.t_perc_make_base_seismogram,
             s.t_perc_make_same_span,
             s.t_perc_post_process) = perc_dyn
        else:
            t_dyn = 0.

        if request.has_statics:
            tcumu_static = num.sum(num.vstack(tcounters_static_list), axis=0)
            t_static = num.sum(tcumu_static)
            perc_static = map(float, tcumu_static / t_static * 100.)
            (s.t_perc_static_get_store,
             s.t_perc_static_discretize_basesource,
             s.t_perc_static_sum_statics,
             s.t_perc_static_post_process) = perc_static

        s.t_wallclock = tt1 - tt0
        if resource:
            s.t_cpu = (
                (rs1.ru_utime + rs1.ru_stime + rc1.ru_utime + rc1.ru_stime) -
                (rs0.ru_utime + rs0.ru_stime + rc0.ru_utime + rc0.ru_stime))
            s.n_read_blocks = (
                (rs1.ru_inblock + rc1.ru_inblock) -
                (rs0.ru_inblock + rc0.ru_inblock))

        n_records_stacked = 0.
        for results in results_list:
            for result in results:
                if not isinstance(result, meta.Result):
                    continue
                # results shared between targets are weighted down so
                # they are not counted multiple times
                shr = float(result.n_shared_stacking)
                n_records_stacked += result.n_records_stacked / shr
                s.t_perc_optimize += result.t_optimize / shr
                s.t_perc_stack += result.t_stack / shr
        s.n_records_stacked = int(n_records_stacked)
        if t_dyn != 0.:
            # NOTE(review): this divides by (t_dyn * 100); for a value in
            # percent one would expect `/ t_dyn * 100.` - confirm the
            # intended semantics of t_perc_optimize / t_perc_stack
            s.t_perc_optimize /= t_dyn * 100
            s.t_perc_stack /= t_dyn * 100

        return Response(
            request=request,
            results_list=results_list,
            stats=s)

5537 

5538 

class RemoteEngine(Engine):
    '''
    Client for remote synthetic seismogram calculator.
    '''

    # web service site key and base URL of the remote seismosizer
    site = String.T(default=ws.g_default_site, optional=True)
    url = String.T(default=ws.g_url, optional=True)

    def process(self, request=None, status_callback=None, **kwargs):
        '''
        Process a request by forwarding it to the remote service.

        :param request: :py:class:`Request` object; if ``None``, one is
            built from the remaining keyword arguments
        :param status_callback: accepted for API compatibility with
            :py:meth:`LocalEngine.process`; not used here
        :returns: :py:class:`Response` object
        '''
        if request is None:
            request = Request(**kwargs)

        return ws.seismosizer(url=self.url, site=self.site, request=request)

5553 

5554 

# module-level singleton managed by get_engine()
g_engine = None


def get_engine(store_superdirs=None):
    '''
    Get the global :py:class:`LocalEngine` instance, creating it lazily.

    :param store_superdirs: optional additional store superdirectories
        to add to the engine's search path
    '''
    global g_engine
    if g_engine is None:
        g_engine = LocalEngine(use_env=True, use_config=True)

    # bugfix: the default used to be a shared mutable list (`=[]`)
    for d in store_superdirs or []:
        if d not in g_engine.store_superdirs:
            g_engine.store_superdirs.append(d)

    return g_engine

5568 

5569 

class SourceGroup(Object):
    '''
    Abstract base class for collections of sources.

    Attribute access is vectorized over the contained sources:
    ``group.lat`` yields a float array holding the ``lat`` values of all
    sources in the group.
    '''

    def __getattr__(self, k):
        # gather attribute `k` from every member source into a float array
        return num.fromiter((getattr(s, k) for s in self),
                            dtype=float)

    def __iter__(self):
        raise NotImplementedError(
            'This method should be implemented in subclass.')

    def __len__(self):
        raise NotImplementedError(
            'This method should be implemented in subclass.')

5583 

5584 

class SourceList(SourceGroup):
    '''
    Plain list of sources.
    '''

    # the contained source objects
    sources = List.T(Source.T())

    def append(self, s):
        # add source `s` at the end of the list
        self.sources.append(s)

    def __iter__(self):
        return iter(self.sources)

    def __len__(self):
        return len(self.sources)

5596 

5597 

class SourceGrid(SourceGroup):
    '''
    Regular grid of sources.

    The grid is spanned by varying selected parameters of a base source
    over the ranges given in :py:attr:`variables`.
    '''

    base = Source.T()
    variables = Dict.T(String.T(), Range.T())
    order = List.T(String.T())

    def __len__(self):
        '''
        Number of grid nodes (product of all coordinate lengths).
        '''
        total = 1
        for _, coords in self.make_coords(self.base):
            total *= len(list(coords))

        return total

    def __iter__(self):
        '''
        Yield one regularized source per grid node.
        '''
        for assignment in permudef(self.make_coords(self.base)):
            src = self.base.clone(**dict(assignment))
            src.regularize()
            yield src

    def ordered_params(self):
        '''
        Yield the varied parameter names in deterministic order.

        Names listed in :py:attr:`order` come first, the rest follow in
        the base source's parameter order.

        :raises Exception: if a variable is not a parameter of the base
            source
        '''
        remaining = list(self.variables.keys())
        for name in self.order + list(self.base.keys()):
            if name in remaining:
                yield name
                remaining.remove(name)

        if remaining:
            raise Exception('Invalid parameter "%s" for source type "%s".' %
                            (remaining[0], self.base.__class__.__name__))

    def make_coords(self, base):
        '''
        Build ``(param, coordinates)`` pairs for all varied parameters.
        '''
        return [(param, self.variables[param].make(base=base[param]))
                for param in self.ordered_params()]

5630 

5631 

# All source model classes defined in this module; re-exported and used
# to build __all__.
source_classes = [
    Source,
    SourceWithMagnitude,
    SourceWithDerivedMagnitude,
    ExplosionSource,
    RectangularExplosionSource,
    DCSource,
    CLVDSource,
    VLVDSource,
    MTSource,
    RectangularSource,
    PseudoDynamicRupture,
    DoubleDCSource,
    RingfaultSource,
    CombiSource,
    SFSource,
    PorePressurePointSource,
    PorePressureLineSource,
]

5651 

# All source-time-function classes defined in this module; used to
# build __all__.
stf_classes = [
    STF,
    BoxcarSTF,
    TriangularSTF,
    HalfSinusoidSTF,
    ResonatorSTF,
]

5659 

# Public API of the module: explicitly listed names plus the names of
# all source and STF classes.
__all__ = '''
SeismosizerError
BadRequest
NoSuchStore
DerivedMagnitudeError
STFMode
'''.split() + [S.__name__ for S in source_classes + stf_classes] + '''
Request
ProcessingStats
Response
Engine
LocalEngine
RemoteEngine
source_classes
get_engine
Range
SourceGroup
SourceList
SourceGrid
map_anchor
'''.split()