1# http://pyrocko.org - GPLv3 

2# 

3# The Pyrocko Developers, 21st Century 

4# ---|P------/S----------~Lg---------- 

5 

6''' 

7 

8 

9 

10.. _coordinate-system-names: 

11 

12Coordinate systems 

13.................. 

14 

15Coordinate system names commonly used in source models. 

16 

17================= ============================================ 

18Name Description 

19================= ============================================ 

20``'xyz'`` northing, easting, depth in [m] 

21``'xy'`` northing, easting in [m] 

22``'latlon'`` latitude, longitude in [deg] 

23``'lonlat'`` longitude, latitude in [deg] 

24``'latlondepth'`` latitude, longitude in [deg], depth in [m] 

25================= ============================================ 

26''' 

27 

28from collections import defaultdict 

29from functools import cmp_to_key 

30import time 

31import math 

32import os 

33import re 

34import logging 

35try: 

36 import resource 

37except ImportError: 

38 resource = None 

39from hashlib import sha1 

40 

41import numpy as num 

42from scipy.interpolate import RegularGridInterpolator 

43 

44from pyrocko.guts import (Object, Float, String, StringChoice, List, 

45 Timestamp, Int, SObject, ArgumentError, Dict, 

46 ValidationError, Bool) 

47from pyrocko.guts_array import Array 

48 

49from pyrocko import moment_tensor as pmt 

50from pyrocko import trace, util, config, model, eikonal_ext 

51from pyrocko.orthodrome import ne_to_latlon 

52from pyrocko.model import Location 

53from pyrocko.modelling import OkadaSource, make_okada_coefficient_matrix, \ 

54 okada_ext, invert_fault_dislocations_bem 

55 

56from . import meta, store, ws 

57from .tractions import TractionField, DirectedTractions 

58from .targets import Target, StaticTarget, SatelliteTarget 

59 

# Shortcut for joining filesystem path components.
pjoin = os.path.join

# Prefix for guts YAML tags of classes defined in this module.
guts_prefix = 'pf'

# Degree <-> radian conversion factors and the kilometer unit in [m].
d2r = math.pi / 180.
r2d = 180. / math.pi
km = 1e3

logger = logging.getLogger('pyrocko.gf.seismosizer')

69 

70 

def cmp_none_aware(a, b):
    '''
    Three-way comparison which tolerates ``None`` values.

    Tuples are compared element by element (first differing element
    decides). A ``None`` value sorts before any non-``None`` value.

    :returns: ``-1``, ``0`` or ``1``
    '''

    if isinstance(a, tuple) and isinstance(b, tuple):
        for ea, eb in zip(a, b):
            result = cmp_none_aware(ea, eb)
            if result != 0:
                return result

        return 0

    if a is None:
        return 0 if b is None else -1

    if b is None:
        return 1

    return bool(a > b) - bool(a < b)

93 

94 

def xtime():
    '''
    Get the current system time in seconds (thin wrapper around
    :py:func:`time.time`, kept as an indirection point).
    '''
    return time.time()

97 

98 

class SeismosizerError(Exception):
    '''
    Base class for errors raised by the seismosizer.
    '''
    pass

101 

102 

class BadRequest(SeismosizerError):
    '''
    Raised when an invalid request is made to the seismosizer.
    '''
    pass

105 

106 

class DuplicateStoreId(Exception):
    '''
    Raised when more than one GF store with the same ID is encountered.
    '''
    pass

109 

110 

class NoDefaultStoreSet(Exception):
    '''
    Raised when a default GF store is requested but none has been set.
    '''
    pass

113 

114 

class ConversionError(Exception):
    '''
    Raised when a conversion between source representations fails.
    '''
    pass

117 

118 

class NoSuchStore(BadRequest):
    '''
    Raised when a requested GF store cannot be found.
    '''

    def __init__(self, store_id=None, dirs=None):
        # store_id: ID of the store which was looked for (or None).
        # dirs: iterable of folder paths which were searched (or None).
        BadRequest.__init__(self)
        self.store_id = store_id
        self.dirs = dirs

    def __str__(self):
        if self.store_id is not None:
            rstr = 'no GF store with id "%s" found.' % self.store_id
        else:
            rstr = 'GF store not found.'

        if self.dirs is not None:
            # List the searched folders to help the user fix the config.
            rstr += ' Searched folders:\n %s' % '\n '.join(sorted(self.dirs))
        return rstr

135 

136 

def ufloat(s):
    '''
    Convert string to float, allowing metric unit suffixes.

    Supported suffixes are ``'k'`` (1e3) and ``'M'`` (1e6), e.g. ``'10k'``
    converts to ``10000.0``.

    :param s: string to convert
    :returns: converted value as ``float``
    :raises ValueError: if only a unit suffix without a number is given, or
        if the remaining string does not parse as a number
    '''
    units = {
        'k': 1e3,
        'M': 1e6,
    }

    factor = 1.0
    if s and s[-1] in units:
        factor = units[s[-1]]
        # Keep the original string so the error message can show what was
        # actually passed in (previously the message always showed '',
        # because the suffix had already been stripped).
        s_orig = s
        s = s[:-1]
        if not s:
            raise ValueError("unit without a number: '%s'" % s_orig)

    return float(s) * factor

151 

152 

def ufloat_or_none(s):
    '''
    Like :py:func:`ufloat` but map empty/falsy input to ``None``.
    '''
    return ufloat(s) if s else None

158 

159 

def int_or_none(s):
    '''
    Convert string to ``int``, mapping empty/falsy input to ``None``.
    '''
    return int(s) if s else None

165 

166 

def nonzero(x, eps=1e-15):
    '''
    Check whether ``x`` deviates from zero by more than ``eps``.
    '''
    return abs(x) > eps

169 

170 

def permudef(ln, j=0):
    '''
    Yield all value combinations of a list of ``(key, values)`` definitions.

    ``ln`` is a list of ``(key, iterable_of_values)`` pairs. For every
    combination of values, element ``j`` (and recursively all following
    elements) is temporarily replaced in place by ``(key, single_value)``
    and the list is yielded. After exhausting the values of element ``j``,
    the original ``(key, values)`` pair is restored, so ``ln`` is unchanged
    once the generator is exhausted.

    NOTE: every yield hands out the *same* (mutated) list object; consumers
    must copy it if they want to keep a snapshot of a combination.
    '''
    if j < len(ln):
        k, v = ln[j]
        for y in v:
            ln[j] = k, y
            for s in permudef(ln, j + 1):
                yield s

        # Restore the original (key, values) pair for this position.
        ln[j] = k, v
        return
    else:
        # All positions fixed to single values: yield the combination.
        yield ln

183 

184 

def arr(x):
    '''
    Coerce ``x`` into a :py:class:`numpy.ndarray` with at least one
    dimension (scalars become 1-element arrays).
    '''
    a = num.asarray(x)
    return num.atleast_1d(a)

187 

188 

def discretize_rect_source(deltas, deltat, time, north, east, depth,
                           strike, dip, length, width,
                           anchor, velocity=None, stf=None,
                           nucleation_x=None, nucleation_y=None,
                           decimation_factor=1, pointsonly=False,
                           plane_coords=False,
                           aggressive_oversampling=False):
    '''
    Discretize a rectangular source plane into point sub-sources.

    :param deltas: GF store grid spacings, used to choose the sub-source
        spacing
    :param deltat: sampling interval in [s]
    :param time: source origin time
    :param north, east, depth: position of the plane's anchor point in [m]
    :param strike, dip: orientation angles in [deg]
    :param length, width: dimensions of the plane in [m]
    :param anchor: anchor point name (key into ``map_anchor``)
    :param velocity: rupture velocity in [m/s]; required unless
        ``pointsonly`` is set
    :param stf: source time function (defaults to a unit pulse)
    :param nucleation_x, nucleation_y: rupture nucleation point in relative
        plane coordinates ``[-1., 1.]``, or ``None`` for simultaneous onset
    :param decimation_factor: reduce number of sub-sources by this factor
    :param pointsonly: if set, return only geometry, no times/amplitudes
    :param plane_coords: if set, return points in unrotated plane
        coordinates
    :param aggressive_oversampling: double the sub-source density

    :returns: ``(points, dl, dw, nl, nw)`` when ``pointsonly`` or
        ``plane_coords`` is set, otherwise
        ``(points, times, amplitudes, dl, dw, nl, nw)`` with one entry per
        sub-source and STF sample.
    '''

    if stf is None:
        stf = STF()

    if not velocity and not pointsonly:
        raise AttributeError('velocity is required in time mode')

    mindeltagf = float(num.min(deltas))
    if velocity:
        mindeltagf = min(mindeltagf, deltat * velocity)

    ln = length
    wd = width

    if aggressive_oversampling:
        nl = int((2. / decimation_factor) * num.ceil(ln / mindeltagf)) + 1
        nw = int((2. / decimation_factor) * num.ceil(wd / mindeltagf)) + 1
    else:
        nl = int((1. / decimation_factor) * num.ceil(ln / mindeltagf)) + 1
        nw = int((1. / decimation_factor) * num.ceil(wd / mindeltagf)) + 1

    n = int(nl * nw)

    dl = ln / nl
    dw = wd / nw

    xl = num.linspace(-0.5 * (ln - dl), 0.5 * (ln - dl), nl)
    xw = num.linspace(-0.5 * (wd - dw), 0.5 * (wd - dw), nw)

    points = num.zeros((n, 3))
    points[:, 0] = num.tile(xl, nw)
    points[:, 1] = num.repeat(xw, nl)

    if nucleation_x is not None:
        dist_x = num.abs(nucleation_x - points[:, 0])
    else:
        dist_x = num.zeros(n)

    if nucleation_y is not None:
        dist_y = num.abs(nucleation_y - points[:, 1])
    else:
        dist_y = num.zeros(n)

    dist = num.sqrt(dist_x**2 + dist_y**2)

    # Rupture onset times are only needed (and computable) in time mode.
    # Guard the division: with ``pointsonly=True``, ``velocity`` may
    # legitimately be ``None`` and the previously unconditional division
    # raised a TypeError.
    times = dist / velocity if velocity else None

    anch_x, anch_y = map_anchor[anchor]

    points[:, 0] -= anch_x * 0.5 * length
    points[:, 1] -= anch_y * 0.5 * width

    if plane_coords:
        return points, dl, dw, nl, nw

    rotmat = pmt.euler_to_matrix(dip * d2r, strike * d2r, 0.0)
    points = num.dot(rotmat.T, points.T).T

    points[:, 0] += north
    points[:, 1] += east
    points[:, 2] += depth

    if pointsonly:
        return points, dl, dw, nl, nw

    xtau, amplitudes = stf.discretize_t(deltat, time)
    nt = xtau.size

    # Combine sub-source onset times with the STF samples: each point is
    # repeated for every STF sample and times/amplitudes are broadcast.
    points2 = num.repeat(points, nt, axis=0)
    times2 = (times[:, num.newaxis] + xtau[num.newaxis, :]).ravel()
    amplitudes2 = num.tile(amplitudes, n)

    return points2, times2, amplitudes2, dl, dw, nl, nw

268 

269 

def check_rect_source_discretisation(points2, nl, nw, store):
    '''
    Check if a rectangular source discretisation is fine enough for the
    sampling of the given GF store.

    Assumes a non-rotated fault plane. Returns ``True`` if the sub-source
    spacing resolves half the minimum S-wavelength, ``False`` otherwise.
    Very small discretisations (<= 8 sub-sources) are accepted with a
    warning.
    '''
    N_CRITICAL = 8
    points = points2.T.reshape((3, nl, nw))
    if points.size <= N_CRITICAL:
        logger.warning('RectangularSource is defined by only %d sub-sources!'
                       % points.size)
        return True

    # Distance between neighbouring sub-sources along the width direction.
    distances = num.sqrt(
        (points[0, 0, :] - points[0, 1, :])**2 +
        (points[1, 0, :] - points[1, 1, :])**2 +
        (points[2, 0, :] - points[2, 1, :])**2)

    # Sample the store's S-velocity profile at the sub-source depths.
    depths = points[2, 0, :]
    vs_profile = store.config.get_vs(
        lat=0., lon=0.,
        points=num.repeat(depths[:, num.newaxis], 3, axis=1),
        interpolation='multilinear')

    # Nyquist-style criterion: spacing must be below half the minimum
    # wavelength resolvable at the store's sampling rate.
    min_wavelength = vs_profile * (store.config.deltat * 2)
    return bool(num.all(min_wavelength > distances / 2))

294 

295 

def outline_rect_source(strike, dip, length, width, anchor):
    '''
    Get coordinates of the corners of a rectangular source outline.

    Returns a closed polygon of 5 points (first point repeated at the end)
    in a north-east-down system, rotated according to ``strike`` and
    ``dip`` and shifted according to the ``anchor`` position.
    '''
    hl = 0.5 * length
    hw = 0.5 * width
    corners = num.array(
        [[-hl, -hw, 0.],
         [hl, -hw, 0.],
         [hl, hw, 0.],
         [-hl, hw, 0.],
         [-hl, -hw, 0.]])

    anch_x, anch_y = map_anchor[anchor]
    corners[:, 0] -= anch_x * hl
    corners[:, 1] -= anch_y * hw

    rotmat = pmt.euler_to_matrix(dip * d2r, strike * d2r, 0.0)

    return num.dot(rotmat.T, corners.T).T

313 

314 

def from_plane_coords(
        strike, dip, length, width, depth, x_plane_coords, y_plane_coords,
        lat=0., lon=0.,
        north_shift=0, east_shift=0,
        anchor='top', cs='xy'):
    '''
    Convert position(s) given in relative fault-plane coordinates to
    absolute coordinates.

    ``x_plane_coords`` / ``y_plane_coords`` are fractions of the
    half-length / half-width of the plane (scalar or list). The plane is
    shifted according to ``anchor``, rotated by ``strike`` and ``dip`` and
    offset by ``north_shift`` / ``east_shift`` / ``depth``. The output
    coordinate system is selected with ``cs`` (``'xy'``, ``'latlon'`` or
    ``'lonlat'``).

    :returns: pair of lists ``(x, y)`` — for ``cs='lonlat'`` the order is
        swapped to ``(lon, lat)`` — with one entry per input coordinate
        pair.
    '''

    ln = length
    wd = width
    x_abs = []
    y_abs = []
    if not isinstance(x_plane_coords, list):
        # Promote scalars to 1-element lists so one code path handles both.
        x_plane_coords = [x_plane_coords]
        y_plane_coords = [y_plane_coords]

    for x_plane, y_plane in zip(x_plane_coords, y_plane_coords):
        # Rectangle corner points scaled by the relative plane coordinate.
        points = num.array(
            [[-0.5 * ln * x_plane, -0.5 * wd * y_plane, 0.],
             [0.5 * ln * x_plane, -0.5 * wd * y_plane, 0.],
             [0.5 * ln * x_plane, 0.5 * wd * y_plane, 0.],
             [-0.5 * ln * x_plane, 0.5 * wd * y_plane, 0.],
             [-0.5 * ln * x_plane, -0.5 * wd * y_plane, 0.]])

        anch_x, anch_y = map_anchor[anchor]
        points[:, 0] -= anch_x * 0.5 * length
        points[:, 1] -= anch_y * 0.5 * width

        rotmat = pmt.euler_to_matrix(dip * d2r, strike * d2r, 0.0)

        points = num.dot(rotmat.T, points.T).T
        points[:, 0] += north_shift
        points[:, 1] += east_shift
        points[:, 2] += depth
        if cs in ('latlon', 'lonlat'):
            latlon = ne_to_latlon(lat, lon,
                                  points[:, 0], points[:, 1])
            latlon = num.array(latlon).T
            # NOTE(review): specific corner rows (index 1 resp. 2) are
            # picked here to represent the converted coordinate,
            # presumably exploiting the scaling of the corner points
            # above — verify against callers.
            x_abs.append(latlon[1:2, 1])
            y_abs.append(latlon[2:3, 0])
        if cs == 'xy':
            x_abs.append(points[1:2, 1])
            y_abs.append(points[2:3, 0])

    if cs == 'lonlat':
        return y_abs, x_abs
    else:
        return x_abs, y_abs

361 

362 

def points_on_rect_source(
        strike, dip, length, width, anchor,
        discretized_basesource=None, points_x=None, points_y=None):
    '''
    Get points on a rectangular source plane in rotated coordinates.

    Either the patch corners of a ``discretized_basesource`` or explicit
    relative plane coordinates ``points_x`` / ``points_y`` (fractions of
    the half-length / half-width, scalar, list or array) are converted to
    the rotated (strike/dip), anchor-shifted source plane system.

    :returns: ``(npoints, 3)`` array of coordinates
    :raises ValueError: if neither a discretized base source nor non-empty
        point coordinates are given
    '''
    ln = length
    wd = width

    # Promote scalars/lists to arrays so shapes can be inspected uniformly.
    if isinstance(points_x, (list, float)):
        points_x = num.array([points_x])
    if isinstance(points_y, (list, float)):
        points_y = num.array([points_y])

    if discretized_basesource:
        ds = discretized_basesource

        # Patch corner grid: one more node than patches per direction.
        nl_patches = ds.nl + 1
        nw_patches = ds.nw + 1

        npoints = nl_patches * nw_patches
        points = num.zeros((npoints, 3))
        ln_patches = num.arange(nl_patches)
        wd_patches = num.arange(nw_patches)

        # Normalize patch indices to relative coordinates in [-1., 1.].
        points_ln = \
            2 * ((ln_patches - num.min(ln_patches)) / num.ptp(ln_patches)) - 1
        points_wd = \
            2 * ((wd_patches - num.min(wd_patches)) / num.ptp(wd_patches)) - 1

        for il in range(nl_patches):
            for iw in range(nw_patches):
                points[il * nw_patches + iw, :] = num.array([
                    points_ln[il] * ln * 0.5,
                    points_wd[iw] * wd * 0.5, 0.0])

    elif points_x is not None and points_y is not None \
            and points_x.shape[0] > 0 and points_y.shape[0] > 0:
        points = num.zeros(shape=((len(points_x), 3)))
        for i, (x, y) in enumerate(zip(points_x, points_y)):
            points[i, :] = num.array(
                [x * 0.5 * ln, y * 0.5 * wd, 0.0])

    else:
        # Previously this fell through with ``points`` undefined and died
        # with an obscure NameError/AttributeError.
        raise ValueError(
            'Either a discretized base source or point coordinates '
            'must be given.')

    anch_x, anch_y = map_anchor[anchor]

    points[:, 0] -= anch_x * 0.5 * ln
    points[:, 1] -= anch_y * 0.5 * wd

    rotmat = pmt.euler_to_matrix(dip * d2r, strike * d2r, 0.0)

    return num.dot(rotmat.T, points.T).T

411 

412 

class InvalidGridDef(Exception):
    '''
    Raised when an invalid grid definition or range specification is given.
    '''
    pass

415 

416 

class Range(SObject):
    '''
    Convenient range specification.

    Equivalent ways to specify the range [ 0., 1000., ... 10000. ]::

      Range('0 .. 10k : 1k')
      Range(start=0., stop=10e3, step=1e3)
      Range(0, 10e3, 1e3)
      Range('0 .. 10k @ 11')
      Range(start=0., stop=10*km, n=11)

      Range(0, 10e3, n=11)
      Range(values=[x*1e3 for x in range(11)])

    Depending on the use context, it can be possible to omit any part of the
    specification. E.g. in the context of extracting a subset of an already
    existing range, the existing range's specification values would be filled
    in where missing.

    The values are distributed with equal spacing, unless the ``spacing``
    argument is modified. The values can be created offset or relative to an
    external base value with the ``relative`` argument if the use context
    supports this.

    The range specification can be expressed with a short string
    representation::

        'start .. stop @ num | spacing, relative'
        'start .. stop : step | spacing, relative'

    most parts of the expression can be omitted if not needed. Whitespace is
    allowed for readability but can also be omitted.
    '''

    start = Float.T(optional=True)
    stop = Float.T(optional=True)
    step = Float.T(optional=True)
    n = Int.T(optional=True)
    values = Array.T(optional=True, dtype=float, shape=(None,))

    spacing = StringChoice.T(
        choices=['lin', 'log', 'symlog'],
        default='lin',
        optional=True)

    relative = StringChoice.T(
        choices=['', 'add', 'mult'],
        default='',
        optional=True)

    # 'start .. stop' part, then either '@ n' or ': step', then an optional
    # '| spacing,relative' tail.
    pattern = re.compile(r'^((?P<start>.*)\.\.(?P<stop>[^@|:]*))?'
                         r'(@(?P<n>[^|]+)|:(?P<step>[^|]+))?'
                         r'(\|(?P<stuff>.+))?$')

    def __init__(self, *args, **kwargs):
        d = {}
        if len(args) == 1:
            # Single positional argument: shorthand string.
            d = self.parse(args[0])
        elif len(args) in (2, 3):
            d['start'], d['stop'] = [float(x) for x in args[:2]]
            if len(args) == 3:
                d['step'] = float(args[2])

        for k, v in kwargs.items():
            if k in d:
                raise ArgumentError('%s specified more than once' % k)

            d[k] = v

        SObject.__init__(self, **d)

    def __str__(self):
        def sfloat(x):
            if x is not None:
                return '%g' % x
            else:
                return ''

        # Explicit check: ``if self.values:`` would raise "truth value of
        # an array ... is ambiguous" for multi-element numpy arrays.
        if self.values is not None and len(self.values) > 0:
            return ','.join('%g' % x for x in self.values)

        if self.start is None and self.stop is None:
            s0 = ''
        else:
            s0 = '%s .. %s' % (sfloat(self.start), sfloat(self.stop))

        s1 = ''
        if self.step is not None:
            s1 = [' : %g', ':%g'][s0 == ''] % self.step
        elif self.n is not None:
            s1 = [' @ %i', '@%i'][s0 == ''] % self.n

        if self.spacing == 'lin' and self.relative == '':
            s2 = ''
        else:
            x = []
            if self.spacing != 'lin':
                x.append(self.spacing)

            if self.relative != '':
                x.append(self.relative)

            s2 = ' | %s' % ','.join(x)

        return s0 + s1 + s2

    @classmethod
    def parse(cls, s):
        '''
        Parse a range shorthand string into a dict of constructor keyword
        arguments.

        :raises InvalidGridDef: if the string cannot be interpreted
        '''
        s = re.sub(r'\s+', '', s)
        m = cls.pattern.match(s)
        if not m:
            # Fallback: comma-separated list of explicit values.
            try:
                vals = [ufloat(x) for x in s.split(',')]
            except Exception:
                raise InvalidGridDef(
                    '"%s" is not a valid range specification' % s)

            return dict(values=num.array(vals, dtype=float))

        d = m.groupdict()
        try:
            start = ufloat_or_none(d['start'])
            stop = ufloat_or_none(d['stop'])
            step = ufloat_or_none(d['step'])
            n = int_or_none(d['n'])
        except Exception:
            raise InvalidGridDef(
                '"%s" is not a valid range specification' % s)

        spacing = 'lin'
        relative = ''

        if d['stuff'] is not None:
            t = d['stuff'].split(',')
            for x in t:
                if x in cls.spacing.choices:
                    spacing = x
                elif x and x in cls.relative.choices:
                    relative = x
                else:
                    raise InvalidGridDef(
                        '"%s" is not a valid range specification' % s)

        return dict(start=start, stop=stop, step=step, n=n, spacing=spacing,
                    relative=relative)

    def make(self, mi=None, ma=None, inc=None, base=None, eps=1e-5):
        '''
        Create the list of values described by this range.

        :param mi, ma, inc: context defaults used to fill in missing
            ``start`` / ``stop`` / ``step``
        :param base: base value for ``relative`` ranges
        :param eps: tolerance used when snapping ``stop`` onto the step
            grid
        :returns: list of floats (or the explicit ``values`` array if set)
        :raises InvalidGridDef: for inconsistent or incomplete
            specifications
        '''
        # Explicit check: ``if self.values:`` raises for numpy arrays with
        # more than one element (ambiguous truth value).
        if self.values is not None and len(self.values) > 0:
            return self.values

        start = self.start
        stop = self.stop
        step = self.step
        n = self.n

        # A negative step means the range counts downward; fill missing
        # bounds from the context accordingly.
        swap = step is not None and step < 0.
        if start is None:
            start = [mi, ma][swap]
        if stop is None:
            stop = [ma, mi][swap]
        if step is None and inc is not None:
            step = [inc, -inc][ma < mi]

        if start is None or stop is None:
            raise InvalidGridDef(
                'Cannot use range specification "%s" without start '
                'and stop in this context' % self)

        if step is None and n is None:
            step = stop - start

        if n is None:
            if (step < 0) != (stop - start < 0):
                raise InvalidGridDef(
                    'Range specification "%s" has inconsistent ordering '
                    '(step < 0 => stop > start)' % self)

            # Snap stop to the step grid; if it is not within tolerance,
            # shorten the range to the last on-grid value.
            n = int(round((stop - start) / step)) + 1
            stop2 = start + (n - 1) * step
            if abs(stop - stop2) > eps:
                n = int(math.floor((stop - start) / step)) + 1
                stop = start + (n - 1) * step
            else:
                stop = stop2

        if start == stop:
            n = 1

        if self.spacing == 'lin':
            vals = num.linspace(start, stop, n)

        elif self.spacing in ('log', 'symlog'):
            if start > 0. and stop > 0.:
                vals = num.exp(num.linspace(num.log(start),
                                            num.log(stop), n))
            elif start < 0. and stop < 0.:
                vals = -num.exp(num.linspace(num.log(-start),
                                             num.log(-stop), n))
            else:
                raise InvalidGridDef(
                    'Log ranges should not include or cross zero '
                    '(in range specification "%s").' % self)

            if self.spacing == 'symlog':
                # Mirror the positive branch to get a symmetric range.
                nvals = - vals
                vals = num.concatenate((nvals[::-1], vals))

        if self.relative in ('add', 'mult') and base is None:
            raise InvalidGridDef(
                'Cannot use relative range specification in this context.')

        vals = self.make_relative(base, vals)

        return list(map(float, vals))

    def make_relative(self, base, vals):
        '''
        Apply the ``relative`` mode (``'add'`` or ``'mult'``) to ``vals``
        in place, using ``base`` as reference.
        '''
        if self.relative == 'add':
            vals += base

        if self.relative == 'mult':
            vals *= base

        return vals

641 

642 

class GridDefElement(Object):
    '''
    Single ``param = range`` element of a grid definition.
    '''

    param = meta.StringID.T()
    rs = Range.T()

    def __init__(self, shorthand=None, **kwargs):
        # Accept a shorthand string of the form 'param = range-spec'.
        if shorthand is not None:
            parts = shorthand.split('=')
            if len(parts) != 2:
                raise InvalidGridDef(
                    'Invalid grid specification element: %s' % shorthand)

            kwargs['param'] = parts[0].strip()
            kwargs['rs'] = Range(parts[1].strip())

        Object.__init__(self, **kwargs)

    def shorthand(self):
        '''
        Get shorthand string representation of this element.
        '''
        return self.param + ' = ' + str(self.rs)

665 

class GridDef(Object):
    '''
    Grid definition: a collection of parameter-range elements.
    '''

    elements = List.T(GridDefElement.T())

    def __init__(self, shorthand=None, **kwargs):
        # Accept a shorthand string: elements separated by ';' and/or
        # newlines, each of the form 'param = range-spec'.
        if shorthand is not None:
            t = shorthand.splitlines()
            tt = []
            for x in t:
                x = x.strip()
                if x:
                    tt.extend(x.split(';'))

            elements = []
            for se in tt:
                # Fixed: each chunk is a single 'param = range' element —
                # previously a nested GridDef was appended here, violating
                # the List.T(GridDefElement.T()) declaration above.
                elements.append(GridDefElement(se))

            kwargs['elements'] = elements

        Object.__init__(self, **kwargs)

    def shorthand(self):
        # NOTE(review): str(x) yields the guts serialization of the
        # element; x.shorthand() may have been intended — verify against
        # callers before changing.
        return '; '.join(str(x) for x in self.elements)

689 

690 

class Cloneable(object):
    '''
    Mix-in providing mapping-style parameter access and copying.

    Relies on the guts metaclass attribute ``T.propnames`` of the class it
    is mixed into.
    '''

    def __iter__(self):
        return iter(self.T.propnames)

    def __getitem__(self, k):
        if k not in self.keys():
            raise KeyError(k)

        return getattr(self, k)

    def __setitem__(self, k, v):
        if k not in self.keys():
            raise KeyError(k)

        return setattr(self, k, v)

    def clone(self, **kwargs):
        '''
        Make a copy of the object.

        A new object of the same class is created and initialized with the
        parameters of the object on which this method is called on. If
        ``kwargs`` are given, these are used to override any of the
        initialization parameters.
        '''
        d = dict(self)
        # Deep-copy nested Cloneable parameters so the clone is
        # independent of the original.
        for k in list(d):
            if isinstance(d[k], Cloneable):
                d[k] = d[k].clone()

        d.update(kwargs)
        return self.__class__(**d)

    @classmethod
    def keys(cls):
        '''
        Get list of the source model's parameter names.
        '''
        return cls.T.propnames

734 

735 

class STF(Object, Cloneable):

    '''
    Base class for source time functions.

    The base class itself acts as a unit pulse (delta-like) STF.
    '''

    def __init__(self, effective_duration=None, **kwargs):
        # ``effective_duration`` is translated into the subclass'
        # ``duration`` parameter using its conversion factor. NOTE: the
        # base class defines no ``duration`` property itself — subclasses
        # are expected to provide it.
        if effective_duration is not None:
            kwargs['duration'] = effective_duration / \
                self.factor_duration_to_effective()

        Object.__init__(self, **kwargs)

    @classmethod
    def factor_duration_to_effective(cls):
        # Ratio of effective to nominal duration; overridden by
        # subclasses with non-trivial shapes.
        return 1.0

    def centroid_time(self, tref):
        # Time of the STF's centroid relative to reference time ``tref``.
        return tref

    @property
    def effective_duration(self):
        return self.duration * self.factor_duration_to_effective()

    def discretize_t(self, deltat, tref):
        '''
        Discretize the STF on a regular time grid with spacing ``deltat``.

        The base-class unit pulse is distributed onto the two samples
        neighbouring ``tref`` by linear interpolation.

        :returns: ``(times, amplitudes)`` arrays
        '''
        tl = math.floor(tref / deltat) * deltat
        th = math.ceil(tref / deltat) * deltat
        if tl == th:
            # tref falls exactly on a sample: single unit spike.
            return num.array([tl], dtype=float), num.ones(1)
        else:
            return (
                num.array([tl, th], dtype=float),
                num.array([th - tref, tref - tl], dtype=float) / deltat)

    def base_key(self):
        # Key used to distinguish STFs, e.g. when caching discretized
        # sources.
        return (type(self).__name__,)

772 

773 

# Shared unit-pulse (delta-like) STF instance.
g_unit_pulse = STF()

775 

776 

def sshift(times, amplitudes, tshift, deltat):
    '''
    Shift a regularly sampled series by ``tshift`` via linear interpolation.

    The shift is distributed between the two neighbouring integer
    multiples of ``deltat``; the output has one extra sample. If
    ``tshift`` is an exact multiple of ``deltat``, the input arrays are
    returned unchanged.

    :returns: ``(times, amplitudes)`` arrays
    '''
    tfloor = math.floor(tshift / deltat) * deltat
    tceil = math.ceil(tshift / deltat) * deltat
    if tfloor == tceil:
        return times, amplitudes

    # Linear interpolation weights for the lower/upper grid positions.
    w_lo = (tceil - tshift) / deltat
    w_hi = (tshift - tfloor) / deltat

    amplitudes2 = num.zeros(amplitudes.size + 1, dtype=float)
    amplitudes2[:-1] += w_lo * amplitudes
    amplitudes2[1:] += w_hi * amplitudes

    times2 = times[0] + tfloor \
        + num.arange(times.size + 1, dtype=float) * deltat

    return times2, amplitudes2

793 

794 

class BoxcarSTF(STF):

    '''
    Boxcar type source time function.

    .. figure :: /static/stf-BoxcarSTF.svg
        :width: 40%
        :align: center
        :alt: boxcar source time function
    '''

    duration = Float.T(
        default=0.0,
        help='duration of the boxcar')

    anchor = Float.T(
        default=0.0,
        help='anchor point with respect to source.time: ('
             '-1.0: left -> source duration [0, T] ~ hypocenter time, '
             ' 0.0: center -> source duration [-T/2, T/2] ~ centroid time, '
             '+1.0: right -> source duration [-T, 0] ~ rupture end time)')

    @classmethod
    def factor_duration_to_effective(cls):
        # For a boxcar, effective duration equals nominal duration.
        return 1.0

    def centroid_time(self, tref):
        # Centroid is at the middle of the boxcar, offset from tref
        # according to the anchor.
        return tref - 0.5 * self.duration * self.anchor

    def discretize_t(self, deltat, tref):
        '''
        Discretize the boxcar on a regular time grid with spacing
        ``deltat``.

        Sample weights are obtained by integrating the boxcar outline over
        each sample interval and normalized to unit sum; a final
        sub-sample shift makes the discrete centroid match
        :py:meth:`centroid_time` exactly.

        :returns: ``(times, amplitudes)`` arrays
        '''
        tmin_stf = tref - self.duration * (self.anchor + 1.) * 0.5
        tmax_stf = tref + self.duration * (1. - self.anchor) * 0.5
        tmin = round(tmin_stf / deltat) * deltat
        tmax = round(tmax_stf / deltat) * deltat
        nt = int(round((tmax - tmin) / deltat)) + 1
        times = num.linspace(tmin, tmax, nt)
        amplitudes = num.ones_like(times)
        if times.size > 1:
            t_edges = num.linspace(
                tmin - 0.5 * deltat, tmax + 0.5 * deltat, nt + 1)
            # Boxcar outline as a piecewise-linear function, integrated
            # over each sample interval to get the weights.
            t = tmin_stf + self.duration * num.array(
                [0.0, 0.0, 1.0, 1.0], dtype=float)
            f = num.array([0., 1., 1., 0.], dtype=float)
            amplitudes = util.plf_integrate_piecewise(t_edges, t, f)
            amplitudes /= num.sum(amplitudes)

        # Residual offset between the discrete and the exact centroid;
        # compensated by a sub-sample shift.
        tshift = (num.sum(amplitudes * times) - self.centroid_time(tref))

        return sshift(times, amplitudes, -tshift, deltat)

    def base_key(self):
        # Cache key: class name plus all shape-determining parameters.
        return (type(self).__name__, self.duration, self.anchor)

847 

848 

class TriangularSTF(STF):

    '''
    Triangular type source time function.

    .. figure :: /static/stf-TriangularSTF.svg
        :width: 40%
        :align: center
        :alt: triangular source time function
    '''

    duration = Float.T(
        default=0.0,
        help='baseline of the triangle')

    peak_ratio = Float.T(
        default=0.5,
        help='fraction of time compared to duration, '
             'when the maximum amplitude is reached')

    anchor = Float.T(
        default=0.0,
        help='anchor point with respect to source.time: ('
             '-1.0: left -> source duration [0, T] ~ hypocenter time, '
             ' 0.0: center -> source duration [-T/2, T/2] ~ centroid time, '
             '+1.0: right -> source duration [-T, 0] ~ rupture end time)')

    @classmethod
    def factor_duration_to_effective(cls, peak_ratio=None):
        # Ratio of effective to nominal (baseline) duration; depends on
        # the asymmetry of the triangle.
        if peak_ratio is None:
            peak_ratio = cls.peak_ratio.default()

        return math.sqrt((peak_ratio**2 - peak_ratio + 1.0) * 2.0 / 3.0)

    def __init__(self, effective_duration=None, **kwargs):
        # Translate ``effective_duration`` into nominal ``duration`` using
        # the peak-ratio dependent conversion factor.
        if effective_duration is not None:
            kwargs['duration'] = effective_duration / \
                self.factor_duration_to_effective(
                    kwargs.get('peak_ratio', None))

        STF.__init__(self, **kwargs)

    @property
    def centroid_ratio(self):
        # Position of the triangle's centroid as fraction of the baseline.
        ra = self.peak_ratio
        rb = 1.0 - ra
        return self.peak_ratio + (rb**2 / 3. - ra**2 / 3.) / (ra + rb)

    def centroid_time(self, tref):
        ca = self.centroid_ratio
        cb = 1.0 - ca
        if self.anchor <= 0.:
            return tref - ca * self.duration * self.anchor
        else:
            return tref - cb * self.duration * self.anchor

    @property
    def effective_duration(self):
        return self.duration * self.factor_duration_to_effective(
            self.peak_ratio)

    def tminmax_stf(self, tref):
        # Time span [tmin_stf, tmax_stf] covered by the triangle, placed
        # relative to ``tref`` according to the anchor.
        ca = self.centroid_ratio
        cb = 1.0 - ca
        if self.anchor <= 0.:
            tmin_stf = tref - ca * self.duration * (self.anchor + 1.)
            tmax_stf = tmin_stf + self.duration
        else:
            tmax_stf = tref + cb * self.duration * (1. - self.anchor)
            tmin_stf = tmax_stf - self.duration

        return tmin_stf, tmax_stf

    def discretize_t(self, deltat, tref):
        '''
        Discretize the triangle on a regular time grid with spacing
        ``deltat``.

        Sample weights are obtained by integrating the triangle outline
        over each sample interval and normalized to unit sum.

        :returns: ``(times, amplitudes)`` arrays
        '''
        tmin_stf, tmax_stf = self.tminmax_stf(tref)

        tmin = round(tmin_stf / deltat) * deltat
        tmax = round(tmax_stf / deltat) * deltat
        nt = int(round((tmax - tmin) / deltat)) + 1
        if nt > 1:
            t_edges = num.linspace(
                tmin - 0.5 * deltat, tmax + 0.5 * deltat, nt + 1)
            # Triangle outline as a piecewise-linear function.
            t = tmin_stf + self.duration * num.array(
                [0.0, self.peak_ratio, 1.0], dtype=float)
            f = num.array([0., 1., 0.], dtype=float)
            amplitudes = util.plf_integrate_piecewise(t_edges, t, f)
            amplitudes /= num.sum(amplitudes)
        else:
            amplitudes = num.ones(1)

        times = num.linspace(tmin, tmax, nt)
        return times, amplitudes

    def base_key(self):
        # Cache key: class name plus all shape-determining parameters.
        return (
            type(self).__name__, self.duration, self.peak_ratio, self.anchor)

945 

946 

class HalfSinusoidSTF(STF):

    '''
    Half sinusoid type source time function.

    .. figure :: /static/stf-HalfSinusoidSTF.svg
        :width: 40%
        :align: center
        :alt: half-sinusouid source time function
    '''

    duration = Float.T(
        default=0.0,
        help='duration of the half-sinusoid (baseline)')

    anchor = Float.T(
        default=0.0,
        help='anchor point with respect to source.time: ('
             '-1.0: left -> source duration [0, T] ~ hypocenter time, '
             ' 0.0: center -> source duration [-T/2, T/2] ~ centroid time, '
             '+1.0: right -> source duration [-T, 0] ~ rupture end time)')

    exponent = Int.T(
        default=1,
        help='set to 2 to use square of the half-period sinusoidal function.')

    def __init__(self, effective_duration=None, **kwargs):
        # Translate ``effective_duration`` into nominal ``duration``,
        # taking the chosen exponent into account.
        if effective_duration is not None:
            kwargs['duration'] = effective_duration / \
                self.factor_duration_to_effective(
                    kwargs.get('exponent', 1))

        STF.__init__(self, **kwargs)

    @classmethod
    def factor_duration_to_effective(cls, exponent):
        '''
        Ratio of effective to nominal duration for the given exponent.

        :raises ValueError: if ``exponent`` is not 1 or 2
        '''
        if exponent == 1:
            return math.sqrt(3.0 * math.pi**2 - 24.0) / math.pi
        elif exponent == 2:
            return math.sqrt(math.pi**2 - 6) / math.pi
        else:
            raise ValueError('Exponent for HalfSinusoidSTF must be 1 or 2.')

    @property
    def effective_duration(self):
        return self.duration * self.factor_duration_to_effective(self.exponent)

    def centroid_time(self, tref):
        # The half-sinusoid is symmetric: centroid is in the middle.
        return tref - 0.5 * self.duration * self.anchor

    def discretize_t(self, deltat, tref):
        '''
        Discretize the half-sinusoid on a regular time grid with spacing
        ``deltat``.

        Sample weights are obtained from the analytic antiderivative
        evaluated at the sample-interval edges (clipped to the STF
        support) and normalized to unit sum.

        :returns: ``(times, amplitudes)`` arrays
        '''
        tmin_stf = tref - self.duration * (self.anchor + 1.) * 0.5
        tmax_stf = tref + self.duration * (1. - self.anchor) * 0.5
        tmin = round(tmin_stf / deltat) * deltat
        tmax = round(tmax_stf / deltat) * deltat
        nt = int(round((tmax - tmin) / deltat)) + 1
        if nt > 1:
            # Clip interval edges to the support of the STF.
            t_edges = num.maximum(tmin_stf, num.minimum(tmax_stf, num.linspace(
                tmin - 0.5 * deltat, tmax + 0.5 * deltat, nt + 1)))

            if self.exponent == 1:
                # Antiderivative of sin(pi*t/T).
                fint = -num.cos(
                    (t_edges - tmin_stf) * (math.pi / self.duration))

            elif self.exponent == 2:
                # Antiderivative of sin^2(pi*t/T).
                fint = (t_edges - tmin_stf) / self.duration \
                    - 1.0 / (2.0 * math.pi) * num.sin(
                        (t_edges - tmin_stf) * (2.0 * math.pi / self.duration))
            else:
                raise ValueError(
                    'Exponent for HalfSinusoidSTF must be 1 or 2.')

            amplitudes = fint[1:] - fint[:-1]
            amplitudes /= num.sum(amplitudes)
        else:
            amplitudes = num.ones(1)

        times = num.linspace(tmin, tmax, nt)
        return times, amplitudes

    def base_key(self):
        # Fixed: include ``exponent`` in the key — it changes the
        # discretized waveform, so STFs differing only in exponent must
        # not be treated as identical when cached.
        return (type(self).__name__, self.duration, self.anchor,
                self.exponent)

1029 

1030 

class SmoothRampSTF(STF):
    '''
    Smooth-ramp type source time function for near-field displacement.
    Based on moment function of double-couple point source proposed by Bruestle
    and Mueller (PEPI, 1983).

    .. [1] W. Bruestle, G. Mueller (1983), Moment and duration of shallow
        earthquakes from Love-wave modelling for regional distances, PEPI 32,
        312-324.

    .. figure :: /static/stf-SmoothRampSTF.svg
        :width: 40%
        :alt: smooth ramp source time function
    '''
    duration = Float.T(
        default=0.0,
        help='duration of the ramp (baseline)')

    rise_ratio = Float.T(
        default=0.5,
        help='fraction of time compared to duration, '
             'when the maximum amplitude is reached')

    anchor = Float.T(
        default=0.0,
        help='anchor point with respect to source.time: ('
             '-1.0: left -> source duration ``[0, T]`` ~ hypocenter time, '
             '0.0: center -> source duration ``[-T/2, T/2]`` ~ centroid time, '
             '+1.0: right -> source duration ``[-T, 0]`` ~ rupture end time)')

    def discretize_t(self, deltat, tref):
        '''
        Discretize the smooth ramp on a regular time grid with spacing
        ``deltat``.

        :returns: ``(times, amplitudes)`` arrays, amplitudes normalized to
            unit sum
        '''
        tmin_stf = tref - self.duration * (self.anchor + 1.) * 0.5
        tmax_stf = tref + self.duration * (1. - self.anchor) * 0.5
        tmin = round(tmin_stf / deltat) * deltat
        tmax = round(tmax_stf / deltat) * deltat
        D = round((tmax - tmin) / deltat) * deltat
        nt = int(round(D / deltat)) + 1
        times = num.linspace(tmin, tmax, nt)
        if nt > 1:
            rise_time = self.rise_ratio * self.duration
            amplitudes = num.ones_like(times)
            tp = tmin + rise_time
            # Samples within the rise portion follow the smooth cosine
            # ramp; afterwards the amplitude stays at its plateau value.
            ii = num.where(times <= tp)
            t_inc = times[ii]
            # NOTE(review): presumably the smooth-ramp polynomial of
            # Bruestle & Mueller (1983); verify coefficients against the
            # reference.
            a = num.cos(num.pi * (t_inc - tmin_stf) / rise_time)
            b = num.cos(3 * num.pi * (t_inc - tmin_stf) / rise_time) - 1.0
            amplitudes[ii] = (9. / 16.) * (1 - a + (1. / 9.) * b)

            amplitudes /= num.sum(amplitudes)
        else:
            amplitudes = num.ones(1)

        return times, amplitudes

    def base_key(self):
        # Cache key: class name plus all shape-determining parameters.
        return (type(self).__name__,
                self.duration, self.rise_ratio, self.anchor)

1089 

class ResonatorSTF(STF):
    '''
    Simple resonator like source time function.

    .. math ::

        f(t) = 0 for t < 0
        f(t) = e^{-t/tau} * sin(2 * pi * f * t)


    .. figure :: /static/stf-SmoothRampSTF.svg
        :width: 40%
        :alt: smooth ramp source time function

    '''

    duration = Float.T(
        default=0.0,
        help='decay time')

    frequency = Float.T(
        default=1.0,
        help='resonance frequency')

    def discretize_t(self, deltat, tref):
        '''
        Discretize the damped oscillation on a regular time grid with
        spacing ``deltat``, covering three decay times after ``tref``.

        :returns: ``(times, amplitudes)`` arrays (not normalized)
        '''
        # Snap the sampled window [tref, tref + 3*tau] onto the grid.
        tmin = math.floor(tref / deltat) * deltat
        tmax = math.ceil((tref + self.duration * 3) / deltat) * deltat
        times = util.arange2(tmin, tmax, deltat)

        trel = times - tref
        amplitudes = num.exp(-trel / self.duration) \
            * num.sin(2.0 * num.pi * self.frequency * trel)

        return times, amplitudes

    def base_key(self):
        # Cache key: class name plus all shape-determining parameters.
        return (type(self).__name__,
                self.duration, self.frequency)

1128 

1129 

class STFMode(StringChoice):
    '''
    How the source time function is handled: ``'pre'`` applies it during
    source discretization, ``'post'`` applies it to the stacked seismograms
    (see :py:meth:`Source.effective_stf_pre` and
    :py:meth:`Source.effective_stf_post`).
    '''
    choices = ['pre', 'post']

1132 

1133 

class Source(Location, Cloneable):
    '''
    Base class for all source models.

    A source is a :py:class:`pyrocko.model.Location` carrying an origin
    time and an optional source time function (STF), which is applied
    either during discretization (``stf_mode='pre'``) or in
    post-processing (``stf_mode='post'``).
    '''

    # optional human-readable identifier, carried over to pyrocko events
    name = String.T(optional=True, default='')

    time = Timestamp.T(
        default=Timestamp.D('1970-01-01 00:00:00'),
        help='source origin time.')

    stf = STF.T(
        optional=True,
        help='source time function.')

    stf_mode = STFMode.T(
        default='post',
        help='whether to apply source time function in pre or '
             'post-processing.')

    def __init__(self, **kwargs):
        Location.__init__(self, **kwargs)

    def update(self, **kwargs):
        '''
        Change some of the source model's parameters.

        Example::

            >>> from pyrocko import gf
            >>> s = gf.DCSource()
            >>> s.update(strike=66., dip=33.)
            >>> print(s)
            --- !pf.DCSource
            depth: 0.0
            time: 1970-01-01 00:00:00
            magnitude: 6.0
            strike: 66.0
            dip: 33.0
            rake: 0.0

        '''

        for (k, v) in kwargs.items():
            self[k] = v

    def grid(self, **variables):
        '''
        Create grid of source model variations.

        :param variables: mapping of parameter names to value ranges from
            which the grid is built
        :returns: :py:class:`SourceGrid` instance.

        Example::

            >>> from pyrocko import gf
            >>> base = DCSource()
            >>> R = gf.Range
            >>> for s in base.grid(dip=R(30., 90., 15.)):
            ...     pass  # process source model variation ``s``

        '''
        return SourceGrid(base=self, variables=variables)

    def base_key(self):
        '''
        Get key to decide about source discretization / GF stack sharing.

        When two source models differ only in amplitude and origin time, the
        discretization and the GF stacking can be done only once for a unit
        amplitude and a zero origin time and the amplitude and origin times of
        the seismograms can be applied during post-processing of the synthetic
        seismogram.

        For any derived parameterized source model, this method is called to
        decide if discretization and stacking of the source should be shared.
        When two source models return an equal vector of values discretization
        is shared.
        '''
        return (self.depth, self.lat, self.north_shift,
                self.lon, self.east_shift, self.time, type(self).__name__) + \
            self.effective_stf_pre().base_key()

    def get_factor(self):
        '''
        Get the scaling factor to be applied during post-processing.

        Discretization of the base seismogram is usually done for a unit
        amplitude, because a common factor can be efficiently multiplied to
        final seismograms. This eliminates the need to repeat the stacking
        when creating seismograms for a series of source models only
        differing in amplitude.

        This method should return the scaling factor to apply in the
        post-processing (often this is simply the scalar moment of the source).
        '''

        return 1.0

    def effective_stf_pre(self):
        '''
        Return the STF applied before stacking of the Green's functions.

        This STF is used during discretization of the parameterized source
        models, i.e. to produce a temporal distribution of point sources.

        Handling of the STF before stacking of the GFs is less efficient but
        allows to use different source time functions for different parts of
        the source.
        '''

        if self.stf is not None and self.stf_mode == 'pre':
            return self.stf
        else:
            # unit pulse: discretization is done for a delta-like STF
            return g_unit_pulse

    def effective_stf_post(self):
        '''
        Return the STF applied after stacking of the Green's functions.

        This STF is used in the post-processing of the synthetic seismograms.

        Handling of the STF after stacking of the GFs is usually more efficient
        but is only possible when a common STF is used for all subsources.
        '''

        if self.stf is not None and self.stf_mode == 'post':
            return self.stf
        else:
            return g_unit_pulse

    def _dparams_base(self):
        # Keyword arguments common to all Discretized*Source constructors,
        # for a single point source at a single time sample.
        return dict(times=arr(self.time),
                    lat=self.lat, lon=self.lon,
                    north_shifts=arr(self.north_shift),
                    east_shifts=arr(self.east_shift),
                    depths=arr(self.depth))

    def _hash(self):
        # Hex digest over the base_key tuple, usable e.g. as a cache key.
        sha = sha1()
        for k in self.base_key():
            sha.update(str(k).encode())
        return sha.hexdigest()

    def _dparams_base_repeated(self, times):
        # Like _dparams_base, but with the (constant) source position
        # repeated once per given time sample.
        if times is None:
            return self._dparams_base()

        nt = times.size
        north_shifts = num.repeat(self.north_shift, nt)
        east_shifts = num.repeat(self.east_shift, nt)
        depths = num.repeat(self.depth, nt)
        return dict(times=times,
                    lat=self.lat, lon=self.lon,
                    north_shifts=north_shifts,
                    east_shifts=east_shifts,
                    depths=depths)

    def pyrocko_event(self, store=None, target=None, **kwargs):
        '''
        Convert to a :py:class:`pyrocko.model.Event` object.

        The event duration is taken from the STF's effective duration, if an
        STF is set. ``store`` and ``target`` are unused here but may be
        needed by overriding implementations in subclasses.
        '''
        duration = None
        if self.stf:
            duration = self.stf.effective_duration

        return model.Event(
            lat=self.lat,
            lon=self.lon,
            north_shift=self.north_shift,
            east_shift=self.east_shift,
            time=self.time,
            name=self.name,
            depth=self.depth,
            duration=duration,
            **kwargs)

    def geometry(self, **kwargs):
        # To be implemented in subclasses with a spatial extent.
        raise NotImplementedError

    def outline(self, cs='xyz'):
        '''
        Get source outline (a single point for a point source).

        :param cs: output coordinate system, see
            :ref:`coordinate-system-names`
        '''
        points = num.atleast_2d(num.zeros([1, 3]))

        points[:, 0] += self.north_shift
        points[:, 1] += self.east_shift
        points[:, 2] += self.depth
        if cs == 'xyz':
            return points
        elif cs == 'xy':
            return points[:, :2]
        elif cs in ('latlon', 'lonlat'):
            latlon = ne_to_latlon(
                self.lat, self.lon, points[:, 0], points[:, 1])

            latlon = num.array(latlon).T
            if cs == 'latlon':
                return latlon
            else:
                # lonlat: swap columns
                return latlon[:, ::-1]

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        '''
        Create a source from a :py:class:`pyrocko.model.Event` object.

        :raises ConversionError: if the event has no depth information
        '''
        if ev.depth is None:
            raise ConversionError(
                'Cannot convert event object to source object: '
                'no depth information available')

        stf = None
        if ev.duration is not None:
            stf = HalfSinusoidSTF(effective_duration=ev.duration)

        d = dict(
            name=ev.name,
            time=ev.time,
            lat=ev.lat,
            lon=ev.lon,
            north_shift=ev.north_shift,
            east_shift=ev.east_shift,
            depth=ev.depth,
            stf=stf)
        d.update(kwargs)
        return cls(**d)

    def get_magnitude(self):
        raise NotImplementedError(
            '%s does not implement get_magnitude()'
            % self.__class__.__name__)

1356 

1357 

class SourceWithMagnitude(Source):
    '''
    Base class for sources containing a moment magnitude.
    '''

    magnitude = Float.T(
        default=6.0,
        help='Moment magnitude Mw as in [Hanks and Kanamori, 1979]')

    def __init__(self, **kwargs):
        # Accept ``moment`` as an alternative to ``magnitude``; an
        # explicitly given magnitude takes precedence.
        if 'moment' in kwargs:
            mom = kwargs.pop('moment')
            if 'magnitude' not in kwargs:
                kwargs['magnitude'] = float(pmt.moment_to_magnitude(mom))

        Source.__init__(self, **kwargs)

    @property
    def moment(self):
        # Scalar moment [Nm], derived from the magnitude.
        return float(pmt.magnitude_to_moment(self.magnitude))

    @moment.setter
    def moment(self, value):
        self.magnitude = float(pmt.moment_to_magnitude(value))

    def pyrocko_event(self, store=None, target=None, **kwargs):
        '''
        Convert to a :py:class:`pyrocko.model.Event`, including magnitude.
        '''
        return Source.pyrocko_event(
            self, store, target,
            magnitude=self.magnitude,
            **kwargs)

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        '''
        Create a source from a :py:class:`pyrocko.model.Event` object.
        '''
        d = {}
        # Fix: explicit None check -- a valid magnitude of 0.0 is falsy and
        # was previously dropped.
        if ev.magnitude is not None:
            d.update(magnitude=ev.magnitude)

        d.update(kwargs)
        return super(SourceWithMagnitude, cls).from_pyrocko_event(ev, **d)

    def get_magnitude(self):
        return self.magnitude

1400 

1401 

class DerivedMagnitudeError(ValidationError):
    '''
    Raised when a source's magnitude cannot be derived from its parameters,
    or when conflicting parameterizations are given.
    '''
    pass

1404 

1405 

class SourceWithDerivedMagnitude(Source):
    '''
    Base class for sources whose magnitude is derived from other parameters.
    '''

    class __T(Source.T):
        # Hook into guts validation so that conflicting parameter
        # combinations are rejected when the object is validated.

        def validate_extra(self, val):
            Source.T.validate_extra(self, val)
            val.check_conflicts()

    def check_conflicts(self):
        '''
        Check for parameter conflicts.

        To be overloaded in subclasses. Raises :py:exc:`DerivedMagnitudeError`
        on conflicts.
        '''
        pass

    def get_magnitude(self, store=None, target=None):
        # To be overloaded in subclasses.
        raise DerivedMagnitudeError('No magnitude set.')

    def get_moment(self, store=None, target=None):
        # Scalar moment [Nm] corresponding to the derived magnitude.
        return float(pmt.magnitude_to_moment(
            self.get_magnitude(store, target)))

    def pyrocko_moment_tensor(self, store=None, target=None):
        raise NotImplementedError(
            '%s does not implement pyrocko_moment_tensor()'
            % self.__class__.__name__)

    def pyrocko_event(self, store=None, target=None, **kwargs):
        '''
        Convert to a :py:class:`pyrocko.model.Event` object.

        Moment tensor and magnitude are attached when they can be derived;
        otherwise both are left unset.
        '''
        try:
            mt = self.pyrocko_moment_tensor(store, target)
            magnitude = self.get_magnitude()
        except (DerivedMagnitudeError, NotImplementedError):
            mt = None
            magnitude = None

        return Source.pyrocko_event(
            self, store, target,
            moment_tensor=mt,
            magnitude=magnitude,
            **kwargs)

1448 

1449 

class ExplosionSource(SourceWithDerivedMagnitude):
    '''
    An isotropic explosion point source.

    Either ``magnitude`` or ``volume_change`` may be given (not both); the
    other quantity is derived using the elastic moduli from the GF store's
    earth model.
    '''

    magnitude = Float.T(
        optional=True,
        help='moment magnitude Mw as in [Hanks and Kanamori, 1979]')

    volume_change = Float.T(
        optional=True,
        help='volume change of the explosion/implosion or '
             'the contracting/extending magmatic source. [m^3]')

    discretized_source_class = meta.DiscretizedExplosionSource

    def __init__(self, **kwargs):
        # Accept ``moment`` as an alternative to ``magnitude``; an
        # explicitly given magnitude takes precedence.
        if 'moment' in kwargs:
            mom = kwargs.pop('moment')
            if 'magnitude' not in kwargs:
                kwargs['magnitude'] = float(pmt.moment_to_magnitude(mom))

        SourceWithDerivedMagnitude.__init__(self, **kwargs)

    def base_key(self):
        return SourceWithDerivedMagnitude.base_key(self) + \
            (self.volume_change,)

    def check_conflicts(self):
        # magnitude and volume_change are alternative parameterizations
        if self.magnitude is not None and self.volume_change is not None:
            raise DerivedMagnitudeError(
                'Magnitude and volume_change are both defined.')

    def get_magnitude(self, store=None, target=None):
        '''
        Get the magnitude, derived from ``volume_change`` if necessary.
        Falls back to the magnitude of a unit moment when neither parameter
        is set.
        '''
        self.check_conflicts()

        if self.magnitude is not None:
            return self.magnitude

        elif self.volume_change is not None:
            moment = self.volume_change * \
                self.get_moment_to_volume_change_ratio(store, target)

            return float(pmt.moment_to_magnitude(abs(moment)))
        else:
            return float(pmt.moment_to_magnitude(1.0))

    def get_volume_change(self, store=None, target=None):
        '''
        Get the volume change, derived from ``magnitude`` if necessary.
        '''
        self.check_conflicts()

        if self.volume_change is not None:
            return self.volume_change

        elif self.magnitude is not None:
            moment = float(pmt.magnitude_to_moment(self.magnitude))
            return moment / self.get_moment_to_volume_change_ratio(
                store, target)

        else:
            # Fix: also forward ``target`` here so the interpolation setting
            # is honoured consistently with the branch above.
            return 1.0 / self.get_moment_to_volume_change_ratio(
                store, target)

    def get_moment_to_volume_change_ratio(self, store, target=None):
        '''
        Moment per unit volume change [Pa], computed from the elastic moduli
        at the source position as ``2 * mu + kappa``.

        :raises DerivedMagnitudeError: if no store is given or the source
            position is outside the earth model
        '''
        if store is None:
            raise DerivedMagnitudeError(
                'Need earth model to convert between volume change and '
                'magnitude.')

        points = num.array(
            [[self.north_shift, self.east_shift, self.depth]], dtype=float)

        interpolation = target.interpolation if target else 'multilinear'
        try:
            shear_moduli = store.config.get_shear_moduli(
                self.lat, self.lon,
                points=points,
                interpolation=interpolation)[0]

            bulk_moduli = store.config.get_bulk_moduli(
                self.lat, self.lon,
                points=points,
                interpolation=interpolation)[0]
        except meta.OutOfBounds:
            raise DerivedMagnitudeError(
                'Could not get shear modulus at source position.')

        return float(2. * shear_moduli + bulk_moduli)

    def get_factor(self):
        return 1.0

    def discretize_basesource(self, store, target=None):
        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)

        # scale STF samples to the isotropic moment tensor amplitude
        amplitudes *= self.get_moment(store, target) * math.sqrt(2. / 3.)

        if self.volume_change is not None:
            # implosion: flip the sign
            if self.volume_change < 0.:
                amplitudes *= -1

        return meta.DiscretizedExplosionSource(
            m0s=amplitudes,
            **self._dparams_base_repeated(times))

    def pyrocko_moment_tensor(self, store=None, target=None):
        a = self.get_moment(store, target) * math.sqrt(2. / 3.)
        return pmt.MomentTensor(m=pmt.symmat6(a, a, a, 0., 0., 0.))

1557 

1558 

class RectangularExplosionSource(ExplosionSource):
    '''
    Rectangular or line explosion source.
    '''

    discretized_source_class = meta.DiscretizedExplosionSource

    strike = Float.T(
        default=0.0,
        help='strike direction in [deg], measured clockwise from north')

    dip = Float.T(
        default=90.0,
        help='dip angle in [deg], measured downward from horizontal')

    length = Float.T(
        default=0.,
        help='length of rectangular source area [m]')

    width = Float.T(
        default=0.,
        help='width of rectangular source area [m]')

    anchor = StringChoice.T(
        choices=['top', 'top_left', 'top_right', 'center', 'bottom',
                 'bottom_left', 'bottom_right'],
        default='center',
        optional=True,
        # NOTE(review): the help text mentions center_left/center_right but
        # they are not in ``choices`` -- confirm which list is intended.
        help='Anchor point for positioning the plane, can be: top, center or'
             'bottom and also top_left, top_right,bottom_left,'
             'bottom_right, center_left and center right')

    nucleation_x = Float.T(
        optional=True,
        help='horizontal position of rupture nucleation in normalized fault '
             'plane coordinates (-1 = left edge, +1 = right edge)')

    nucleation_y = Float.T(
        optional=True,
        help='down-dip position of rupture nucleation in normalized fault '
             'plane coordinates (-1 = upper edge, +1 = lower edge)')

    velocity = Float.T(
        default=3500.,
        help='speed of explosion front [m/s]')

    aggressive_oversampling = Bool.T(
        default=False,
        help='Aggressive oversampling for basesource discretization. '
             "When using 'multilinear' interpolation oversampling has"
             ' practically no effect.')

    def base_key(self):
        # see Source.base_key: parameters relevant for GF stack sharing
        return Source.base_key(self) + (self.strike, self.dip, self.length,
                                        self.width, self.nucleation_x,
                                        self.nucleation_y, self.velocity,
                                        self.anchor)

    def discretize_basesource(self, store, target=None):
        '''
        Discretize the rectangle into a cloud of point sources with times
        following the explosion front, amplitudes scaled to total moment.
        '''

        # convert normalized nucleation coordinates [-1, 1] to offsets [m]
        if self.nucleation_x is not None:
            nucx = self.nucleation_x * 0.5 * self.length
        else:
            nucx = None

        if self.nucleation_y is not None:
            nucy = self.nucleation_y * 0.5 * self.width
        else:
            nucy = None

        stf = self.effective_stf_pre()

        points, times, amplitudes, dl, dw, nl, nw = discretize_rect_source(
            store.config.deltas, store.config.deltat,
            self.time, self.north_shift, self.east_shift, self.depth,
            self.strike, self.dip, self.length, self.width, self.anchor,
            self.velocity, stf=stf, nucleation_x=nucx, nucleation_y=nucy)

        # normalize, then scale sub-source amplitudes to the total moment
        amplitudes /= num.sum(amplitudes)
        amplitudes *= self.get_moment(store, target)

        return meta.DiscretizedExplosionSource(
            lat=self.lat,
            lon=self.lon,
            times=times,
            north_shifts=points[:, 0],
            east_shifts=points[:, 1],
            depths=points[:, 2],
            m0s=amplitudes)

    def outline(self, cs='xyz'):
        '''
        Get outline of the rectangular source area.

        :param cs: output coordinate system, see
            :ref:`coordinate-system-names`
        '''
        points = outline_rect_source(self.strike, self.dip, self.length,
                                     self.width, self.anchor)

        # shift from local plane coordinates to the source position
        points[:, 0] += self.north_shift
        points[:, 1] += self.east_shift
        points[:, 2] += self.depth
        if cs == 'xyz':
            return points
        elif cs == 'xy':
            return points[:, :2]
        elif cs in ('latlon', 'lonlat'):
            latlon = ne_to_latlon(
                self.lat, self.lon, points[:, 0], points[:, 1])

            latlon = num.array(latlon).T
            if cs == 'latlon':
                return latlon
            else:
                return latlon[:, ::-1]

    def get_nucleation_abs_coord(self, cs='xy'):
        '''
        Get absolute coordinates of the nucleation point.

        Returns ``(None, None)`` if no nucleation point is set.
        '''

        if self.nucleation_x is None:
            return None, None

        coords = from_plane_coords(self.strike, self.dip, self.length,
                                   self.width, self.depth, self.nucleation_x,
                                   self.nucleation_y, lat=self.lat,
                                   lon=self.lon, north_shift=self.north_shift,
                                   east_shift=self.east_shift, cs=cs)
        return coords

1681 

1682 

class DCSource(SourceWithMagnitude):
    '''
    A double-couple point source.
    '''

    strike = Float.T(
        default=0.0,
        help='strike direction in [deg], measured clockwise from north')

    dip = Float.T(
        default=90.0,
        help='dip angle in [deg], measured downward from horizontal')

    rake = Float.T(
        default=0.0,
        help='rake angle in [deg], '
             'measured counter-clockwise from right-horizontal '
             'in on-plane view')

    discretized_source_class = meta.DiscretizedMTSource

    def base_key(self):
        # the focal mechanism angles determine GF stack sharing
        mechanism = (self.strike, self.dip, self.rake)
        return Source.base_key(self) + mechanism

    def get_factor(self):
        # post-processing scale factor: the scalar moment [Nm]
        moment = pmt.magnitude_to_moment(self.magnitude)
        return float(moment)

    def discretize_basesource(self, store, target=None):
        # temporal discretization via the pre-STF
        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)

        # unit double-couple moment tensor for this mechanism
        mech = pmt.MomentTensor(
            strike=self.strike, dip=self.dip, rake=self.rake)

        # one m6 row per STF sample, scaled by the sample amplitude
        m6s = mech.m6()[num.newaxis, :] * amplitudes[:, num.newaxis]
        return meta.DiscretizedMTSource(
            m6s=m6s, **self._dparams_base_repeated(times))

    def pyrocko_moment_tensor(self, store=None, target=None):
        return pmt.MomentTensor(
            strike=self.strike, dip=self.dip, rake=self.rake,
            scalar_moment=self.moment)

    def pyrocko_event(self, store=None, target=None, **kwargs):
        # attach the full moment tensor to the event
        mt = self.pyrocko_moment_tensor(store, target)
        return SourceWithMagnitude.pyrocko_event(
            self, store, target, moment_tensor=mt, **kwargs)

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        '''
        Create a double-couple source from a pyrocko event, taking strike,
        dip, rake and magnitude from its moment tensor when available.
        '''
        d = {}
        mt = ev.moment_tensor
        if mt:
            (strike, dip, rake), _ = mt.both_strike_dip_rake()
            d = dict(
                strike=float(strike),
                dip=float(dip),
                rake=float(rake),
                magnitude=float(mt.moment_magnitude()))

        d.update(kwargs)
        return super(DCSource, cls).from_pyrocko_event(ev, **d)

1747 

1748 

class CLVDSource(SourceWithMagnitude):
    '''
    A pure CLVD point source.
    '''

    discretized_source_class = meta.DiscretizedMTSource

    azimuth = Float.T(
        default=0.0,
        help='azimuth direction of largest dipole, clockwise from north [deg]')

    dip = Float.T(
        default=90.,
        help='dip direction of largest dipole, downward from horizontal [deg]')

    def base_key(self):
        return Source.base_key(self) + (self.azimuth, self.dip)

    def get_factor(self):
        # post-processing scale factor: the scalar moment [Nm]
        return float(pmt.magnitude_to_moment(self.magnitude))

    @property
    def m6(self):
        '''
        6-component moment tensor of the oriented CLVD, scaled to moment.
        '''
        # eigenvalues (-a/2, -a/2, a); the sqrt(4/3) factor makes the
        # scalar moment sqrt(sum(eigenvalues**2) / 2) equal to get_factor()
        a = math.sqrt(4. / 3.) * self.get_factor()
        m = pmt.symmat6(-0.5 * a, -0.5 * a, a, 0., 0., 0.)
        # orient the largest dipole by dip and azimuth
        rotmat1 = pmt.euler_to_matrix(
            d2r * (self.dip - 90.),
            d2r * (self.azimuth - 90.),
            0.)
        m = num.dot(rotmat1.T, num.dot(m, rotmat1))
        return pmt.to6(m)

    @property
    def m6_astuple(self):
        return tuple(self.m6.tolist())

    def discretize_basesource(self, store, target=None):
        # divide by factor: discretization uses unit amplitude, the moment
        # is re-applied in post-processing (see Source.get_factor)
        factor = self.get_factor()
        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)
        return meta.DiscretizedMTSource(
            m6s=self.m6[num.newaxis, :] * amplitudes[:, num.newaxis] / factor,
            **self._dparams_base_repeated(times))

    def pyrocko_moment_tensor(self, store=None, target=None):
        return pmt.MomentTensor(m=pmt.symmat6(*self.m6_astuple))

    def pyrocko_event(self, store=None, target=None, **kwargs):
        # Fix: build the moment tensor once and reuse it; the original
        # constructed it a second time for the moment_tensor argument.
        mt = self.pyrocko_moment_tensor(store, target)
        return Source.pyrocko_event(
            self, store, target,
            moment_tensor=mt,
            magnitude=float(mt.moment_magnitude()),
            **kwargs)

1803 

1804 

class VLVDSource(SourceWithDerivedMagnitude):
    '''
    Volumetric linear vector dipole source.

    This source is a parameterization for a restricted moment tensor point
    source, useful to represent dyke or sill like inflation or deflation
    sources. The restriction is such that the moment tensor is rotational
    symmetric. It can be represented by a superposition of a linear vector
    dipole (here we use a CLVD for convenience) and an isotropic component.
    The restricted moment tensor has 4 degrees of freedom: 2 independent
    eigenvalues and 2 rotation angles orienting the symmetry axis.

    In this parameterization, the isotropic component is controlled by
    ``volume_change``. To define the moment tensor, it must be converted to
    the scalar moment of the MT's isotropic component. For the conversion,
    the shear modulus at the source's position must be known. This value is
    extracted from the earth model defined in the GF store in use.

    The CLVD part is controlled by its scalar moment :math:`M_0`:
    ``clvd_moment``. The sign of ``clvd_moment`` is used to switch between a
    positive or negative CLVD (the sign of the largest eigenvalue).
    '''

    discretized_source_class = meta.DiscretizedMTSource

    azimuth = Float.T(
        default=0.0,
        help='azimuth direction of symmetry axis, clockwise from north [deg].')

    dip = Float.T(
        default=90.,
        help='dip direction of symmetry axis, downward from horizontal [deg].')

    volume_change = Float.T(
        default=0.,
        help='volume change of the inflation/deflation [m^3].')

    clvd_moment = Float.T(
        default=0.,
        help='scalar moment :math:`M_0` of the CLVD component [Nm]. The sign '
             'controls the sign of the CLVD (the sign of its largest '
             'eigenvalue).')

    def get_moment_to_volume_change_ratio(self, store, target):
        '''
        Moment per unit volume change [Pa], computed from the elastic moduli
        at the source position as ``2 * mu + kappa``.

        :raises DerivedMagnitudeError: if store or target are missing, or if
            the source position is outside the earth model
        '''
        if store is None or target is None:
            raise DerivedMagnitudeError(
                'Need earth model to convert between volume change and '
                'magnitude.')

        points = num.array(
            [[self.north_shift, self.east_shift, self.depth]], dtype=float)

        try:
            shear_moduli = store.config.get_shear_moduli(
                self.lat, self.lon,
                points=points,
                interpolation=target.interpolation)[0]

            bulk_moduli = store.config.get_bulk_moduli(
                self.lat, self.lon,
                points=points,
                interpolation=target.interpolation)[0]
        except meta.OutOfBounds:
            raise DerivedMagnitudeError(
                'Could not get shear modulus at source position.')

        return float(2. * shear_moduli + bulk_moduli)

    def base_key(self):
        return Source.base_key(self) + \
            (self.azimuth, self.dip, self.volume_change, self.clvd_moment)

    def get_magnitude(self, store=None, target=None):
        mt = self.pyrocko_moment_tensor(store, target)
        return float(pmt.moment_to_magnitude(mt.moment))

    def get_m6(self, store, target):
        '''
        Get the full 6-component moment tensor (CLVD plus isotropic part).
        '''
        # CLVD with eigenvalues (-a/2, -a/2, a); the sqrt(4/3) factor makes
        # the CLVD's scalar moment equal to clvd_moment
        a = math.sqrt(4. / 3.) * self.clvd_moment
        m_clvd = pmt.symmat6(-0.5 * a, -0.5 * a, a, 0., 0., 0.)

        # orient the symmetry axis by dip and azimuth
        rotmat1 = pmt.euler_to_matrix(
            d2r * (self.dip - 90.),
            d2r * (self.azimuth - 90.),
            0.)
        m_clvd = num.dot(rotmat1.T, num.dot(m_clvd, rotmat1))

        # isotropic moment from volume change and elastic moduli
        m_iso = self.volume_change * \
            self.get_moment_to_volume_change_ratio(store, target)

        m_iso = pmt.symmat6(m_iso, m_iso, m_iso, 0.,
                            0., 0.,) * math.sqrt(2. / 3)

        m = pmt.to6(m_clvd) + pmt.to6(m_iso)
        return m

    def get_moment(self, store=None, target=None):
        return float(pmt.magnitude_to_moment(
            self.get_magnitude(store, target)))

    def get_m6_astuple(self, store, target):
        m6 = self.get_m6(store, target)
        return tuple(m6.tolist())

    def discretize_basesource(self, store, target=None):
        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)

        m6 = self.get_m6(store, target)

        # Fix: distribute the moment tensor over all STF time samples (one
        # m6 row per sample), as in MTSource.discretize_basesource. The
        # previous in-place ``m6 *= amplitudes / factor`` only worked for a
        # single-sample pre-STF and gave a shape mismatch (or a single m6s
        # row for many time samples) otherwise.
        m6s = m6[num.newaxis, :] * amplitudes[:, num.newaxis] \
            / self.get_factor()

        return meta.DiscretizedMTSource(
            m6s=m6s,
            **self._dparams_base_repeated(times))

    def pyrocko_moment_tensor(self, store=None, target=None):
        m6_astuple = self.get_m6_astuple(store, target)
        return pmt.MomentTensor(m=pmt.symmat6(*m6_astuple))

1922 

1923 

class MTSource(Source):
    '''
    A moment tensor point source.
    '''

    discretized_source_class = meta.DiscretizedMTSource

    mnn = Float.T(
        default=1.,
        help='north-north component of moment tensor in [Nm]')

    mee = Float.T(
        default=1.,
        help='east-east component of moment tensor in [Nm]')

    mdd = Float.T(
        default=1.,
        help='down-down component of moment tensor in [Nm]')

    mne = Float.T(
        default=0.,
        help='north-east component of moment tensor in [Nm]')

    mnd = Float.T(
        default=0.,
        help='north-down component of moment tensor in [Nm]')

    med = Float.T(
        default=0.,
        help='east-down component of moment tensor in [Nm]')

    def __init__(self, **kwargs):
        # Accept ``m6`` (sequence of 6 components) as a shortcut for setting
        # mnn, mee, mdd, mne, mnd, med individually.
        if 'm6' in kwargs:
            for (k, v) in zip('mnn mee mdd mne mnd med'.split(),
                              kwargs.pop('m6')):
                kwargs[k] = float(v)

        Source.__init__(self, **kwargs)

    @property
    def m6(self):
        # moment tensor as array (mnn, mee, mdd, mne, mnd, med)
        return num.array(self.m6_astuple)

    @property
    def m6_astuple(self):
        return (self.mnn, self.mee, self.mdd, self.mne, self.mnd, self.med)

    @m6.setter
    def m6(self, value):
        self.mnn, self.mee, self.mdd, self.mne, self.mnd, self.med = value

    def base_key(self):
        return Source.base_key(self) + self.m6_astuple

    def discretize_basesource(self, store, target=None):
        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)
        # one m6 row per STF sample, scaled by the sample amplitude
        return meta.DiscretizedMTSource(
            m6s=self.m6[num.newaxis, :] * amplitudes[:, num.newaxis],
            **self._dparams_base_repeated(times))

    def get_magnitude(self, store=None, target=None):
        # scalar moment M0 = |M| / sqrt(2) (Frobenius norm convention),
        # with off-diagonal components counted twice
        m6 = self.m6
        return pmt.moment_to_magnitude(
            math.sqrt(num.sum(m6[0:3]**2) + 2.0 * num.sum(m6[3:6]**2)) /
            math.sqrt(2.))

    def pyrocko_moment_tensor(self, store=None, target=None):
        return pmt.MomentTensor(m=pmt.symmat6(*self.m6_astuple))

    def pyrocko_event(self, store=None, target=None, **kwargs):
        # Fix: reuse the already-computed moment tensor instead of
        # constructing an identical second instance.
        mt = self.pyrocko_moment_tensor(store, target)
        return Source.pyrocko_event(
            self, store, target,
            moment_tensor=mt,
            magnitude=float(mt.moment_magnitude()),
            **kwargs)

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        '''
        Create MTSource from a :py:class:`pyrocko.model.Event` object.

        If the event has no moment tensor but a magnitude, an isotropic
        moment tensor with matching scalar moment is constructed.
        '''
        d = {}
        mt = ev.moment_tensor
        if mt:
            d.update(m6=tuple(map(float, mt.m6())))
        else:
            if ev.magnitude is not None:
                mom = pmt.magnitude_to_moment(ev.magnitude)
                v = math.sqrt(2. / 3.) * mom
                d.update(m6=(v, v, v, 0., 0., 0.))

        d.update(kwargs)
        return super(MTSource, cls).from_pyrocko_event(ev, **d)

2016 

2017 

# Anchor name -> normalized in-plane coordinates (x, y) on the fault plane:
# x: -1.0 = left edge, +1.0 = right edge;
# y: -1.0 = top (up-dip) edge, +1.0 = bottom (down-dip) edge.
map_anchor = {
    'center': (0.0, 0.0),
    'center_left': (-1.0, 0.0),
    'center_right': (1.0, 0.0),
    'top': (0.0, -1.0),
    'top_left': (-1.0, -1.0),
    'top_right': (1.0, -1.0),
    'bottom': (0.0, 1.0),
    'bottom_left': (-1.0, 1.0),
    'bottom_right': (1.0, 1.0)}

2028 

2029 

2030class RectangularSource(SourceWithDerivedMagnitude): 

2031 ''' 

2032 Classical Haskell source model modified for bilateral rupture. 

2033 ''' 

2034 

2035 discretized_source_class = meta.DiscretizedMTSource 

2036 

2037 magnitude = Float.T( 

2038 optional=True, 

2039 help='moment magnitude Mw as in [Hanks and Kanamori, 1979]') 

2040 

2041 strike = Float.T( 

2042 default=0.0, 

2043 help='strike direction in [deg], measured clockwise from north') 

2044 

2045 dip = Float.T( 

2046 default=90.0, 

2047 help='dip angle in [deg], measured downward from horizontal') 

2048 

2049 rake = Float.T( 

2050 default=0.0, 

2051 help='rake angle in [deg], ' 

2052 'measured counter-clockwise from right-horizontal ' 

2053 'in on-plane view') 

2054 

2055 length = Float.T( 

2056 default=0., 

2057 help='length of rectangular source area [m]') 

2058 

2059 width = Float.T( 

2060 default=0., 

2061 help='width of rectangular source area [m]') 

2062 

2063 anchor = StringChoice.T( 

2064 choices=['top', 'top_left', 'top_right', 'center', 'bottom', 

2065 'bottom_left', 'bottom_right'], 

2066 default='center', 

2067 optional=True, 

2068 help='Anchor point for positioning the plane, can be: ``top, center ' 

2069 'bottom, top_left, top_right,bottom_left,' 

2070 'bottom_right, center_left, center right``.') 

2071 

2072 nucleation_x = Float.T( 

2073 optional=True, 

2074 help='horizontal position of rupture nucleation in normalized fault ' 

2075 'plane coordinates (``-1.`` = left edge, ``+1.`` = right edge)') 

2076 

2077 nucleation_y = Float.T( 

2078 optional=True, 

2079 help='down-dip position of rupture nucleation in normalized fault ' 

2080 'plane coordinates (``-1.`` = upper edge, ``+1.`` = lower edge)') 

2081 

2082 velocity = Float.T( 

2083 default=3500., 

2084 help='speed of rupture front [m/s]') 

2085 

2086 slip = Float.T( 

2087 optional=True, 

2088 help='Slip on the rectangular source area [m]') 

2089 

2090 opening_fraction = Float.T( 

2091 default=0., 

2092 help='Determines fraction of slip related to opening. ' 

2093 '(``-1``: pure tensile closing, ' 

2094 '``0``: pure shear, ' 

2095 '``1``: pure tensile opening)') 

2096 

2097 decimation_factor = Int.T( 

2098 optional=True, 

2099 default=1, 

2100 help='Sub-source decimation factor, a larger decimation will' 

2101 ' make the result inaccurate but shorten the necessary' 

2102 ' computation time (use for testing puposes only).') 

2103 

2104 aggressive_oversampling = Bool.T( 

2105 default=False, 

2106 help='Aggressive oversampling for basesource discretization. ' 

2107 "When using 'multilinear' interpolation oversampling has" 

2108 ' practically no effect.') 

2109 

def __init__(self, **kwargs):
    '''
    Initialize, accepting the legacy ``moment`` keyword.

    A given ``moment`` is converted to ``magnitude`` unless an explicit
    magnitude was supplied as well.
    '''
    if 'moment' in kwargs:
        moment = kwargs.pop('moment')
        if 'magnitude' not in kwargs:
            kwargs['magnitude'] = float(pmt.moment_to_magnitude(moment))

    SourceWithDerivedMagnitude.__init__(self, **kwargs)

2117 

def base_key(self):
    '''
    Key of source parameters relevant for Green's function summation.
    '''
    extra = (
        self.magnitude,
        self.slip,
        self.strike,
        self.dip,
        self.rake,
        self.length,
        self.width,
        self.nucleation_x,
        self.nucleation_y,
        self.velocity,
        self.decimation_factor,
        self.anchor)

    return SourceWithDerivedMagnitude.base_key(self) + extra

2132 

def check_conflicts(self):
    '''
    Ensure mutually exclusive strength parameters are not both set.

    :raises DerivedMagnitudeError: if ``magnitude`` and ``slip`` are
        given simultaneously.
    '''
    both_defined = (
        self.magnitude is not None and self.slip is not None)

    if both_defined:
        raise DerivedMagnitudeError(
            'Magnitude and slip are both defined.')

2137 

def get_magnitude(self, store=None, target=None):
    '''
    Get the moment magnitude of the source.

    Uses ``magnitude`` directly when set; derives it from ``slip`` and
    the earth model otherwise; falls back to unit moment when neither
    is given.
    '''
    self.check_conflicts()

    if self.magnitude is not None:
        return self.magnitude

    if self.slip is None:
        # Neither magnitude nor slip given: unit scalar moment.
        return float(pmt.moment_to_magnitude(1.0))

    if None in (store, target):
        raise DerivedMagnitudeError(
            'Magnitude for a rectangular source with slip defined '
            'can only be derived when earth model and target '
            'interpolation method are available.')

    amplitudes = self._discretize(store, target)[2]
    if amplitudes.ndim == 2:
        # CLVD component has no net moment, leave out
        total = num.sum(num.abs(amplitudes[0:2, :]).sum())
    else:
        total = num.sum(amplitudes)

    return float(pmt.moment_to_magnitude(total))

2160 

def get_factor(self):
    '''
    Amplitude scaling factor (magnitude is already folded in elsewhere).
    '''
    return 1.0

2163 

def get_slip_tensile(self):
    '''
    Tensile (opening/closing) portion of the total slip in [m].
    '''
    return self.slip * self.opening_fraction

2166 

def get_slip_shear(self):
    '''
    Shear portion of the total slip in [m].

    Fix: the original evaluated ``abs(self.get_slip_tensile)`` — the
    bound method object instead of its result — which raises
    ``TypeError``. The method must be called.
    '''
    return self.slip - abs(self.get_slip_tensile())

2169 

def _discretize(self, store, target):
    '''
    Discretize the rectangular fault into point sub-sources.

    :returns: points, times, amplitudes, dl, dw, nl, nw — sub-source
        coordinates, onsets, strengths (moment amplitudes or, with slip
        given, stacked shear/iso/CLVD strengths), patch extents and
        patch counts.
    '''
    # Convert normalized nucleation coordinates (-1..1) into [m] offsets.
    nucx = (
        self.nucleation_x * 0.5 * self.length
        if self.nucleation_x is not None else None)
    nucy = (
        self.nucleation_y * 0.5 * self.width
        if self.nucleation_y is not None else None)

    stf = self.effective_stf_pre()

    points, times, amplitudes, dl, dw, nl, nw = discretize_rect_source(
        store.config.deltas, store.config.deltat,
        self.time, self.north_shift, self.east_shift, self.depth,
        self.strike, self.dip, self.length, self.width, self.anchor,
        self.velocity, stf=stf, nucleation_x=nucx, nucleation_y=nucy,
        decimation_factor=self.decimation_factor,
        aggressive_oversampling=self.aggressive_oversampling)

    if self.slip is not None:
        if target is not None:
            interpolation = target.interpolation
        else:
            interpolation = 'nearest_neighbor'
            logger.warning(
                'no target information available, will use '
                '"nearest_neighbor" interpolation when extracting shear '
                'modulus from earth model')

        shear_moduli = store.config.get_shear_moduli(
            self.lat, self.lon,
            points=points,
            interpolation=interpolation)

        tensile_slip = self.get_slip_tensile()
        shear_slip = self.slip - abs(tensile_slip)

        # Shear contribution; tensile contributions appended if present.
        amplitudes_total = [shear_moduli * shear_slip]
        if tensile_slip != 0:
            bulk_moduli = store.config.get_bulk_moduli(
                self.lat, self.lon,
                points=points,
                interpolation=interpolation)

            tensile_iso = bulk_moduli * tensile_slip
            tensile_clvd = (2. / 3.) * shear_moduli * tensile_slip

            amplitudes_total.extend([tensile_iso, tensile_clvd])

        amplitudes_total = num.vstack(amplitudes_total).squeeze() * \
            amplitudes * dl * dw

    else:
        # normalization to retain total moment
        amplitudes_norm = amplitudes / num.sum(amplitudes)
        moment = self.get_moment(store, target)

        amplitudes_total = [
            amplitudes_norm * moment * (1 - abs(self.opening_fraction))]
        if self.opening_fraction != 0.:
            amplitudes_total.append(
                amplitudes_norm * self.opening_fraction * moment)

        amplitudes_total = num.vstack(amplitudes_total).squeeze()

    return points, times, num.atleast_1d(amplitudes_total), dl, dw, nl, nw

2238 

def discretize_basesource(self, store, target=None):
    '''
    Assemble the discretized moment-tensor representation of the source.

    Combines the sub-source amplitudes from :py:meth:`_discretize` with
    the double-couple (and, if tensile slip is present, iso/CLVD)
    moment-tensor components.

    Fix: corrected the typo in the error message
    (``'Unknwown'`` -> ``'Unknown'``).

    :returns: :py:class:`~pyrocko.gf.meta.DiscretizedMTSource`
    '''
    points, times, amplitudes, dl, dw, nl, nw = self._discretize(
        store, target)

    mot = pmt.MomentTensor(
        strike=self.strike, dip=self.dip, rake=self.rake)
    # One m6 per sub-source, scaled below by its amplitude.
    m6s = num.repeat(mot.m6()[num.newaxis, :], times.size, axis=0)

    if amplitudes.ndim == 1:
        # Pure shear: scale the double-couple tensor per sub-source.
        m6s[:, :] *= amplitudes[:, num.newaxis]
    elif amplitudes.ndim == 2:
        # shear MT components
        rotmat1 = pmt.euler_to_matrix(
            d2r * self.dip, d2r * self.strike, d2r * -self.rake)
        m6s[:, :] *= amplitudes[0, :][:, num.newaxis]

        if amplitudes.shape[0] == 2:
            # tensile MT components - moment/magnitude input
            tensile = pmt.symmat6(1., 1., 3., 0., 0., 0.)
            rot_tensile = pmt.to6(
                num.dot(rotmat1.T, num.dot(tensile, rotmat1)))

            m6s_tensile = rot_tensile[
                num.newaxis, :] * amplitudes[1, :][:, num.newaxis]
            m6s += m6s_tensile

        elif amplitudes.shape[0] == 3:
            # tensile MT components - slip input
            iso = pmt.symmat6(1., 1., 1., 0., 0., 0.)
            clvd = pmt.symmat6(-1., -1., 2., 0., 0., 0.)

            rot_iso = pmt.to6(
                num.dot(rotmat1.T, num.dot(iso, rotmat1)))
            rot_clvd = pmt.to6(
                num.dot(rotmat1.T, num.dot(clvd, rotmat1)))

            m6s_iso = rot_iso[
                num.newaxis, :] * amplitudes[1, :][:, num.newaxis]
            m6s_clvd = rot_clvd[
                num.newaxis, :] * amplitudes[2, :][:, num.newaxis]
            m6s += m6s_iso + m6s_clvd
        else:
            raise ValueError('Unknown amplitudes shape!')
    else:
        raise ValueError(
            'Unexpected dimension of {}'.format(amplitudes.ndim))

    ds = meta.DiscretizedMTSource(
        lat=self.lat,
        lon=self.lon,
        times=times,
        north_shifts=points[:, 0],
        east_shifts=points[:, 1],
        depths=points[:, 2],
        m6s=m6s,
        dl=dl,
        dw=dw,
        nl=nl,
        nw=nw)

    return ds

2301 

def xy_to_coord(self, x, y, cs='xyz'):
    '''
    Convert normalized on-plane coordinates to the requested system.

    :param x: normalized along-strike positions (-1. .. +1.)
    :param y: normalized down-dip positions (-1. .. +1.)
    :param cs: output coordinate system name
    '''
    ln, wd = self.length, self.width
    strike, dip = self.strike, self.dip

    def as_array(value):
        return value if isinstance(value, num.ndarray) \
            else num.array(value)

    x, y = as_array(x), as_array(y)

    if x.shape[0] != y.shape[0]:
        raise ValueError('Shapes of x and y mismatch')

    # Normalized -> metric on-plane coordinates.
    x, y = x * 0.5 * ln, y * 0.5 * wd

    points = num.hstack((
        x.reshape(-1, 1), y.reshape(-1, 1), num.zeros((x.shape[0], 1))))

    # Shift so the configured anchor lies at the origin.
    anch_x, anch_y = map_anchor[self.anchor]
    points[:, 0] -= anch_x * 0.5 * ln
    points[:, 1] -= anch_y * 0.5 * wd

    # Rotate the plane into its strike/dip orientation.
    rotmat = num.asarray(
        pmt.euler_to_matrix(dip * d2r, strike * d2r, 0.0))

    points_rot = num.dot(rotmat.T, points.T).T

    points_rot[:, 0] += self.north_shift
    points_rot[:, 1] += self.east_shift
    points_rot[:, 2] += self.depth

    if cs == 'xyz':
        return points_rot

    if cs == 'xy':
        return points_rot[:, :2]

    if cs in ('latlon', 'lonlat', 'latlondepth'):
        latlon = ne_to_latlon(
            self.lat, self.lon, points_rot[:, 0], points_rot[:, 1])
        latlon = num.array(latlon).T
        if cs == 'latlon':
            return latlon
        elif cs == 'lonlat':
            return latlon[:, ::-1]
        else:
            return num.concatenate(
                (latlon, points_rot[:, 2].reshape((len(points_rot), 1))),
                axis=1)

2351 

def outline(self, cs='xyz'):
    '''
    Corner coordinates of the source plane (closed polygon, 5 points).
    '''
    corners_x = num.array([-1., 1., 1., -1., -1.])
    corners_y = num.array([-1., -1., 1., 1., -1.])

    return self.xy_to_coord(corners_x, corners_y, cs=cs)

2357 

def points_on_source(self, cs='xyz', **kwargs):
    '''
    Convert relative plane coordinates to geographical coordinates.

    Additional ``**kwargs`` (``points_x``, ``points_y``) are passed to
    ``points_on_rect_source``.
    '''
    points = points_on_rect_source(
        self.strike, self.dip, self.length, self.width,
        self.anchor, **kwargs)

    # Shift the on-plane points to the absolute source position.
    points[:, 0] += self.north_shift
    points[:, 1] += self.east_shift
    points[:, 2] += self.depth

    if cs == 'xyz':
        return points

    if cs == 'xy':
        return points[:, :2]

    if cs in ('latlon', 'lonlat', 'latlondepth'):
        latlon = ne_to_latlon(
            self.lat, self.lon, points[:, 0], points[:, 1])

        latlon = num.array(latlon).T
        if cs == 'latlon':
            return latlon
        elif cs == 'lonlat':
            return latlon[:, ::-1]
        else:
            return num.concatenate(
                (latlon, points[:, 2].reshape((len(points), 1))),
                axis=1)

2384 

def geometry(self, *args, **kwargs):
    '''
    Build a 3D geometry (mesh, outline, per-patch properties) of the
    discretized source, e.g. for visualisation.

    Fix: the ``slip`` property is only attached when ``slip`` is set;
    it is optional and previously ``num.ones_like(...) * None`` raised
    ``TypeError`` for magnitude-defined sources.

    :returns: :py:class:`pyrocko.model.Geometry`
    '''
    from pyrocko.model import Geometry

    ds = self.discretize_basesource(*args, **kwargs)
    nx, ny = ds.nl, ds.nw

    def patch_outlines_xy(nx, ny):
        # Regular grid of patch corner points in normalized coordinates.
        points = num.zeros((nx * ny, 2))
        points[:, 0] = num.tile(num.linspace(-1., 1., nx), ny)
        points[:, 1] = num.repeat(num.linspace(-1., 1., ny), nx)

        return points

    points_ds = patch_outlines_xy(nx + 1, ny + 1)
    npoints = (nx + 1) * (ny + 1)

    # Vertices: (lat, lon, north, east, depth) per corner point.
    vertices = num.hstack((
        num.ones((npoints, 1)) * self.lat,
        num.ones((npoints, 1)) * self.lon,
        self.xy_to_coord(points_ds[:, 0], points_ds[:, 1], cs='xyz')))

    # Quads as closed index loops into the corner-point grid.
    faces = num.array([[
        iy * (nx + 1) + ix,
        iy * (nx + 1) + ix + 1,
        (iy + 1) * (nx + 1) + ix + 1,
        (iy + 1) * (nx + 1) + ix,
        iy * (nx + 1) + ix]
        for iy in range(ny) for ix in range(nx)])

    xyz = self.outline('xyz')
    latlon = num.ones((5, 2)) * num.array([self.lat, self.lon])
    patchverts = num.hstack((latlon, xyz))

    geom = Geometry()
    geom.setup(vertices, faces)
    geom.set_outlines([patchverts])

    if self.stf:
        geom.times = num.unique(ds.times)

    if self.nucleation_x is not None and self.nucleation_y is not None:
        geom.add_property('t_arrival', ds.times)

    geom.add_property(
        'moment', ds.moments().reshape(ds.nl*ds.nw, -1))

    if self.slip is not None:
        geom.add_property(
            'slip', num.ones_like(ds.times) * self.slip)

    return geom

2435 

def get_nucleation_abs_coord(self, cs='xy'):
    '''
    Absolute coordinates of the nucleation point.

    Returns ``(None, None)`` when no nucleation point is configured.
    '''
    if self.nucleation_x is None:
        return None, None

    return from_plane_coords(
        self.strike, self.dip, self.length,
        self.width, self.depth, self.nucleation_x,
        self.nucleation_y, lat=self.lat,
        lon=self.lon, north_shift=self.north_shift,
        east_shift=self.east_shift, cs=cs)

2447 

def pyrocko_moment_tensor(self, store=None, target=None):
    '''
    Moment-tensor representation of this source.
    '''
    return pmt.MomentTensor(
        strike=self.strike,
        dip=self.dip,
        rake=self.rake,
        scalar_moment=self.get_moment(store, target))

2454 

def pyrocko_event(self, store=None, target=None, **kwargs):
    '''
    Convert this source to a :py:class:`pyrocko.model.Event`.
    '''
    return SourceWithDerivedMagnitude.pyrocko_event(
        self, store, target,
        **kwargs)

2459 

@classmethod
def from_pyrocko_event(cls, ev, **kwargs):
    '''
    Construct a source from a :py:class:`pyrocko.model.Event`.

    Strike, dip, rake and magnitude are taken from the event's moment
    tensor when available; explicit ``kwargs`` override them.
    '''
    d = {}
    mt = ev.moment_tensor
    if mt:
        (strike, dip, rake), _ = mt.both_strike_dip_rake()
        d.update(
            strike=float(strike),
            dip=float(dip),
            rake=float(rake),
            magnitude=float(mt.moment_magnitude()))

    d.update(kwargs)
    return super(RectangularSource, cls).from_pyrocko_event(ev, **d)

2474 

2475 

class PseudoDynamicRupture(SourceWithDerivedMagnitude):
    '''
    Combined Eikonal and Okada quasi-dynamic rupture model.

    Details are described in :doc:`/topics/pseudo-dynamic-rupture`.
    Note: attribute `stf` is not used so far, but kept for future
    applications.
    '''

    discretized_source_class = meta.DiscretizedMTSource

    strike = Float.T(
        default=0.0,
        help='Strike direction in [deg], measured clockwise from north.')

    dip = Float.T(
        default=0.0,
        help='Dip angle in [deg], measured downward from horizontal.')

    length = Float.T(
        default=10. * km,
        help='Length of rectangular source area in [m].')

    width = Float.T(
        default=5. * km,
        help='Width of rectangular source area in [m].')

    # NOTE: help text fixed — it previously advertised ``center_left`` and
    # ``center_right``, which are not in ``choices`` and would be rejected.
    anchor = StringChoice.T(
        choices=['top', 'top_left', 'top_right', 'center', 'bottom',
                 'bottom_left', 'bottom_right'],
        default='center',
        optional=True,
        help='Anchor point for positioning the plane, can be: ``top, '
             'top_left, top_right, center, bottom, bottom_left, '
             'bottom_right``.')

    # Underlying storage for the ``nucleation_x`` property (see below);
    # arrays allow multiple nucleation points.
    nucleation_x__ = Array.T(
        default=num.array([0.]),
        dtype=num.float64,
        serialize_as='list',
        help='Horizontal position of rupture nucleation in normalized fault '
             'plane coordinates (``-1.`` = left edge, ``+1.`` = right edge).')

    nucleation_y__ = Array.T(
        default=num.array([0.]),
        dtype=num.float64,
        serialize_as='list',
        help='Down-dip position of rupture nucleation in normalized fault '
             'plane coordinates (``-1.`` = upper edge, ``+1.`` = lower edge).')

    nucleation_time__ = Array.T(
        optional=True,
        help='Time in [s] after origin, when nucleation points defined by '
             '``nucleation_x`` and ``nucleation_y`` rupture.',
        dtype=num.float64,
        serialize_as='list')

    gamma = Float.T(
        default=0.8,
        help='Scaling factor between rupture velocity and S-wave velocity: '
             r':math:`v_r = \gamma * v_s`.')

    nx = Int.T(
        default=2,
        help='Number of discrete source patches in x direction (along '
             'strike).')

    ny = Int.T(
        default=2,
        help='Number of discrete source patches in y direction (down dip).')

    slip = Float.T(
        optional=True,
        help='Maximum slip of the rectangular source [m]. '
             'Setting the slip the tractions/stress field '
             'will be normalized to accomodate the desired maximum slip.')

    rake = Float.T(
        optional=True,
        help='Rake angle in [deg], '
             'measured counter-clockwise from right-horizontal '
             'in on-plane view. Rake is translated into homogenous tractions '
             'in strike and up-dip direction. ``rake`` is mutually exclusive '
             'with tractions parameter.')

    patches = List.T(
        OkadaSource.T(),
        optional=True,
        help='List of all boundary elements/sub faults/fault patches.')

    patch_mask__ = Array.T(
        dtype=bool,
        serialize_as='list',
        shape=(None,),
        optional=True,
        help='Mask for all boundary elements/sub faults/fault patches. True '
             'leaves the patch in the calculation, False excludes the patch.')

    tractions = TractionField.T(
        optional=True,
        help='Traction field the rupture plane is exposed to. See the'
             ':py:mod:`pyrocko.gf.tractions` module for more details. '
             'If ``tractions=None`` and ``rake`` is given'
             ' :py:class:`~pyrocko.gf.tractions.DirectedTractions` will'
             ' be used.')

    coef_mat = Array.T(
        optional=True,
        help='Coefficient matrix linking traction and dislocation field.',
        dtype=num.float64,
        shape=(None, None))

    eikonal_decimation = Int.T(
        optional=True,
        default=1,
        help='Sub-source eikonal factor, a smaller eikonal factor will'
             ' increase the accuracy of rupture front calculation but'
             ' increases also the computation time.')

    decimation_factor = Int.T(
        optional=True,
        default=1,
        help='Sub-source decimation factor, a larger decimation will'
             ' make the result inaccurate but shorten the necessary'
             ' computation time (use for testing puposes only).')

    nthreads = Int.T(
        optional=True,
        default=1,
        help='Number of threads for Okada forward modelling, '
             'matrix inversion and calculation of point subsources. '
             'Note: for small/medium matrices 1 thread is most efficient.')

    pure_shear = Bool.T(
        optional=True,
        default=False,
        help='Calculate only shear tractions and omit tensile tractions.')

    smooth_rupture = Bool.T(
        default=True,
        help='Smooth the tractions by weighting partially ruptured'
             ' fault patches.')

    aggressive_oversampling = Bool.T(
        default=False,
        help='Aggressive oversampling for basesource discretization. '
             "When using 'multilinear' interpolation oversampling has"
             ' practically no effect.')

2623 

def __init__(self, **kwargs):
    '''
    Initialize and validate the mutually exclusive strength parameters.
    '''
    SourceWithDerivedMagnitude.__init__(self, **kwargs)
    # Cache for rupture velocity/time interpolators, keyed by
    # interpolation method.
    self._interpolators = {}
    self.check_conflicts()

2628 

@property
def nucleation_x(self):
    '''
    Normalized along-strike nucleation position(s), as array.
    '''
    return self.nucleation_x__

@nucleation_x.setter
def nucleation_x(self, nucleation_x):
    # Coerce scalars and lists to ndarray; pass None through unchanged.
    if isinstance(nucleation_x, list):
        nucleation_x = num.array(nucleation_x)
    elif nucleation_x is not None \
            and not isinstance(nucleation_x, num.ndarray):
        nucleation_x = num.array([nucleation_x])

    self.nucleation_x__ = nucleation_x

2643 

@property
def nucleation_y(self):
    '''
    Normalized down-dip nucleation position(s), as array.
    '''
    return self.nucleation_y__

@nucleation_y.setter
def nucleation_y(self, nucleation_y):
    # Coerce scalars and lists to ndarray; pass None through unchanged.
    if isinstance(nucleation_y, list):
        nucleation_y = num.array(nucleation_y)
    elif nucleation_y is not None \
            and not isinstance(nucleation_y, num.ndarray):
        nucleation_y = num.array([nucleation_y])

    self.nucleation_y__ = nucleation_y

2658 

@property
def nucleation(self):
    '''
    Nucleation points as ``(n, 2)`` array of (x, y) pairs, or None.
    '''
    nucl_x, nucl_y = self.nucleation_x, self.nucleation_y

    if (nucl_x is None) or (nucl_y is None):
        return None

    assert nucl_x.shape[0] == nucl_y.shape[0]

    return num.concatenate(
        (nucl_x[:, num.newaxis], nucl_y[:, num.newaxis]), axis=1)

@nucleation.setter
def nucleation(self, nucleation):
    if isinstance(nucleation, list):
        nucleation = num.array(nucleation)

    assert nucleation.shape[1] == 2

    # Split the (n, 2) array into its component setters.
    self.nucleation_x = nucleation[:, 0]
    self.nucleation_y = nucleation[:, 1]

2680 

@property
def nucleation_time(self):
    '''
    Rupture onset time(s) [s] of the nucleation points, as array.
    '''
    return self.nucleation_time__

@nucleation_time.setter
def nucleation_time(self, nucleation_time):
    # Coerce scalars to single-element arrays; pass None through.
    if nucleation_time is not None \
            and not isinstance(nucleation_time, num.ndarray):
        nucleation_time = num.array([nucleation_time])

    self.nucleation_time__ = nucleation_time

2692 

@property
def patch_mask(self):
    '''
    Patch inclusion mask; all-True when unset or of stale shape.
    '''
    mask = self.patch_mask__
    if mask is not None and mask.shape == (self.nx * self.ny,):
        return mask

    return num.ones(self.nx * self.ny, dtype=bool)

@patch_mask.setter
def patch_mask(self, patch_mask):
    if isinstance(patch_mask, list):
        patch_mask = num.array(patch_mask)

    self.patch_mask__ = patch_mask

2708 

def get_tractions(self):
    '''
    Get source traction vectors.

    If :py:attr:`rake` is given, unit length directed traction vectors
    (:py:class:`~pyrocko.gf.tractions.DirectedTractions`) are returned,
    else the given :py:attr:`tractions` are used.

    :returns:
        Traction vectors per patch.
    :rtype:
        :py:class:`~numpy.ndarray`: ``(n_patches, 3)``.
    '''
    if self.rake is None:
        tractions = self.tractions
    else:
        if num.isnan(self.rake):
            raise ValueError('Rake must be a real number, not NaN.')

        logger.warning(
            'Tractions are derived based on the given source rake.')
        tractions = DirectedTractions(rake=self.rake)

    return tractions.get_tractions(self.nx, self.ny, self.patches)

2733 

def base_key(self):
    '''
    Key of source parameters relevant for Green's function summation.
    '''
    extra = (
        self.slip,
        self.strike,
        self.dip,
        self.rake,
        self.length,
        self.width,
        float(self.nucleation_x.mean()),
        float(self.nucleation_y.mean()),
        self.decimation_factor,
        self.anchor,
        self.pure_shear,
        self.gamma,
        tuple(self.patch_mask))

    return SourceWithDerivedMagnitude.base_key(self) + extra

2749 

def check_conflicts(self):
    '''
    Enforce that tractions and rake are mutually exclusive; default to
    ``rake = 0.`` when neither is given.

    NOTE: the truthiness test deliberately lets the auto-set
    ``rake = 0.`` coexist with tractions assigned later.
    '''
    if self.tractions and self.rake:
        raise AttributeError(
            'Tractions and rake are mutually exclusive.')

    if self.tractions is None and self.rake is None:
        self.rake = 0.

2756 

def get_magnitude(self, store=None, target=None):
    '''
    Get the moment magnitude of the source.

    With slip or tractions defined the magnitude is derived by
    integrating the moment rate of the discretized source; otherwise a
    unit moment is assumed.
    '''
    self.check_conflicts()

    if self.slip is None and self.tractions is None:
        return float(pmt.moment_to_magnitude(1.0))

    if store is None:
        raise DerivedMagnitudeError(
            'Magnitude for a rectangular source with slip or '
            'tractions defined can only be derived when earth model '
            'is set.')

    moment_rate, calc_times = self.discretize_basesource(
        store, target=target).get_moment_rate(store.config.deltat)

    # Sample intervals; the first interval is duplicated so the array
    # matches calc_times in length.
    deltat = num.concatenate((
        (num.diff(calc_times)[0],),
        num.diff(calc_times)))

    return float(pmt.moment_to_magnitude(
        num.sum(moment_rate * deltat)))

2778 

def get_factor(self):
    '''
    Amplitude scaling factor (strength is folded into the patches).
    '''
    return 1.0

2781 

def outline(self, cs='xyz'):
    '''
    Get source outline corner coordinates.

    :param cs:
        :ref:`Output coordinate system <coordinate-system-names>`.
    :type cs:
        optional, str

    :returns:
        Corner points in desired coordinate system.
    :rtype:
        :py:class:`~numpy.ndarray`: ``(5, [2, 3])``.
    '''
    points = outline_rect_source(self.strike, self.dip, self.length,
                                 self.width, self.anchor)

    # Shift the on-plane outline to the absolute source position.
    points[:, 0] += self.north_shift
    points[:, 1] += self.east_shift
    points[:, 2] += self.depth

    if cs == 'xyz':
        return points

    if cs == 'xy':
        return points[:, :2]

    if cs in ('latlon', 'lonlat', 'latlondepth'):
        latlon = ne_to_latlon(
            self.lat, self.lon, points[:, 0], points[:, 1])

        latlon = num.array(latlon).T
        if cs == 'latlon':
            return latlon
        elif cs == 'lonlat':
            return latlon[:, ::-1]
        else:
            return num.concatenate(
                (latlon, points[:, 2].reshape((len(points), 1))),
                axis=1)

2819 

def points_on_source(self, cs='xyz', **kwargs):
    '''
    Convert relative plane coordinates to geographical coordinates.

    Given x and y coordinates (relative source coordinates between -1.
    and 1., passed as ``points_x`` and ``points_y`` arrays) are
    converted to the desired geographical coordinate system.

    :param cs:
        :ref:`Output coordinate system <coordinate-system-names>`.
    :type cs:
        optional, str

    :returns:
        Point coordinates in desired coordinate system.
    :rtype:
        :py:class:`~numpy.ndarray`: ``(n_points, [2, 3])``.
    '''
    points = points_on_rect_source(
        self.strike, self.dip, self.length, self.width,
        self.anchor, **kwargs)

    # Shift the on-plane points to the absolute source position.
    points[:, 0] += self.north_shift
    points[:, 1] += self.east_shift
    points[:, 2] += self.depth

    if cs == 'xyz':
        return points

    if cs == 'xy':
        return points[:, :2]

    if cs in ('latlon', 'lonlat', 'latlondepth'):
        latlon = ne_to_latlon(
            self.lat, self.lon, points[:, 0], points[:, 1])

        latlon = num.array(latlon).T
        if cs == 'latlon':
            return latlon
        elif cs == 'lonlat':
            return latlon[:, ::-1]
        else:
            return num.concatenate(
                (latlon, points[:, 2].reshape((len(points), 1))),
                axis=1)

2863 

def pyrocko_moment_tensor(self, store=None, target=None):
    '''
    Moment-tensor representation with an effective rake.

    The rake is the norm-weighted average of the per-patch slip (when a
    store is available) or traction directions.
    '''
    if store is not None:
        if not self.patches:
            self.discretize_patches(store)

        data = self.get_slip()
    else:
        data = self.get_tractions()

    # Weight each patch by the magnitude of its slip/traction vector.
    weights = num.linalg.norm(data, axis=1)
    weights /= weights.sum()

    rakes = num.arctan2(data[:, 1], data[:, 0]) * r2d
    rake = num.average(rakes, weights=weights)

    return pmt.MomentTensor(
        strike=self.strike,
        dip=self.dip,
        rake=rake,
        scalar_moment=self.get_moment(store, target))

2884 

def pyrocko_event(self, store=None, target=None, **kwargs):
    '''
    Convert this source to a :py:class:`pyrocko.model.Event`.
    '''
    return SourceWithDerivedMagnitude.pyrocko_event(
        self, store, target,
        **kwargs)

2889 

@classmethod
def from_pyrocko_event(cls, ev, **kwargs):
    '''
    Construct a source from a :py:class:`pyrocko.model.Event`.

    Strike, dip and rake are taken from the event's moment tensor when
    available; explicit ``kwargs`` override them.
    '''
    d = {}
    mt = ev.moment_tensor
    if mt:
        (strike, dip, rake), _ = mt.both_strike_dip_rake()
        d.update(
            strike=float(strike),
            dip=float(dip),
            rake=float(rake))

    d.update(kwargs)
    return super(PseudoDynamicRupture, cls).from_pyrocko_event(ev, **d)

2903 

def _discretize_points(self, store, *args, **kwargs):
    '''
    Discretize source plane with equal vertical and horizontal spacing.

    Additional ``*args`` and ``**kwargs`` are passed to
    :py:meth:`points_on_source`.

    :param store:
        Green's function database (needs to cover whole region of the
        source).
    :type store:
        :py:class:`~pyrocko.gf.store.Store`

    :returns:
        Number of points in strike and dip direction, distance
        between adjacent points, coordinates (latlondepth) and
        coordinates (xy on fault) for discrete points.
    :rtype:
        (int, int, float, :py:class:`~numpy.ndarray`,
        :py:class:`~numpy.ndarray`).
    '''
    anch_x, anch_y = map_anchor[self.anchor]

    # Sample a down-dip line of points (~1 km spacing) to probe the
    # minimum S-wave velocity across the fault's depth extent.
    npoints = int(self.width // km) + 1
    points = num.zeros((npoints, 3))
    points[:, 1] = num.linspace(-1., 1., npoints)
    points[:, 1] = (points[:, 1] - anch_y) * self.width/2

    rotmat = pmt.euler_to_matrix(self.dip*d2r, self.strike*d2r, 0.0)
    points = num.dot(rotmat.T, points.T).T
    points[:, 2] += self.depth

    vs_min = store.config.get_vs(
        self.lat, self.lon, points,
        interpolation='nearest_neighbor')
    # Floor the rupture velocity at 0.5 km/s * gamma to bound the grid.
    vr_min = max(vs_min.min(), .5*km) * self.gamma

    oversampling = 10.
    delta_l = self.length / (self.nx * oversampling)
    delta_w = self.width / (self.ny * oversampling)

    # Grid spacing: finest of time-sampling, patch and store limits.
    delta = self.eikonal_decimation * num.min([
        store.config.deltat * vr_min / oversampling,
        delta_l, delta_w] + [
        deltas for deltas in store.config.deltas])

    # Snap so an integer number of cells spans the width.
    delta = delta_w / num.ceil(delta_w / delta)

    nx = int(num.ceil(self.length / delta)) + 1
    ny = int(num.ceil(self.width / delta)) + 1

    # Spread the along-strike remainder symmetrically beyond +-1.
    rem_l = (nx-1)*delta - self.length
    lim_x = rem_l / self.length

    points_xy = num.zeros((nx * ny, 2))
    points_xy[:, 0] = num.repeat(
        num.linspace(-1.-lim_x, 1.+lim_x, nx), ny)
    points_xy[:, 1] = num.tile(
        num.linspace(-1., 1., ny), nx)

    points = self.points_on_source(
        points_x=points_xy[:, 0],
        points_y=points_xy[:, 1],
        **kwargs)

    return nx, ny, delta, points, points_xy

2970 

2971 def _discretize_rupture_v(self, store, interpolation='nearest_neighbor', 

2972 points=None): 

2973 ''' 

2974 Get rupture velocity for discrete points on source plane. 

2975 

2976 :param store: 

2977 Green's function database (needs to cover the whole region of the 

2978 source) 

2979 :type store: 

2980 optional, :py:class:`~pyrocko.gf.store.Store` 

2981 

2982 :param interpolation: 

2983 Interpolation method to use (choose between ``'nearest_neighbor'`` 

2984 and ``'multilinear'``). 

2985 :type interpolation: 

2986 optional, str 

2987 

2988 :param points: 

2989 Coordinates on fault (-1.:1.) of discrete points. 

2990 :type points: 

2991 optional, :py:class:`~numpy.ndarray`: ``(n_points, 2)`` 

2992 

2993 :returns: 

2994 Rupture velocity assumed as :math:`v_s * \\gamma` for discrete 

2995 points. 

2996 :rtype: 

2997 :py:class:`~numpy.ndarray`: ``(n_points, )``. 

2998 ''' 

2999 

3000 if points is None: 

3001 _, _, _, points, _ = self._discretize_points(store, cs='xyz') 

3002 

3003 return store.config.get_vs( 

3004 self.lat, self.lon, 

3005 points=points, 

3006 interpolation=interpolation) * self.gamma 

3007 

def discretize_time(
        self, store, interpolation='nearest_neighbor',
        vr=None, times=None, *args, **kwargs):
    '''
    Get rupture start time for discrete points on source plane.

    :param store:
        Green's function database (needs to cover whole region of the
        source)
    :type store:
        :py:class:`~pyrocko.gf.store.Store`

    :param interpolation:
        Interpolation method to use (choose between
        ``'nearest_neighbor'`` and ``'multilinear'``).
    :type interpolation:
        optional, str

    :param vr:
        Array, containing rupture user defined rupture velocity values.
    :type vr:
        optional, :py:class:`~numpy.ndarray`

    :param times:
        Array, containing zeros, where rupture is starting, real
        positive numbers at later secondary nucleation points and -1,
        where time will be calculated. If not given, rupture starts at
        nucleation_x, nucleation_y. Times are given for discrete points
        with equal horizontal and vertical spacing.
    :type times:
        optional, :py:class:`~numpy.ndarray`

    :returns:
        Coordinates (latlondepth), coordinates (xy), rupture velocity,
        rupture propagation time of discrete points.
    :rtype:
        :py:class:`~numpy.ndarray`: ``(n_points, 3)``,
        :py:class:`~numpy.ndarray`: ``(n_points, 2)``,
        :py:class:`~numpy.ndarray`: ``(n_points_dip, n_points_strike)``,
        :py:class:`~numpy.ndarray`: ``(n_points_dip, n_points_strike)``.
    '''
    nx, ny, delta, points, points_xy = self._discretize_points(
        store, cs='xyz')

    if vr is None or vr.shape != tuple((nx, ny)):
        # Fix: the original tested ``if vr:``, which raises
        # ``ValueError`` for multi-element arrays (ambiguous truth
        # value); only warn when an (ill-shaped) array was given.
        if vr is not None:
            logger.warning(
                'Given rupture velocities are not in right shape: '
                '(%i, %i), but needed is (%i, %i).', *vr.shape + (nx, ny))
        vr = self._discretize_rupture_v(store, interpolation, points)\
            .reshape(nx, ny)

    if vr.shape != tuple((nx, ny)):
        logger.warning(
            'Given rupture velocities are not in right shape. Therefore'
            ' standard rupture velocity array is used.')

    def initialize_times():
        # Seed the eikonal solver: nucleation points get their onset
        # times, everything else is marked -1 (to be solved).
        nucl_x, nucl_y = self.nucleation_x, self.nucleation_y

        if nucl_x.shape != nucl_y.shape:
            raise ValueError(
                'Nucleation coordinates have different shape.')

        dist_points = num.array([
            num.linalg.norm(points_xy - num.array([x, y]).ravel(), axis=1)
            for x, y in zip(nucl_x, nucl_y)])
        nucl_indices = num.argmin(dist_points, axis=1)

        if self.nucleation_time is None:
            nucl_times = num.zeros_like(nucl_indices)
        else:
            if self.nucleation_time.shape == nucl_x.shape:
                nucl_times = self.nucleation_time
            else:
                raise ValueError(
                    'Nucleation coordinates and times have different '
                    'shapes')

        t = num.full(nx * ny, -1.)
        t[nucl_indices] = nucl_times
        return t.reshape(nx, ny)

    if times is None:
        times = initialize_times()
    elif times.shape != tuple((nx, ny)):
        times = initialize_times()
        logger.warning(
            'Given times are not in right shape. Therefore standard time'
            ' array is used.')

    # Solve the rupture-front arrival times in place.
    eikonal_ext.eikonal_solver_fmm_cartesian(
        speeds=vr, times=times, delta=delta)

    return points, points_xy, vr, times

3103 

3104 def get_vr_time_interpolators( 

3105 self, store, interpolation='nearest_neighbor', force=False, 

3106 **kwargs): 

3107 ''' 

3108 Get interpolators for rupture velocity and rupture time. 

3109 

3110 Additional ``**kwargs`` are passed to :py:meth:`discretize_time`. 

3111 

3112 :param store: 

3113 Green's function database (needs to cover whole region of the 

3114 source). 

3115 :type store: 

3116 :py:class:`~pyrocko.gf.store.Store` 

3117 

3118 :param interpolation: 

3119 Interpolation method to use (choose between ``'nearest_neighbor'`` 

3120 and ``'multilinear'``). 

3121 :type interpolation: 

3122 optional, str 

3123 

3124 :param force: 

3125 Force recalculation of the interpolators (e.g. after change of 

3126 nucleation point locations/times). Default is ``False``. 

3127 :type force: 

3128 optional, bool 

3129 ''' 

3130 interp_map = {'multilinear': 'linear', 'nearest_neighbor': 'nearest'} 

3131 if interpolation not in interp_map: 

3132 raise TypeError( 

3133 'Interpolation method %s not available' % interpolation) 

3134 

3135 if not self._interpolators.get(interpolation, False) or force: 

3136 _, points_xy, vr, times = self.discretize_time( 

3137 store, **kwargs) 

3138 

3139 if self.length <= 0.: 

3140 raise ValueError( 

3141 'length must be larger then 0. not %g' % self.length) 

3142 

3143 if self.width <= 0.: 

3144 raise ValueError( 

3145 'width must be larger then 0. not %g' % self.width) 

3146 

3147 nx, ny = times.shape 

3148 anch_x, anch_y = map_anchor[self.anchor] 

3149 

3150 points_xy[:, 0] = (points_xy[:, 0] - anch_x) * self.length / 2. 

3151 points_xy[:, 1] = (points_xy[:, 1] - anch_y) * self.width / 2. 

3152 

3153 ascont = num.ascontiguousarray 

3154 

3155 self._interpolators[interpolation] = ( 

3156 nx, ny, times, vr, 

3157 RegularGridInterpolator( 

3158 (ascont(points_xy[::ny, 0]), ascont(points_xy[:ny, 1])), 

3159 times, 

3160 method=interp_map[interpolation]), 

3161 RegularGridInterpolator( 

3162 (ascont(points_xy[::ny, 0]), ascont(points_xy[:ny, 1])), 

3163 vr, 

3164 method=interp_map[interpolation])) 

3165 

3166 return self._interpolators[interpolation] 

3167 

    def discretize_patches(
            self, store, interpolation='nearest_neighbor', force=False,
            grid_shape=(),
            **kwargs):
        '''
        Get rupture start time and OkadaSource elements for points on rupture.

        All source elements and their corresponding center points are
        calculated and stored in the :py:attr:`patches` attribute.

        Additional ``**kwargs`` are passed to :py:meth:`discretize_time`.

        :param store:
            Green's function database (needs to cover whole region of the
            source).
        :type store:
            :py:class:`~pyrocko.gf.store.Store`

        :param interpolation:
            Interpolation method to use (choose between ``'nearest_neighbor'``
            and ``'multilinear'``).
        :type interpolation:
            optional, str

        :param force:
            Force recalculation of the vr and time interpolators ( e.g. after
            change of nucleation point locations/times). Default is ``False``.
        :type force:
            optional, bool

        :param grid_shape:
            Desired sub fault patch grid size (nlength, nwidth). Either factor
            or grid_shape should be set.
        :type grid_shape:
            optional, tuple of int
        '''
        nx, ny, times, vr, time_interpolator, vr_interpolator = \
            self.get_vr_time_interpolators(
                store,
                interpolation=interpolation, force=force, **kwargs)
        anch_x, anch_y = map_anchor[self.anchor]

        # Edge coordinates of the fault plane relative to the anchor point:
        # al1/al2 along strike, aw1/aw2 down dip (see OkadaSource convention).
        al = self.length / 2.
        aw = self.width / 2.
        al1 = -(al + anch_x * al)
        al2 = al - anch_x * al
        aw1 = -aw + anch_y * aw
        aw2 = aw + anch_y * aw
        # Sanity check: edge spans must reproduce the full fault extents.
        assert num.abs([al1, al2]).sum() == self.length
        assert num.abs([aw1, aw2]).sum() == self.width

        def get_lame(*a, **kw):
            # Shear modulus and Poisson ratio from the store's earth model,
            # using lambda = vp**2 * rho - 2 * mu and
            # nu = lambda / (2 * (lambda + mu)).
            shear_mod = store.config.get_shear_moduli(*a, **kw)
            lamb = store.config.get_vp(*a, **kw)**2 \
                * store.config.get_rho(*a, **kw) - 2. * shear_mod
            return shear_mod, lamb / (2. * (lamb + shear_mod))

        # Elastic parameters at the reference point, used for the single
        # parent OkadaSource before patch-wise refinement below.
        shear_mod, poisson = get_lame(
            self.lat, self.lon,
            num.array([[self.north_shift, self.east_shift, self.depth]]),
            interpolation=interpolation)

        okada_src = OkadaSource(
            lat=self.lat, lon=self.lon,
            strike=self.strike, dip=self.dip,
            north_shift=self.north_shift, east_shift=self.east_shift,
            depth=self.depth,
            al1=al1, al2=al2, aw1=aw1, aw2=aw2,
            poisson=poisson.mean(),
            shearmod=shear_mod.mean(),
            opening=kwargs.get('opening', 0.))

        # Patch grid size: explicit grid_shape wins, otherwise fall back to
        # the eikonal discretization (nx, ny). Existing nx/ny are kept.
        if not (self.nx and self.ny):
            if grid_shape:
                self.nx, self.ny = grid_shape
            else:
                self.nx = nx
                self.ny = ny

        source_disc, source_points = okada_src.discretize(self.nx, self.ny)

        # Re-evaluate elastic parameters at each patch center (first three
        # entries of source_patch() are the patch center coordinates).
        shear_mod, poisson = get_lame(
            self.lat, self.lon,
            num.array([src.source_patch()[:3] for src in source_disc]),
            interpolation=interpolation)

        if (self.nx, self.ny) != (nx, ny):
            # Patch grid differs from the eikonal grid: interpolate rupture
            # times and velocities onto the patch centers.
            times_interp = time_interpolator(
                num.ascontiguousarray(source_points[:, :2]))
            vr_interp = vr_interpolator(
                num.ascontiguousarray(source_points[:, :2]))
        else:
            # Same grid: reuse the eikonal values directly (transposed to
            # match the patch ordering of okada_src.discretize).
            times_interp = times.T.ravel()
            vr_interp = vr.T.ravel()

        for isrc, src in enumerate(source_disc):
            src.vr = vr_interp[isrc]
            # Patch times are absolute (origin time + rupture arrival).
            src.time = times_interp[isrc] + self.time

        self.patches = source_disc

3268 

    def discretize_basesource(self, store, target=None):
        '''
        Prepare source for synthetic waveform calculation.

        :param store:
            Green's function database (needs to cover whole region of the
            source).
        :type store:
            :py:class:`~pyrocko.gf.store.Store`

        :param target:
            Target information.
        :type target:
            optional, :py:class:`~pyrocko.gf.targets.Target`

        :returns:
            Source discretized by a set of moment tensors and times.
        :rtype:
            :py:class:`~pyrocko.gf.meta.DiscretizedMTSource`
        '''
        if not target:
            interpolation = 'nearest_neighbor'
        else:
            interpolation = target.interpolation

        # Lazily compute patches and the traction-dislocation coefficient
        # matrix if the caller has not done so already.
        if not self.patches:
            self.discretize_patches(store, interpolation)

        if self.coef_mat is None:
            self.calc_coef_mat()

        delta_slip, slip_times = self.get_delta_slip(store)
        npatches = self.nx * self.ny
        ntimes = slip_times.size

        anch_x, anch_y = map_anchor[self.anchor]

        # Patch extents along strike (pln) and down dip (pwd).
        pln = self.length / self.nx
        pwd = self.width / self.ny

        patch_coords = num.array([
            (p.ix, p.iy)
            for p in self.patches]).reshape(self.nx, self.ny, 2)

        # Pad the slip snapshots with one ring of cells replicating the edge
        # values: a zero-slip boundary would taper the nearest-neighbour
        # interpolation towards zero at the fault rim (unwanted edge effect).
        slip_grid = num.zeros((self.nx + 2, self.ny + 2, ntimes, 3))
        slip_grid[1:-1, 1:-1, :, :] = \
            delta_slip.reshape(self.nx, self.ny, ntimes, 3)

        # Corners copy the nearest interior corner cell.
        slip_grid[0, 0, :, :] = slip_grid[1, 1, :, :]
        slip_grid[0, -1, :, :] = slip_grid[1, -2, :, :]
        slip_grid[-1, 0, :, :] = slip_grid[-2, 1, :, :]
        slip_grid[-1, -1, :, :] = slip_grid[-2, -2, :, :]

        # Edges copy the adjacent interior row/column.
        slip_grid[1:-1, 0, :, :] = slip_grid[1:-1, 1, :, :]
        slip_grid[1:-1, -1, :, :] = slip_grid[1:-1, -2, :, :]
        slip_grid[0, 1:-1, :, :] = slip_grid[1, 1:-1, :, :]
        slip_grid[-1, 1:-1, :, :] = slip_grid[-2, 1:-1, :, :]

        def make_grid(patch_parameter):
            # Build the same edge-replicated padded grid for a scalar
            # per-patch parameter (used for lambda and mu below).
            grid = num.zeros((self.nx + 2, self.ny + 2))
            grid[1:-1, 1:-1] = patch_parameter.reshape(self.nx, self.ny)

            grid[0, 0] = grid[1, 1]
            grid[0, -1] = grid[1, -2]
            grid[-1, 0] = grid[-2, 1]
            grid[-1, -1] = grid[-2, -2]

            grid[1:-1, 0] = grid[1:-1, 1]
            grid[1:-1, -1] = grid[1:-1, -2]
            grid[0, 1:-1] = grid[1, 1:-1]
            grid[-1, 1:-1] = grid[-2, 1:-1]

            return grid

        lamb = self.get_patch_attribute('lamb')
        mu = self.get_patch_attribute('shearmod')

        lamb_grid = make_grid(lamb)
        mu_grid = make_grid(mu)

        # Axis coordinates of the padded grids; the ghost cells sit half a
        # patch outside the fault plane.
        coords_x = num.zeros(self.nx + 2)
        coords_x[1:-1] = patch_coords[:, 0, 0]
        coords_x[0] = coords_x[1] - pln / 2
        coords_x[-1] = coords_x[-2] + pln / 2

        coords_y = num.zeros(self.ny + 2)
        coords_y[1:-1] = patch_coords[0, :, 1]
        coords_y[0] = coords_y[1] - pwd / 2
        coords_y[-1] = coords_y[-2] + pwd / 2

        slip_interp = RegularGridInterpolator(
            (coords_x, coords_y, slip_times),
            slip_grid, method='nearest')

        lamb_interp = RegularGridInterpolator(
            (coords_x, coords_y),
            lamb_grid, method='nearest')

        mu_interp = RegularGridInterpolator(
            (coords_x, coords_y),
            mu_grid, method='nearest')

        # discretize basesources: sub-sample each patch finely enough for the
        # GF store spacing, reduced by the decimation factor.
        mindeltagf = min(tuple(
            (self.length / self.nx, self.width / self.ny) +
            tuple(store.config.deltas)))

        nl = int((1. / self.decimation_factor) *
                 num.ceil(pln / mindeltagf)) + 1
        nw = int((1. / self.decimation_factor) *
                 num.ceil(pwd / mindeltagf)) + 1
        nsrc_patch = int(nl * nw)
        dl = pln / nl
        dw = pwd / nw

        patch_area = dl * dw

        # Local sub-source coordinates, centered within one patch.
        xl = num.linspace(-0.5 * (pln - dl), 0.5 * (pln - dl), nl)
        xw = num.linspace(-0.5 * (pwd - dw), 0.5 * (pwd - dw), nw)

        base_coords = num.zeros((nsrc_patch, 3))
        base_coords[:, 0] = num.tile(xl, nw)
        base_coords[:, 1] = num.repeat(xw, nl)
        base_coords = num.tile(base_coords, (npatches, 1))

        # Patch center coordinates relative to the fault center.
        center_coords = num.zeros((npatches, 3))
        center_coords[:, 0] = num.repeat(
            num.arange(self.nx) * pln + pln / 2, self.ny) - self.length / 2
        center_coords[:, 1] = num.tile(
            num.arange(self.ny) * pwd + pwd / 2, self.nx) - self.width / 2

        # NOTE(review): subtraction (not addition) of the center coordinates
        # here; presumably this matches the interpolator's coordinate
        # convention above -- confirm against _discretize_points.
        base_coords -= center_coords.repeat(nsrc_patch, axis=0)
        nbaselocs = base_coords.shape[0]

        base_interp = base_coords.repeat(ntimes, axis=0)

        # Third column temporarily holds time for the 3-D slip interpolation.
        base_times = num.tile(slip_times, nbaselocs)
        base_interp[:, 0] -= anch_x * self.length / 2
        base_interp[:, 1] -= anch_y * self.width / 2
        base_interp[:, 2] = base_times

        _, _, _, _, time_interpolator, _ = self.get_vr_time_interpolators(
            store, interpolation=interpolation)

        time_eikonal_max = time_interpolator.values.max()

        nbasesrcs = base_interp.shape[0]
        delta_slip = slip_interp(base_interp).reshape(nbaselocs, ntimes, 3)
        lamb = lamb_interp(base_interp[:, :2]).ravel()
        mu = mu_interp(base_interp[:, :2]).ravel()

        if False:
            # Debug plot of the interpolated slip distribution; intentionally
            # disabled.
            try:
                import matplotlib.pyplot as plt
                coords = base_coords.copy()
                norm = num.sum(num.linalg.norm(delta_slip, axis=2), axis=1)
                plt.scatter(coords[:, 0], coords[:, 1], c=norm)
                plt.show()
            except AttributeError:
                pass

        # Reset the time column and rotate the in-plane coordinates into the
        # geographic north/east/down frame, then shift to the source origin.
        base_interp[:, 2] = 0.
        rotmat = pmt.euler_to_matrix(self.dip * d2r, self.strike * d2r, 0.0)
        base_interp = num.dot(rotmat.T, base_interp.T).T
        base_interp[:, 0] += self.north_shift
        base_interp[:, 1] += self.east_shift
        base_interp[:, 2] += self.depth

        slip_strike = delta_slip[:, :, 0].ravel()
        slip_dip = delta_slip[:, :, 1].ravel()
        slip_norm = delta_slip[:, :, 2].ravel()

        # Shear slip magnitude and rake angle from the strike/dip components.
        slip_shear = num.linalg.norm([slip_strike, slip_dip], axis=0)
        slip_rake = r2d * num.arctan2(slip_dip, slip_strike)

        m6s = okada_ext.patch2m6(
            strikes=num.full(nbasesrcs, self.strike, dtype=float),
            dips=num.full(nbasesrcs, self.dip, dtype=float),
            rakes=slip_rake,
            disl_shear=slip_shear,
            disl_norm=slip_norm,
            lamb=lamb,
            mu=mu,
            nthreads=self.nthreads)

        # Scale moment tensors by the sub-source area.
        m6s *= patch_area

        dl = -self.patches[0].al1 + self.patches[0].al2
        dw = -self.patches[0].aw1 + self.patches[0].aw2

        # Clamp times to the maximum eikonal rupture time.
        base_times[base_times > time_eikonal_max] = time_eikonal_max

        ds = meta.DiscretizedMTSource(
            lat=self.lat,
            lon=self.lon,
            times=base_times + self.time,
            north_shifts=base_interp[:, 0],
            east_shifts=base_interp[:, 1],
            depths=base_interp[:, 2],
            m6s=m6s,
            dl=dl,
            dw=dw,
            nl=self.nx,
            nw=self.ny)

        return ds

3477 

3478 def calc_coef_mat(self): 

3479 ''' 

3480 Calculate coefficients connecting tractions and dislocations. 

3481 ''' 

3482 if not self.patches: 

3483 raise ValueError( 

3484 'Patches are needed. Please calculate them first.') 

3485 

3486 self.coef_mat = make_okada_coefficient_matrix( 

3487 self.patches, nthreads=self.nthreads, pure_shear=self.pure_shear) 

3488 

3489 def get_patch_attribute(self, attr): 

3490 ''' 

3491 Get patch attributes. 

3492 

3493 :param attr: 

3494 Name of selected attribute (see 

3495 :py:class`pyrocko.modelling.okada.OkadaSource`). 

3496 :type attr: 

3497 str 

3498 

3499 :returns: 

3500 Array with attribute value for each fault patch. 

3501 :rtype: 

3502 :py:class:`~numpy.ndarray` 

3503 

3504 ''' 

3505 if not self.patches: 

3506 raise ValueError( 

3507 'Patches are needed. Please calculate them first.') 

3508 return num.array([getattr(p, attr) for p in self.patches]) 

3509 

3510 def get_slip( 

3511 self, 

3512 time=None, 

3513 scale_slip=True, 

3514 interpolation='nearest_neighbor', 

3515 **kwargs): 

3516 ''' 

3517 Get slip per subfault patch for given time after rupture start. 

3518 

3519 :param time: 

3520 Time after origin [s], for which slip is computed. If not 

3521 given, final static slip is returned. 

3522 :type time: 

3523 optional, float > 0. 

3524 

3525 :param scale_slip: 

3526 If ``True`` and :py:attr:`slip` given, all slip values are scaled 

3527 to fit the given maximum slip. 

3528 :type scale_slip: 

3529 optional, bool 

3530 

3531 :param interpolation: 

3532 Interpolation method to use (choose between ``'nearest_neighbor'`` 

3533 and ``'multilinear'``). 

3534 :type interpolation: 

3535 optional, str 

3536 

3537 :returns: 

3538 Inverted dislocations (:math:`u_{strike}, u_{dip}, u_{tensile}`) 

3539 for each source patch. 

3540 :rtype: 

3541 :py:class:`~numpy.ndarray`: ``(n_sources, 3)`` 

3542 ''' 

3543 

3544 if self.patches is None: 

3545 raise ValueError( 

3546 'Please discretize the source first (discretize_patches())') 

3547 npatches = len(self.patches) 

3548 tractions = self.get_tractions() 

3549 time_patch_max = self.get_patch_attribute('time').max() - self.time 

3550 

3551 time_patch = time 

3552 if time is None: 

3553 time_patch = time_patch_max 

3554 

3555 if self.coef_mat is None: 

3556 self.calc_coef_mat() 

3557 

3558 if tractions.shape != (npatches, 3): 

3559 raise AttributeError( 

3560 'The traction vector is of invalid shape.' 

3561 ' Required shape is (npatches, 3)') 

3562 

3563 patch_mask = num.ones(npatches, dtype=bool) 

3564 if self.patch_mask is not None: 

3565 patch_mask = self.patch_mask 

3566 

3567 times = self.get_patch_attribute('time') - self.time 

3568 times[~patch_mask] = time_patch + 1. # exlcude unmasked patches 

3569 relevant_sources = num.nonzero(times <= time_patch)[0] 

3570 disloc_est = num.zeros_like(tractions) 

3571 

3572 if self.smooth_rupture: 

3573 patch_activation = num.zeros(npatches) 

3574 

3575 nx, ny, times, vr, time_interpolator, vr_interpolator = \ 

3576 self.get_vr_time_interpolators( 

3577 store, interpolation=interpolation) 

3578 

3579 # Getting the native Eikonal grid, bit hackish 

3580 points_x = num.round(time_interpolator.grid[0], decimals=2) 

3581 points_y = num.round(time_interpolator.grid[1], decimals=2) 

3582 times_eikonal = time_interpolator.values 

3583 

3584 time_max = time 

3585 if time is None: 

3586 time_max = times_eikonal.max() 

3587 

3588 for ip, p in enumerate(self.patches): 

3589 ul = num.round((p.ix + p.al1, p.iy + p.aw1), decimals=2) 

3590 lr = num.round((p.ix + p.al2, p.iy + p.aw2), decimals=2) 

3591 

3592 idx_length = num.logical_and( 

3593 points_x >= ul[0], points_x <= lr[0]) 

3594 idx_width = num.logical_and( 

3595 points_y >= ul[1], points_y <= lr[1]) 

3596 

3597 times_patch = times_eikonal[num.ix_(idx_length, idx_width)] 

3598 if times_patch.size == 0: 

3599 raise AttributeError('could not use smooth_rupture') 

3600 

3601 patch_activation[ip] = \ 

3602 (times_patch <= time_max).sum() / times_patch.size 

3603 

3604 if time_patch == 0 and time_patch != time_patch_max: 

3605 patch_activation[ip] = 0. 

3606 

3607 patch_activation[~patch_mask] = 0. # exlcude unmasked patches 

3608 

3609 relevant_sources = num.nonzero(patch_activation > 0.)[0] 

3610 

3611 if relevant_sources.size == 0: 

3612 return disloc_est 

3613 

3614 indices_disl = num.repeat(relevant_sources * 3, 3) 

3615 indices_disl[1::3] += 1 

3616 indices_disl[2::3] += 2 

3617 

3618 disloc_est[relevant_sources] = invert_fault_dislocations_bem( 

3619 stress_field=tractions[relevant_sources, :].ravel(), 

3620 coef_mat=self.coef_mat[indices_disl, :][:, indices_disl], 

3621 pure_shear=self.pure_shear, nthreads=self.nthreads, 

3622 epsilon=None, 

3623 **kwargs) 

3624 

3625 if self.smooth_rupture: 

3626 disloc_est *= patch_activation[:, num.newaxis] 

3627 

3628 if scale_slip and self.slip is not None: 

3629 disloc_tmax = num.zeros(npatches) 

3630 

3631 indices_disl = num.repeat(num.nonzero(patch_mask)[0] * 3, 3) 

3632 indices_disl[1::3] += 1 

3633 indices_disl[2::3] += 2 

3634 

3635 disloc_tmax[patch_mask] = num.linalg.norm( 

3636 invert_fault_dislocations_bem( 

3637 stress_field=tractions[patch_mask, :].ravel(), 

3638 coef_mat=self.coef_mat[indices_disl, :][:, indices_disl], 

3639 pure_shear=self.pure_shear, nthreads=self.nthreads, 

3640 epsilon=None, 

3641 **kwargs), axis=1) 

3642 

3643 disloc_tmax_max = disloc_tmax.max() 

3644 if disloc_tmax_max == 0.: 

3645 logger.warning( 

3646 'slip scaling not performed. Maximum slip is 0.') 

3647 

3648 disloc_est *= self.slip / disloc_tmax_max 

3649 

3650 return disloc_est 

3651 

3652 def get_delta_slip( 

3653 self, 

3654 store=None, 

3655 deltat=None, 

3656 delta=True, 

3657 interpolation='nearest_neighbor', 

3658 **kwargs): 

3659 ''' 

3660 Get slip change snapshots. 

3661 

3662 The time interval, within which the slip changes are computed is 

3663 determined by the sampling rate of the Green's function database or 

3664 ``deltat``. Additional ``**kwargs`` are passed to :py:meth:`get_slip`. 

3665 

3666 :param store: 

3667 Green's function database (needs to cover whole region of of the 

3668 source). Its sampling interval is used as time increment for slip 

3669 difference calculation. Either ``deltat`` or ``store`` should be 

3670 given. 

3671 :type store: 

3672 optional, :py:class:`~pyrocko.gf.store.Store` 

3673 

3674 :param deltat: 

3675 Time interval for slip difference calculation [s]. Either 

3676 ``deltat`` or ``store`` should be given. 

3677 :type deltat: 

3678 optional, float 

3679 

3680 :param delta: 

3681 If ``True``, slip differences between two time steps are given. If 

3682 ``False``, cumulative slip for all time steps. 

3683 :type delta: 

3684 optional, bool 

3685 

3686 :param interpolation: 

3687 Interpolation method to use (choose between ``'nearest_neighbor'`` 

3688 and ``'multilinear'``). 

3689 :type interpolation: 

3690 optional, str 

3691 

3692 :returns: 

3693 Displacement changes(:math:`\\Delta u_{strike}, 

3694 \\Delta u_{dip} , \\Delta u_{tensile}`) for each source patch and 

3695 time; corner times, for which delta slip is computed. The order of 

3696 displacement changes array is: 

3697 

3698 .. math:: 

3699 

3700 &[[\\\\ 

3701 &[\\Delta u_{strike, patch1, t1}, 

3702 \\Delta u_{dip, patch1, t1}, 

3703 \\Delta u_{tensile, patch1, t1}],\\\\ 

3704 &[\\Delta u_{strike, patch1, t2}, 

3705 \\Delta u_{dip, patch1, t2}, 

3706 \\Delta u_{tensile, patch1, t2}]\\\\ 

3707 &], [\\\\ 

3708 &[\\Delta u_{strike, patch2, t1}, ...],\\\\ 

3709 &[\\Delta u_{strike, patch2, t2}, ...]]]\\\\ 

3710 

3711 :rtype: :py:class:`~numpy.ndarray`: ``(n_sources, n_times, 3)``, 

3712 :py:class:`~numpy.ndarray`: ``(n_times, )`` 

3713 ''' 

3714 if store and deltat: 

3715 raise AttributeError( 

3716 'Argument collision. ' 

3717 'Please define only the store or the deltat argument.') 

3718 

3719 if store: 

3720 deltat = store.config.deltat 

3721 

3722 if not deltat: 

3723 raise AttributeError('Please give a GF store or set deltat.') 

3724 

3725 npatches = len(self.patches) 

3726 

3727 _, _, _, _, time_interpolator, _ = self.get_vr_time_interpolators( 

3728 store, interpolation=interpolation) 

3729 tmax = time_interpolator.values.max() 

3730 

3731 calc_times = num.arange(0., tmax + deltat, deltat) 

3732 calc_times[calc_times > tmax] = tmax 

3733 

3734 disloc_est = num.zeros((npatches, calc_times.size, 3)) 

3735 

3736 for itime, t in enumerate(calc_times): 

3737 disloc_est[:, itime, :] = self.get_slip( 

3738 time=t, scale_slip=False, **kwargs) 

3739 

3740 if self.slip: 

3741 disloc_tmax = num.linalg.norm( 

3742 self.get_slip(scale_slip=False, time=tmax), 

3743 axis=1) 

3744 

3745 disloc_tmax_max = disloc_tmax.max() 

3746 if disloc_tmax_max == 0.: 

3747 logger.warning( 

3748 'Slip scaling not performed. Maximum slip is 0.') 

3749 else: 

3750 disloc_est *= self.slip / disloc_tmax_max 

3751 

3752 if not delta: 

3753 return disloc_est, calc_times 

3754 

3755 # if we have only one timestep there is no gradient 

3756 if calc_times.size > 1: 

3757 disloc_init = disloc_est[:, 0, :] 

3758 disloc_est = num.diff(disloc_est, axis=1) 

3759 disloc_est = num.concatenate(( 

3760 disloc_init[:, num.newaxis, :], disloc_est), axis=1) 

3761 

3762 calc_times = calc_times 

3763 

3764 return disloc_est, calc_times 

3765 

3766 def get_slip_rate(self, *args, **kwargs): 

3767 ''' 

3768 Get slip rate inverted from patches. 

3769 

3770 The time interval, within which the slip rates are computed is 

3771 determined by the sampling rate of the Green's function database or 

3772 ``deltat``. Additional ``*args`` and ``**kwargs`` are passed to 

3773 :py:meth:`get_delta_slip`. 

3774 

3775 :returns: 

3776 Slip rates (:math:`\\Delta u_{strike}/\\Delta t`, 

3777 :math:`\\Delta u_{dip}/\\Delta t, \\Delta u_{tensile}/\\Delta t`) 

3778 for each source patch and time; corner times, for which slip rate 

3779 is computed. The order of sliprate array is: 

3780 

3781 .. math:: 

3782 

3783 &[[\\\\ 

3784 &[\\Delta u_{strike, patch1, t1}/\\Delta t, 

3785 \\Delta u_{dip, patch1, t1}/\\Delta t, 

3786 \\Delta u_{tensile, patch1, t1}/\\Delta t],\\\\ 

3787 &[\\Delta u_{strike, patch1, t2}/\\Delta t, 

3788 \\Delta u_{dip, patch1, t2}/\\Delta t, 

3789 \\Delta u_{tensile, patch1, t2}/\\Delta t]], [\\\\ 

3790 &[\\Delta u_{strike, patch2, t1}/\\Delta t, ...],\\\\ 

3791 &[\\Delta u_{strike, patch2, t2}/\\Delta t, ...]]]\\\\ 

3792 

3793 :rtype: :py:class:`~numpy.ndarray`: ``(n_sources, n_times, 3)``, 

3794 :py:class:`~numpy.ndarray`: ``(n_times, )`` 

3795 ''' 

3796 ddisloc_est, calc_times = self.get_delta_slip( 

3797 *args, delta=True, **kwargs) 

3798 

3799 dt = num.concatenate( 

3800 [(num.diff(calc_times)[0], ), num.diff(calc_times)]) 

3801 slip_rate = num.linalg.norm(ddisloc_est, axis=2) / dt 

3802 

3803 return slip_rate, calc_times 

3804 

3805 def get_moment_rate_patches(self, *args, **kwargs): 

3806 ''' 

3807 Get scalar seismic moment rate for each patch individually. 

3808 

3809 Additional ``*args`` and ``**kwargs`` are passed to 

3810 :py:meth:`get_slip_rate`. 

3811 

3812 :returns: 

3813 Seismic moment rate for each source patch and time; corner times, 

3814 for which patch moment rate is computed based on slip rate. The 

3815 order of the moment rate array is: 

3816 

3817 .. math:: 

3818 

3819 &[\\\\ 

3820 &[(\\Delta M / \\Delta t)_{patch1, t1}, 

3821 (\\Delta M / \\Delta t)_{patch1, t2}, ...],\\\\ 

3822 &[(\\Delta M / \\Delta t)_{patch2, t1}, 

3823 (\\Delta M / \\Delta t)_{patch, t2}, ...],\\\\ 

3824 &[...]]\\\\ 

3825 

3826 :rtype: :py:class:`~numpy.ndarray`: ``(n_sources, n_times)``, 

3827 :py:class:`~numpy.ndarray`: ``(n_times, )`` 

3828 ''' 

3829 slip_rate, calc_times = self.get_slip_rate(*args, **kwargs) 

3830 

3831 shear_mod = self.get_patch_attribute('shearmod') 

3832 p_length = self.get_patch_attribute('length') 

3833 p_width = self.get_patch_attribute('width') 

3834 

3835 dA = p_length * p_width 

3836 

3837 mom_rate = shear_mod[:, num.newaxis] * slip_rate * dA[:, num.newaxis] 

3838 

3839 return mom_rate, calc_times 

3840 

3841 def get_moment_rate(self, store, target=None, deltat=None): 

3842 ''' 

3843 Get seismic source moment rate for the total source (STF). 

3844 

3845 :param store: 

3846 Green's function database (needs to cover whole region of of the 

3847 source). Its ``deltat`` [s] is used as time increment for slip 

3848 difference calculation. Either ``deltat`` or ``store`` should be 

3849 given. 

3850 :type store: 

3851 :py:class:`~pyrocko.gf.store.Store` 

3852 

3853 :param target: 

3854 Target information, needed for interpolation method. 

3855 :type target: 

3856 optional, :py:class:`~pyrocko.gf.targets.Target` 

3857 

3858 :param deltat: 

3859 Time increment for slip difference calculation [s]. If not given 

3860 ``store.deltat`` is used. 

3861 :type deltat: 

3862 optional, float 

3863 

3864 :return: 

3865 Seismic moment rate [Nm/s] for each time; corner times, for which 

3866 moment rate is computed. The order of the moment rate array is: 

3867 

3868 .. math:: 

3869 

3870 &[\\\\ 

3871 &(\\Delta M / \\Delta t)_{t1},\\\\ 

3872 &(\\Delta M / \\Delta t)_{t2},\\\\ 

3873 &...]\\\\ 

3874 

3875 :rtype: 

3876 :py:class:`~numpy.ndarray`: ``(n_times, )``, 

3877 :py:class:`~numpy.ndarray`: ``(n_times, )`` 

3878 ''' 

3879 if not deltat: 

3880 deltat = store.config.deltat 

3881 return self.discretize_basesource( 

3882 store, target=target).get_moment_rate(deltat) 

3883 

3884 def get_moment(self, *args, **kwargs): 

3885 ''' 

3886 Get seismic cumulative moment. 

3887 

3888 Additional ``*args`` and ``**kwargs`` are passed to 

3889 :py:meth:`get_magnitude`. 

3890 

3891 :returns: 

3892 Cumulative seismic moment in [Nm]. 

3893 :rtype: 

3894 float 

3895 ''' 

3896 return float(pmt.magnitude_to_moment(self.get_magnitude( 

3897 *args, **kwargs))) 

3898 

3899 def rescale_slip(self, magnitude=None, moment=None, **kwargs): 

3900 ''' 

3901 Rescale source slip based on given target magnitude or seismic moment. 

3902 

3903 Rescale the maximum source slip to fit the source moment magnitude or 

3904 seismic moment to the given target values. Either ``magnitude`` or 

3905 ``moment`` need to be given. Additional ``**kwargs`` are passed to 

3906 :py:meth:`get_moment`. 

3907 

3908 :param magnitude: 

3909 Target moment magnitude :math:`M_\\mathrm{w}` as in 

3910 [Hanks and Kanamori, 1979] 

3911 :type magnitude: 

3912 optional, float 

3913 

3914 :param moment: 

3915 Target seismic moment :math:`M_0` [Nm]. 

3916 :type moment: 

3917 optional, float 

3918 ''' 

3919 if self.slip is None: 

3920 self.slip = 1. 

3921 logger.warning('No slip found for rescaling. ' 

3922 'An initial slip of 1 m is assumed.') 

3923 

3924 if magnitude is None and moment is None: 

3925 raise ValueError( 

3926 'Either target magnitude or moment need to be given.') 

3927 

3928 moment_init = self.get_moment(**kwargs) 

3929 

3930 if magnitude is not None: 

3931 moment = pmt.magnitude_to_moment(magnitude) 

3932 

3933 self.slip *= moment / moment_init 

3934 

3935 def get_centroid(self, store, *args, **kwargs): 

3936 ''' 

3937 Centroid of the pseudo dynamic rupture model. 

3938 

3939 The centroid location and time are derived from the locations and times 

3940 of the individual patches weighted with their moment contribution. 

3941 Additional ``**kwargs`` are passed to :py:meth:`pyrocko_moment_tensor`. 

3942 

3943 :param store: 

3944 Green's function database (needs to cover whole region of of the 

3945 source). Its ``deltat`` [s] is used as time increment for slip 

3946 difference calculation. Either ``deltat`` or ``store`` should be 

3947 given. 

3948 :type store: 

3949 :py:class:`~pyrocko.gf.store.Store` 

3950 

3951 :returns: 

3952 The centroid location and associated moment tensor. 

3953 :rtype: 

3954 :py:class:`pyrocko.model.Event` 

3955 ''' 

3956 _, _, _, _, time, _ = self.get_vr_time_interpolators(store) 

3957 t_max = time.values.max() 

3958 

3959 moment_rate, times = self.get_moment_rate_patches(deltat=t_max) 

3960 

3961 moment = num.sum(moment_rate * times, axis=1) 

3962 weights = moment / moment.sum() 

3963 

3964 norths = self.get_patch_attribute('north_shift') 

3965 easts = self.get_patch_attribute('east_shift') 

3966 depths = self.get_patch_attribute('depth') 

3967 

3968 centroid_n = num.sum(weights * norths) 

3969 centroid_e = num.sum(weights * easts) 

3970 centroid_d = num.sum(weights * depths) 

3971 

3972 centroid_lat, centroid_lon = ne_to_latlon( 

3973 self.lat, self.lon, centroid_n, centroid_e) 

3974 

3975 moment_rate_, times = self.get_moment_rate(store) 

3976 delta_times = num.concatenate(( 

3977 [times[1] - times[0]], 

3978 num.diff(times))) 

3979 moment_src = delta_times * moment_rate 

3980 

3981 centroid_t = num.sum( 

3982 moment_src / num.sum(moment_src) * times) + self.time 

3983 

3984 mt = self.pyrocko_moment_tensor(store, *args, **kwargs) 

3985 

3986 return model.Event( 

3987 lat=centroid_lat, 

3988 lon=centroid_lon, 

3989 depth=centroid_d, 

3990 time=centroid_t, 

3991 moment_tensor=mt, 

3992 magnitude=mt.magnitude, 

3993 duration=t_max) 

3994 

3995 

class DoubleDCSource(SourceWithMagnitude):
    '''
    Two double-couple point sources separated in space and time.

    Moment share between the sub-sources is controlled by the parameter
    ``mix``. The position of the subsources is dependent on the moment
    distribution between the two sources. Depth, east and north shift are
    given for the centroid between the two double-couples. The subsources
    will be positioned according to their moment shares around this centroid
    position. This is done according to their delta parameters, which are
    therefore in relation to that centroid. Note that depth of the
    subsources therefore can be depth+/-delta_depth. For shallow earthquakes
    therefore the depth has to be chosen deeper to avoid sampling above
    surface.
    '''

    strike1 = Float.T(
        default=0.0,
        help='strike direction in [deg], measured clockwise from north')

    dip1 = Float.T(
        default=90.0,
        help='dip angle in [deg], measured downward from horizontal')

    azimuth = Float.T(
        default=0.0,
        help='azimuth to second double-couple [deg], '
             'measured at first, clockwise from north')

    rake1 = Float.T(
        default=0.0,
        help='rake angle in [deg], '
             'measured counter-clockwise from right-horizontal '
             'in on-plane view')

    strike2 = Float.T(
        default=0.0,
        help='strike direction in [deg], measured clockwise from north')

    dip2 = Float.T(
        default=90.0,
        help='dip angle in [deg], measured downward from horizontal')

    rake2 = Float.T(
        default=0.0,
        help='rake angle in [deg], '
             'measured counter-clockwise from right-horizontal '
             'in on-plane view')

    delta_time = Float.T(
        default=0.0,
        help='separation of double-couples in time (t2-t1) [s]')

    delta_depth = Float.T(
        default=0.0,
        help='difference in depth (z2-z1) [m]')

    distance = Float.T(
        default=0.0,
        help='distance between the two double-couples [m]')

    mix = Float.T(
        default=0.5,
        help='how to distribute the moment to the two doublecouples '
             'mix=0 -> m1=1 and m2=0; mix=1 -> m1=0, m2=1')

    stf1 = STF.T(
        optional=True,
        help='Source time function of subsource 1 '
             '(if given, overrides STF from attribute :py:gattr:`Source.stf`)')

    stf2 = STF.T(
        optional=True,
        help='Source time function of subsource 2 '
             '(if given, overrides STF from attribute :py:gattr:`Source.stf`)')

    discretized_source_class = meta.DiscretizedMTSource

    def base_key(self):
        # Cache key covers position/time, both sub-source mechanisms, their
        # separation parameters and both effective pre-STFs.
        return (
            self.time, self.depth, self.lat, self.north_shift,
            self.lon, self.east_shift, type(self).__name__) + \
            self.effective_stf1_pre().base_key() + \
            self.effective_stf2_pre().base_key() + (
                self.strike1, self.dip1, self.rake1,
                self.strike2, self.dip2, self.rake2,
                self.delta_time, self.delta_depth,
                self.azimuth, self.distance, self.mix)

    def get_factor(self):
        # Total scalar moment; per-subsource shares are applied via ``mix``.
        return self.moment

    def effective_stf1_pre(self):
        # Per-subsource STF wins over the common one; unit pulse as fallback.
        return self.stf1 or self.stf or g_unit_pulse

    def effective_stf2_pre(self):
        return self.stf2 or self.stf or g_unit_pulse

    def effective_stf_post(self):
        # STFs are applied before stacking; nothing to convolve afterwards.
        return g_unit_pulse

    def split(self):
        '''
        Decompose into the two constituent :py:class:`DCSource` objects.

        The sub-sources are placed on either side of the centroid, offset
        along ``azimuth`` by ``distance`` and shifted in time and depth,
        each weighted by its moment share.
        '''
        a1 = 1.0 - self.mix
        a2 = self.mix
        delta_north = math.cos(self.azimuth * d2r) * self.distance
        delta_east = math.sin(self.azimuth * d2r) * self.distance

        dc1 = DCSource(
            lat=self.lat,
            lon=self.lon,
            time=self.time - self.delta_time * a2,
            north_shift=self.north_shift - delta_north * a2,
            east_shift=self.east_shift - delta_east * a2,
            depth=self.depth - self.delta_depth * a2,
            moment=self.moment * a1,
            strike=self.strike1,
            dip=self.dip1,
            rake=self.rake1,
            stf=self.stf1 or self.stf)

        dc2 = DCSource(
            lat=self.lat,
            lon=self.lon,
            time=self.time + self.delta_time * a1,
            north_shift=self.north_shift + delta_north * a1,
            east_shift=self.east_shift + delta_east * a1,
            depth=self.depth + self.delta_depth * a1,
            moment=self.moment * a2,
            strike=self.strike2,
            dip=self.dip2,
            rake=self.rake2,
            stf=self.stf2 or self.stf)

        return [dc1, dc2]

    def discretize_basesource(self, store, target=None):
        # Unit moment tensors weighted by the moment shares; the total
        # moment is applied later through get_factor().
        a1 = 1.0 - self.mix
        a2 = self.mix
        mot1 = pmt.MomentTensor(strike=self.strike1, dip=self.dip1,
                                rake=self.rake1, scalar_moment=a1)
        mot2 = pmt.MomentTensor(strike=self.strike2, dip=self.dip2,
                                rake=self.rake2, scalar_moment=a2)

        delta_north = math.cos(self.azimuth * d2r) * self.distance
        delta_east = math.sin(self.azimuth * d2r) * self.distance

        # Each subsource is discretized with its own STF and time offset,
        # mirroring the placement logic in split().
        times1, amplitudes1 = self.effective_stf1_pre().discretize_t(
            store.config.deltat, self.time - self.delta_time * a2)

        times2, amplitudes2 = self.effective_stf2_pre().discretize_t(
            store.config.deltat, self.time + self.delta_time * a1)

        nt1 = times1.size
        nt2 = times2.size

        ds = meta.DiscretizedMTSource(
            lat=self.lat,
            lon=self.lon,
            times=num.concatenate((times1, times2)),
            north_shifts=num.concatenate((
                num.repeat(self.north_shift - delta_north * a2, nt1),
                num.repeat(self.north_shift + delta_north * a1, nt2))),
            east_shifts=num.concatenate((
                num.repeat(self.east_shift - delta_east * a2, nt1),
                num.repeat(self.east_shift + delta_east * a1, nt2))),
            depths=num.concatenate((
                num.repeat(self.depth - self.delta_depth * a2, nt1),
                num.repeat(self.depth + self.delta_depth * a1, nt2))),
            m6s=num.vstack((
                mot1.m6()[num.newaxis, :] * amplitudes1[:, num.newaxis],
                mot2.m6()[num.newaxis, :] * amplitudes2[:, num.newaxis])))

        return ds

    def pyrocko_moment_tensor(self, store=None, target=None):
        # Combined mechanism: moment-weighted sum of the two sub-tensors.
        a1 = 1.0 - self.mix
        a2 = self.mix
        mot1 = pmt.MomentTensor(strike=self.strike1, dip=self.dip1,
                                rake=self.rake1,
                                scalar_moment=a1 * self.moment)
        mot2 = pmt.MomentTensor(strike=self.strike2, dip=self.dip2,
                                rake=self.rake2,
                                scalar_moment=a2 * self.moment)
        return pmt.MomentTensor(m=mot1.m() + mot2.m())

    def pyrocko_event(self, store=None, target=None, **kwargs):
        return SourceWithMagnitude.pyrocko_event(
            self, store, target,
            moment_tensor=self.pyrocko_moment_tensor(store, target),
            **kwargs)

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        # Both subsources start out with the same mechanism; mix=0.0 puts
        # all moment on subsource 1.
        d = {}
        mt = ev.moment_tensor
        if mt:
            (strike, dip, rake), _ = mt.both_strike_dip_rake()
            d.update(
                strike1=float(strike),
                dip1=float(dip),
                rake1=float(rake),
                strike2=float(strike),
                dip2=float(dip),
                rake2=float(rake),
                mix=0.0,
                magnitude=float(mt.moment_magnitude()))

        d.update(kwargs)
        source = super(DoubleDCSource, cls).from_pyrocko_event(ev, **d)
        source.stf1 = source.stf
        source.stf2 = HalfSinusoidSTF(effective_duration=0.)
        source.stf = None
        return source

4211 

4212 

class RingfaultSource(SourceWithMagnitude):
    '''
    A ring fault with vertical doublecouples.
    '''

    diameter = Float.T(
        default=1.0,
        help='diameter of the ring in [m]')

    sign = Float.T(
        default=1.0,
        help='inside of the ring moves up (+1) or down (-1)')

    strike = Float.T(
        default=0.0,
        help='strike direction of the ring plane, clockwise from north,'
             ' in [deg]')

    dip = Float.T(
        default=0.0,
        help='dip angle of the ring plane from horizontal in [deg]')

    npointsources = Int.T(
        default=360,
        help='number of point sources to use')

    discretized_source_class = meta.DiscretizedMTSource

    def base_key(self):
        return Source.base_key(self) + (
            self.strike, self.dip, self.diameter, self.npointsources)

    def get_factor(self):
        # ``sign`` selects the slip polarity, ``moment`` the amplitude.
        return self.sign * self.moment

    def discretize_basesource(self, store=None, target=None):
        n = self.npointsources
        phi = num.linspace(0, 2.0 * num.pi, n, endpoint=False)

        # Points evenly distributed on a circle of the given diameter in
        # the (north, east, down) frame of the unrotated ring plane.
        points = num.zeros((n, 3))
        points[:, 0] = num.cos(phi) * 0.5 * self.diameter
        points[:, 1] = num.sin(phi) * 0.5 * self.diameter

        # Orient the ring plane by dip and strike, then translate to the
        # source position.
        rotmat = pmt.euler_to_matrix(self.dip * d2r, self.strike * d2r, 0.0)
        points = num.dot(rotmat.T, points.T).T  # !!! ?

        points[:, 0] += self.north_shift
        points[:, 1] += self.east_shift
        points[:, 2] += self.depth

        # Base mechanism: vertical dip-slip double-couple; the total unit
        # moment is shared equally among the n point sources.
        m = num.array(pmt.MomentTensor(strike=90., dip=90., rake=-90.,
                                       scalar_moment=1.0 / n).m())

        # One rotation about the vertical axis per point, so each
        # double-couple is oriented according to its position on the ring.
        rotmats = num.transpose(
            [[num.cos(phi), num.sin(phi), num.zeros(n)],
             [-num.sin(phi), num.cos(phi), num.zeros(n)],
             [num.zeros(n), num.zeros(n), num.ones(n)]], (2, 0, 1))

        ms = num.zeros((n, 3, 3))
        for i in range(n):
            # Rotate within the ring plane first, then into the dipping,
            # striking plane orientation.
            mtemp = num.dot(rotmats[i].T, num.dot(m, rotmats[i]))
            ms[i, :, :] = num.dot(rotmat.T, num.dot(mtemp, rotmat))

        # Pack symmetric tensors into 6-component (m6) rows.
        m6s = num.vstack((ms[:, 0, 0], ms[:, 1, 1], ms[:, 2, 2],
                          ms[:, 0, 1], ms[:, 0, 2], ms[:, 1, 2])).T

        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)

        nt = times.size

        # Expand to one entry per (point, time-sample) combination.
        return meta.DiscretizedMTSource(
            times=num.tile(times, n),
            lat=self.lat,
            lon=self.lon,
            north_shifts=num.repeat(points[:, 0], nt),
            east_shifts=num.repeat(points[:, 1], nt),
            depths=num.repeat(points[:, 2], nt),
            m6s=num.repeat(m6s, nt, axis=0) * num.tile(
                amplitudes, n)[:, num.newaxis])

4293 

4294 

class CombiSource(Source):
    '''
    Composite source model.

    Wraps a list of arbitrary sub-sources. On construction, all
    sub-sources are normalized to a common geographic origin (the origin
    of the first sub-source) and the composite's reference position and
    time are set to the mean over the sub-sources.
    '''

    discretized_source_class = meta.DiscretizedMTSource

    subsources = List.T(Source.T())

    def __init__(self, subsources=None, **kwargs):
        '''
        :param subsources: non-empty list of :py:class:`Source` objects

        :raises BadRequest: if no sub-sources are given
        '''
        # Avoid a mutable default argument; ``None`` behaves like the
        # previous ``[]`` default (both end up raising BadRequest).
        subsources = subsources or []
        if not subsources:
            raise BadRequest(
                'Need at least one sub-source to create a CombiSource object.')

        lats = num.array(
            [subsource.lat for subsource in subsources], dtype=float)
        lons = num.array(
            [subsource.lon for subsource in subsources], dtype=float)

        lat, lon = lats[0], lons[0]
        # Bug fix: origins differ when *either* latitudes or longitudes
        # differ. Previously the ``not`` bound only to the latitude check
        # (`(not all_lats_equal) and all_lons_equal`), so differing
        # longitudes alone never triggered origin normalization.
        if not (num.all(lats == lat) and num.all(lons == lon)):
            subsources = [s.clone() for s in subsources]
            for subsource in subsources[1:]:
                # Re-express positions relative to the first sub-source's
                # origin; the first one already has (lat, lon).
                subsource.set_origin(lat, lon)

        # Reference position/time of the composite: mean of sub-sources.
        depth = float(num.mean([p.depth for p in subsources]))
        time = float(num.mean([p.time for p in subsources]))
        north_shift = float(num.mean([p.north_shift for p in subsources]))
        east_shift = float(num.mean([p.east_shift for p in subsources]))
        kwargs.update(
            time=time,
            lat=float(lat),
            lon=float(lon),
            north_shift=north_shift,
            east_shift=east_shift,
            depth=depth)

        Source.__init__(self, subsources=subsources, **kwargs)

    def get_factor(self):
        # Sub-source factors are applied individually in
        # discretize_basesource(); no global scaling.
        return 1.0

    def discretize_basesource(self, store, target=None):
        '''
        Discretize all sub-sources and combine them into one
        :py:class:`~pyrocko.gf.meta.DiscretizedMTSource`.
        '''
        dsources = []
        for sf in self.subsources:
            ds = sf.discretize_basesource(store, target)
            # Bake each sub-source's own amplitude factor into its moment
            # tensors, since the composite's get_factor() is 1.
            ds.m6s *= sf.get_factor()
            dsources.append(ds)

        return meta.DiscretizedMTSource.combine(dsources)

4345 

4346 

class SFSource(Source):
    '''
    A single force point source.

    Supported GF schemes: `'elastic5'`.
    '''

    discretized_source_class = meta.DiscretizedSFSource

    fn = Float.T(
        default=0.,
        help='northward component of single force [N]')

    fe = Float.T(
        default=0.,
        help='eastward component of single force [N]')

    fd = Float.T(
        default=0.,
        help='downward component of single force [N]')

    def __init__(self, **kwargs):
        Source.__init__(self, **kwargs)

    def base_key(self):
        # The three force components are part of the caching key.
        force_ned = (self.fn, self.fe, self.fd)
        return Source.base_key(self) + force_ned

    def get_factor(self):
        # Force amplitudes are baked into the discretized source directly.
        return 1.0

    def discretize_basesource(self, store, target=None):
        '''
        Spread the force vector over the discretized source time function.
        '''
        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)

        force_ned = num.array(
            [[self.fn, self.fe, self.fd]], dtype=float)
        forces = amplitudes[:, num.newaxis] * force_ned

        return meta.DiscretizedSFSource(
            forces=forces, **self._dparams_base_repeated(times))

    def pyrocko_event(self, store=None, target=None, **kwargs):
        return Source.pyrocko_event(self, store, target, **kwargs)

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        return super(SFSource, cls).from_pyrocko_event(ev, **dict(kwargs))

4396 

4397 

class PorePressurePointSource(Source):
    '''
    Excess pore pressure point source.

    For poro-elastic initial value problem where an excess pore pressure is
    brought into a small source volume.
    '''

    discretized_source_class = meta.DiscretizedPorePressureSource

    pp = Float.T(
        default=1.0,
        help='initial excess pore pressure in [Pa]')

    def base_key(self):
        # Pressure amplitude is applied via get_factor(), so only the base
        # source attributes enter the cache key.
        return Source.base_key(self)

    def get_factor(self):
        return self.pp

    def discretize_basesource(self, store, target=None):
        # Unit pressure here; the actual amplitude comes in as the factor.
        return meta.DiscretizedPorePressureSource(pp=arr(1.0),
                                                  **self._dparams_base())

4421 

4422 

class PorePressureLineSource(Source):
    '''
    Excess pore pressure line source.

    The line source is centered at (north_shift, east_shift, depth).
    '''

    discretized_source_class = meta.DiscretizedPorePressureSource

    pp = Float.T(
        default=1.0,
        help='initial excess pore pressure in [Pa]')

    length = Float.T(
        default=0.0,
        help='length of the line source [m]')

    azimuth = Float.T(
        default=0.0,
        help='azimuth direction, clockwise from north [deg]')

    dip = Float.T(
        default=90.,
        help='dip direction, downward from horizontal [deg]')

    def base_key(self):
        return Source.base_key(self) + (self.azimuth, self.dip, self.length)

    def get_factor(self):
        return self.pp

    def discretize_basesource(self, store, target=None):
        '''
        Discretize the line into equally spaced point sources.

        The sampling density follows the store's finest grid spacing; an
        odd point count keeps the line centre included.
        '''
        min_delta = num.min(store.config.deltas)
        npoints = 2 * int(math.ceil(self.length / min_delta)) + 1

        offsets = num.linspace(
            -0.5 * self.length, 0.5 * self.length, npoints)

        azi = self.azimuth * d2r
        dip = self.dip * d2r
        ca, sa = math.cos(azi), math.sin(azi)
        cd, sd = math.cos(dip), math.sin(dip)

        north_shifts = self.north_shift + offsets * ca * cd
        east_shifts = self.east_shift + offsets * sa * cd
        depths = self.depth + offsets * sd

        # Unit total pressure, shared equally among the points; amplitude
        # is applied through get_factor().
        return meta.DiscretizedPorePressureSource(
            times=util.num_full(npoints, self.time),
            lat=self.lat,
            lon=self.lon,
            north_shifts=north_shifts,
            east_shifts=east_shifts,
            depths=depths,
            pp=num.ones(npoints) / npoints)

4478 

4479 

class Request(Object):
    '''
    Synthetic seismogram computation request.

    ::

        Request(**kwargs)
        Request(sources, targets, **kwargs)
    '''

    sources = List.T(
        Source.T(),
        help='list of sources for which to produce synthetics.')

    targets = List.T(
        Target.T(),
        help='list of targets for which to produce synthetics.')

    @classmethod
    def args2kwargs(cls, args):
        '''
        Translate positional constructor arguments to keyword arguments.

        Accepts no positional arguments or the pair ``(sources, targets)``.

        :raises BadRequest: for any other number of arguments
        '''
        # NOTE(review): a length of 3 is tolerated here but the extra
        # argument is silently ignored — presumably a legacy form; confirm
        # before tightening.
        if len(args) not in (0, 2, 3):
            raise BadRequest('Invalid arguments.')

        if len(args) == 2:
            return dict(sources=args[0], targets=args[1])
        else:
            return {}

    def __init__(self, *args, **kwargs):
        kwargs.update(self.args2kwargs(args))
        sources = kwargs.pop('sources', [])
        targets = kwargs.pop('targets', [])

        # Allow single instances where lists are expected.
        if isinstance(sources, Source):
            sources = [sources]

        if isinstance(targets, (Target, StaticTarget)):
            targets = [targets]

        Object.__init__(self, sources=sources, targets=targets, **kwargs)

    @property
    def targets_dynamic(self):
        # Subset of targets which are dynamic (waveform) targets.
        return [t for t in self.targets if isinstance(t, Target)]

    @property
    def targets_static(self):
        # Subset of targets which are static targets.
        return [t for t in self.targets if isinstance(t, StaticTarget)]

    @property
    def has_dynamic(self):
        # ``bool()`` replaces the redundant ``True if len(...) > 0 else
        # False`` construct; result is identical.
        return bool(self.targets_dynamic)

    @property
    def has_statics(self):
        return bool(self.targets_static)

    def subsources_map(self):
        '''
        Group the request's sources by their ``base_key()``.
        '''
        m = defaultdict(list)
        for source in self.sources:
            m[source.base_key()].append(source)

        return m

    def subtargets_map(self):
        '''
        Group the request's targets by their ``base_key()``.
        '''
        m = defaultdict(list)
        for target in self.targets:
            m[target.base_key()].append(target)

        return m

    def subrequest_map(self):
        '''
        Cartesian product of the source and target groupings.

        Returns a dict mapping ``(source_key, target_key)`` to the pair
        ``(source_list, target_list)``.
        '''
        ms = self.subsources_map()
        mt = self.subtargets_map()
        m = {}
        for (ks, ls) in ms.items():
            for (kt, lt) in mt.items():
                m[ks, kt] = (ls, lt)

        return m

4560 

4561 

class ProcessingStats(Object):
    '''
    Timing and counting statistics collected while processing a request.
    '''

    # Per-stage timing shares of the dynamic processing path.
    # NOTE(review): names suggest percentages of total time — confirm
    # against where these are filled in.
    t_perc_get_store_and_receiver = Float.T(default=0.)
    t_perc_discretize_source = Float.T(default=0.)
    t_perc_make_base_seismogram = Float.T(default=0.)
    t_perc_make_same_span = Float.T(default=0.)
    t_perc_post_process = Float.T(default=0.)
    t_perc_optimize = Float.T(default=0.)
    t_perc_stack = Float.T(default=0.)
    # Per-stage timing shares of the static processing path.
    t_perc_static_get_store = Float.T(default=0.)
    t_perc_static_discretize_basesource = Float.T(default=0.)
    t_perc_static_sum_statics = Float.T(default=0.)
    t_perc_static_post_process = Float.T(default=0.)
    # Absolute totals.
    t_wallclock = Float.T(default=0.)
    t_cpu = Float.T(default=0.)
    # Counters accumulated over the whole request.
    n_read_blocks = Int.T(default=0)
    n_results = Int.T(default=0)
    n_subrequests = Int.T(default=0)
    n_stores = Int.T(default=0)
    n_records_stacked = Int.T(default=0)

4581 

4582 

class Response(Object):
    '''
    Response object to a synthetic seismogram computation request.
    '''

    request = Request.T()
    results_list = List.T(List.T(meta.SeismosizerResult.T()))
    stats = ProcessingStats.T()

    def pyrocko_traces(self):
        '''
        Return a list of requested
        :class:`~pyrocko.trace.Trace` instances.
        '''
        return [
            result.trace.pyrocko_trace()
            for results in self.results_list
            for result in results
            if isinstance(result, meta.Result)]

    def kite_scenes(self):
        '''
        Return a list of requested
        :class:`~kite.scenes` instances.
        '''
        return [
            result.get_scene()
            for results in self.results_list
            for result in results
            if isinstance(result, meta.KiteSceneResult)]

    def static_results(self):
        '''
        Return a list of requested
        :class:`~pyrocko.gf.meta.StaticResult` instances.
        '''
        return [
            result
            for results in self.results_list
            for result in results
            if isinstance(result, meta.StaticResult)]

    def iter_results(self, get='pyrocko_traces'):
        '''
        Generator function to iterate over results of request.

        Yields associated :py:class:`Source`,
        :class:`~pyrocko.gf.targets.Target`,
        :class:`~pyrocko.trace.Trace` instances in each iteration.
        '''
        for isource, source in enumerate(self.request.sources):
            for itarget, target in enumerate(self.request.targets):
                result = self.results_list[isource][itarget]
                if get == 'pyrocko_traces':
                    yield source, target, result.trace.pyrocko_trace()
                elif get == 'results':
                    yield source, target, result

    def snuffle(self, **kwargs):
        '''
        Open *snuffler* with requested traces.
        '''
        trace.snuffle(self.pyrocko_traces(), **kwargs)

4658 

4659 

class Engine(Object):
    '''
    Base class for synthetic seismogram calculators.
    '''

    def get_store_ids(self):
        '''
        Get list of available GF store IDs.
        '''
        # Base implementation knows no stores; subclasses override.
        return []

4671 

4672 

class Rule(object):
    '''
    Base class for rules which derive a requested quantity from the
    components of a base seismogram (see subclasses below).
    '''
    pass

4675 

4676 

class VectorRule(Rule):
    '''
    Rule to combine the three NED components of a vector quantity using
    the target's projection factors, with optional differentiation.
    '''

    def __init__(self, quantity, differentiate=0, integrate=0):
        self.components = ['%s.%s' % (quantity, c) for c in 'ned']
        self.differentiate = differentiate
        self.integrate = integrate

    def required_components(self, target):
        # Components with a vanishing projection factor are not needed.
        sa, ca, sd, cd = target.get_sin_cos_factors()
        factors = (ca * cd, sa * cd, sd)
        return tuple(
            comp for comp, factor in zip(self.components, factors)
            if nonzero(factor))

    def apply_(self, target, base_seismogram):
        sa, ca, sd, cd = target.get_sin_cos_factors()
        factors = (ca * cd, sa * cd, sd)

        data = 0.0
        deltat = None
        for comp, factor in zip(self.components, factors):
            if nonzero(factor):
                data = data + base_seismogram[comp].data * factor
                deltat = base_seismogram[comp].deltat

        if self.differentiate:
            data = util.diff_fd(self.differentiate, 4, deltat, data)

        if self.integrate:
            raise NotImplementedError('Integration is not implemented yet.')

        return data

4725 

4726 

class HorizontalVectorRule(Rule):
    '''
    Rule to combine the two horizontal (NE) components of a vector
    quantity using the target's azimuthal projection factors, with
    optional differentiation.
    '''

    def __init__(self, quantity, differentiate=0, integrate=0):
        self.components = [quantity + '.' + c for c in 'ne']
        self.differentiate = differentiate
        self.integrate = integrate

    def required_components(self, target):
        # Components with a vanishing projection factor are not needed.
        n, e = self.components
        sa, ca, _, _ = target.get_sin_cos_factors()

        comps = []
        if nonzero(ca):
            comps.append(n)

        if nonzero(sa):
            comps.append(e)

        return tuple(comps)

    def apply_(self, target, base_seismogram):
        n, e = self.components
        sa, ca, _, _ = target.get_sin_cos_factors()

        # Track deltat alongside the data. Bug fix: deltat was previously
        # taken unconditionally from the east component, which raised
        # KeyError when only the north component was required (sa == 0)
        # and differentiation was requested.
        deltat = None
        if nonzero(ca):
            data = base_seismogram[n].data * ca
            deltat = base_seismogram[n].deltat
        else:
            data = 0.0

        if nonzero(sa):
            data = data + base_seismogram[e].data * sa
            deltat = base_seismogram[e].deltat

        if self.differentiate:
            data = util.diff_fd(self.differentiate, 4, deltat, data)

        if self.integrate:
            raise NotImplementedError('Integration is not implemented yet.')

        return data

4767 

4768 

class ScalarRule(Rule):
    '''
    Rule for a scalar quantity, with optional differentiation.
    '''

    def __init__(self, quantity, differentiate=0):
        self.c = quantity
        # Bug fix: ``differentiate`` was previously dropped, so apply_()
        # raised AttributeError on ``self.differentiate``.
        self.differentiate = differentiate

    def required_components(self, target):
        return (self.c, )

    def apply_(self, target, base_seismogram):
        # Copy so differentiation does not mutate the base seismogram.
        data = base_seismogram[self.c].data.copy()
        deltat = base_seismogram[self.c].deltat
        if self.differentiate:
            data = util.diff_fd(self.differentiate, 4, deltat, data)

        return data

4784 

4785 

class StaticDisplacement(Rule):
    '''
    Rule for static displacement; for satellite targets the line-of-sight
    projection is added to the statics.
    '''

    def required_components(self, target):
        # All three NED components are always needed.
        return tuple(['displacement.%s' % c for c in list('ned')])

    def apply_(self, target, base_statics):
        if isinstance(target, SatelliteTarget):
            los_fac = target.get_los_factors()
            # Project NED displacements onto the line of sight.
            # NOTE(review): the down component enters with flipped sign —
            # confirm against the get_los_factors() convention.
            base_statics['displacement.los'] =\
                (los_fac[:, 0] * -base_statics['displacement.d'] +
                 los_fac[:, 1] * base_statics['displacement.e'] +
                 los_fac[:, 2] * base_statics['displacement.n'])
        return base_statics

4799 

4800 

# Rules for time-dependent quantities, keyed by quantity name. Where a list
# holds several rules, alternatives exist (e.g. velocity can come from
# velocity GFs directly or from differentiated displacement) — presumably
# tried in list order; confirm in the engine's rule selection.
channel_rules = {
    'displacement': [VectorRule('displacement')],
    'rotation': [VectorRule('rotation')],
    'velocity': [
        VectorRule('velocity'),
        VectorRule('displacement', differentiate=1)],
    'acceleration': [
        VectorRule('acceleration'),
        VectorRule('velocity', differentiate=1),
        VectorRule('displacement', differentiate=2)],
    'pore_pressure': [ScalarRule('pore_pressure')],
    'vertical_tilt': [HorizontalVectorRule('vertical_tilt')],
    'darcy_velocity': [VectorRule('darcy_velocity')],
}

# Rules for static (non-time-dependent) quantities.
static_rules = {
    'displacement': [StaticDisplacement()]
}

4819 

4820 

class OutOfBoundsContext(Object):
    '''
    Diagnostic context attached to out-of-bounds / seismosizer errors,
    identifying the offending source/target combination.
    '''

    source = Source.T()
    target = Target.T()
    distance = Float.T()
    components = List.T(String.T())

4826 

4827 

def process_dynamic_timeseries(work, psources, ptargets, engine, nthreads=0):
    '''
    Generator computing dynamic results per (source, target) pair.

    Targets are grouped by store, sample rate and interpolation so each
    group can be handed to the engine as one batch. Yields
    ``((isource, itarget, result), tcounters)`` tuples.
    '''
    dsource_cache = {}
    # Placeholder timing counters; the batched path does not time stages
    # individually.
    tcounters = list(range(6))

    store_ids = set()
    sources = set()
    targets = set()

    # Remember each target's original index so results can be attributed
    # after deduplication through the sets below.
    for itarget, target in enumerate(ptargets):
        target._id = itarget

    for w in work:
        _, _, isources, itargets = w

        sources.update([psources[isource] for isource in isources])
        targets.update([ptargets[itarget] for itarget in itargets])

    store_ids = set([t.store_id for t in targets])

    for isource, source in enumerate(psources):

        # Union of GF components any target needs for this source.
        components = set()
        for itarget, target in enumerate(targets):
            rule = engine.get_rule(source, target)
            components.update(rule.required_components(target))

        for store_id in store_ids:
            store_targets = [t for t in targets if t.store_id == store_id]

            sample_rates = set([t.sample_rate for t in store_targets])
            interpolations = set([t.interpolation for t in store_targets])

            base_seismograms = []
            store_targets_out = []

            # One engine batch per (sample rate, interpolation) bucket;
            # store_targets_out mirrors the order of base_seismograms.
            for samp_rate in sample_rates:
                for interp in interpolations:
                    engine_targets = [
                        t for t in store_targets if t.sample_rate == samp_rate
                        and t.interpolation == interp]

                    if not engine_targets:
                        continue

                    store_targets_out += engine_targets

                    base_seismograms += engine.base_seismograms(
                        source,
                        engine_targets,
                        components,
                        dsource_cache,
                        nthreads)

            for iseis, seismogram in enumerate(base_seismograms):
                for tr in seismogram.values():
                    if tr.err != store.SeismosizerErrorEnum.SUCCESS:
                        # NOTE(review): error context indexes store_targets
                        # while base_seismograms follows the order of
                        # store_targets_out; with more than one
                        # (sample rate, interpolation) bucket the reported
                        # target may be wrong — verify.
                        e = SeismosizerError(
                            'Seismosizer failed with return code %i\n%s' % (
                                tr.err, str(
                                    OutOfBoundsContext(
                                        source=source,
                                        target=store_targets[iseis],
                                        distance=source.distance_to(
                                            store_targets[iseis]),
                                        components=components))))
                        raise e

            for seismogram, target in zip(base_seismograms, store_targets_out):

                try:
                    result = engine._post_process_dynamic(
                        seismogram, source, target)
                except SeismosizerError as e:
                    # Per-target failures are reported as results, not
                    # raised, so other targets still get processed.
                    result = e

                yield (isource, target._id, result), tcounters

4904 

4905 

def process_dynamic(work, psources, ptargets, engine, nthreads=0):
    '''
    Generator computing dynamic results one (source, target) pair at a
    time.

    Yields ``((isource, itarget, result), tcounters)`` tuples, where
    ``tcounters`` are the engine's stage timestamps plus a final one added
    here.
    '''
    dsource_cache = {}

    for w in work:
        _, _, isources, itargets = w

        sources = [psources[isource] for isource in isources]
        targets = [ptargets[itarget] for itarget in itargets]

        # Union of GF components needed by any target in this work item.
        # Uses sources[0]; sources within one work item share a base key.
        components = set()
        for target in targets:
            rule = engine.get_rule(sources[0], target)
            components.update(rule.required_components(target))

        for isource, source in zip(isources, sources):
            for itarget, target in zip(itargets, targets):

                try:
                    base_seismogram, tcounters = engine.base_seismogram(
                        source, target, components, dsource_cache, nthreads)
                except meta.OutOfBounds as e:
                    # Attach diagnostic context before re-raising.
                    e.context = OutOfBoundsContext(
                        source=sources[0],
                        target=targets[0],
                        distance=sources[0].distance_to(targets[0]),
                        components=components)
                    raise

                # Aggregate stacking statistics over all components.
                n_records_stacked = 0
                t_optimize = 0.0
                t_stack = 0.0

                for _, tr in base_seismogram.items():
                    n_records_stacked += tr.n_records_stacked
                    t_optimize += tr.t_optimize
                    t_stack += tr.t_stack

                try:
                    result = engine._post_process_dynamic(
                        base_seismogram, source, target)
                    result.n_records_stacked = n_records_stacked
                    result.n_shared_stacking = len(sources) *\
                        len(targets)
                    result.t_optimize = t_optimize
                    result.t_stack = t_stack
                except SeismosizerError as e:
                    # Failures are reported as results so processing of
                    # remaining pairs continues.
                    result = e

                tcounters.append(xtime())
                yield (isource, itarget, result), tcounters

4956 

4957 

def process_static(work, psources, ptargets, engine, nthreads=0):
    '''
    Generator computing static results one (source, target) pair at a
    time.

    Yields ``((isource, itarget, result), tcounters)`` tuples.
    '''
    for w in work:
        _, _, isources, itargets = w

        sources = [psources[isource] for isource in isources]
        targets = [ptargets[itarget] for itarget in itargets]

        for isource, source in zip(isources, sources):
            for itarget, target in zip(itargets, targets):
                components = engine.get_rule(source, target)\
                    .required_components(target)

                try:
                    base_statics, tcounters = engine.base_statics(
                        source, target, components, nthreads)
                except meta.OutOfBounds as e:
                    # Attach diagnostic context before re-raising; no
                    # meaningful distance for static grids.
                    e.context = OutOfBoundsContext(
                        source=sources[0],
                        target=targets[0],
                        distance=float('nan'),
                        components=components)
                    raise
                result = engine._post_process_statics(
                    base_statics, source, target)
                tcounters.append(xtime())

                yield (isource, itarget, result), tcounters

4985 

4986 

class LocalEngine(Engine):
    '''
    Offline synthetic seismogram calculator.

    :param use_env: if ``True``, fill :py:attr:`store_superdirs` and
        :py:attr:`store_dirs` with paths set in environment variables
        GF_STORE_SUPERDIRS and GF_STORE_DIRS.
    :param use_config: if ``True``, fill :py:attr:`store_superdirs` and
        :py:attr:`store_dirs` with paths set in the user's config file.

        The config file can be found at :file:`~/.pyrocko/config.pf`

        .. code-block :: python

            gf_store_dirs: ['/home/pyrocko/gf_stores/ak135/']
            gf_store_superdirs: ['/home/pyrocko/gf_stores/']
    '''

    store_superdirs = List.T(
        String.T(),
        help="directories which are searched for Green's function stores")

    store_dirs = List.T(
        String.T(),
        help="additional individual Green's function store directories")

    default_store_id = String.T(
        optional=True,
        help='default store ID to be used when a request does not provide '
             'one')

5017 

    def __init__(self, **kwargs):
        # Non-guts options must be popped before handing the rest to the
        # guts Object constructor.
        use_env = kwargs.pop('use_env', False)
        use_config = kwargs.pop('use_config', False)
        Engine.__init__(self, **kwargs)
        if use_env:
            # Colon-separated path lists, following PATH-style convention.
            env_store_superdirs = os.environ.get('GF_STORE_SUPERDIRS', '')
            env_store_dirs = os.environ.get('GF_STORE_DIRS', '')
            if env_store_superdirs:
                self.store_superdirs.extend(env_store_superdirs.split(':'))

            if env_store_dirs:
                self.store_dirs.extend(env_store_dirs.split(':'))

        if use_config:
            c = config.config()
            self.store_superdirs.extend(c.gf_store_superdirs)
            self.store_dirs.extend(c.gf_store_dirs)

        self._check_store_dirs_type()
        self._id_to_store_dir = {}   # store ID -> directory (filled lazily)
        self._open_stores = {}       # store ID -> open Store instance
        self._effective_default_store_id = None

5040 

5041 def _check_store_dirs_type(self): 

5042 for sdir in ['store_dirs', 'store_superdirs']: 

5043 if not isinstance(self.__getattribute__(sdir), list): 

5044 raise TypeError('{} of {} is not of type list'.format( 

5045 sdir, self.__class__.__name__)) 

5046 

5047 def _get_store_id(self, store_dir): 

5048 store_ = store.Store(store_dir) 

5049 store_id = store_.config.id 

5050 store_.close() 

5051 return store_id 

5052 

5053 def _looks_like_store_dir(self, store_dir): 

5054 return os.path.isdir(store_dir) and \ 

5055 all(os.path.isfile(pjoin(store_dir, x)) for x in 

5056 ('index', 'traces', 'config')) 

5057 

5058 def iter_store_dirs(self): 

5059 store_dirs = set() 

5060 for d in self.store_superdirs: 

5061 if not os.path.exists(d): 

5062 logger.warning('store_superdir not available: %s' % d) 

5063 continue 

5064 

5065 for entry in os.listdir(d): 

5066 store_dir = os.path.realpath(pjoin(d, entry)) 

5067 if self._looks_like_store_dir(store_dir): 

5068 store_dirs.add(store_dir) 

5069 

5070 for store_dir in self.store_dirs: 

5071 store_dirs.add(os.path.realpath(store_dir)) 

5072 

5073 return store_dirs 

5074 

5075 def _scan_stores(self): 

5076 for store_dir in self.iter_store_dirs(): 

5077 store_id = self._get_store_id(store_dir) 

5078 if store_id not in self._id_to_store_dir: 

5079 self._id_to_store_dir[store_id] = store_dir 

5080 else: 

5081 if store_dir != self._id_to_store_dir[store_id]: 

5082 raise DuplicateStoreId( 

5083 'GF store ID %s is used in (at least) two ' 

5084 'different stores. Locations are: %s and %s' % 

5085 (store_id, self._id_to_store_dir[store_id], store_dir)) 

5086 

5087 def get_store_dir(self, store_id): 

5088 ''' 

5089 Lookup directory given a GF store ID. 

5090 ''' 

5091 

5092 if store_id not in self._id_to_store_dir: 

5093 self._scan_stores() 

5094 

5095 if store_id not in self._id_to_store_dir: 

5096 raise NoSuchStore(store_id, self.iter_store_dirs()) 

5097 

5098 return self._id_to_store_dir[store_id] 

5099 

5100 def get_store_ids(self): 

5101 ''' 

5102 Get list of available store IDs. 

5103 ''' 

5104 

5105 self._scan_stores() 

5106 return sorted(self._id_to_store_dir.keys()) 

5107 

5108 def effective_default_store_id(self): 

5109 if self._effective_default_store_id is None: 

5110 if self.default_store_id is None: 

5111 store_ids = self.get_store_ids() 

5112 if len(store_ids) == 1: 

5113 self._effective_default_store_id = self.get_store_ids()[0] 

5114 else: 

5115 raise NoDefaultStoreSet() 

5116 else: 

5117 self._effective_default_store_id = self.default_store_id 

5118 

5119 return self._effective_default_store_id 

5120 

5121 def get_store(self, store_id=None): 

5122 ''' 

5123 Get a store from the engine. 

5124 

5125 :param store_id: identifier of the store (optional) 

5126 :returns: :py:class:`~pyrocko.gf.store.Store` object 

5127 

5128 If no ``store_id`` is provided the store 

5129 associated with the :py:gattr:`default_store_id` is returned. 

5130 Raises :py:exc:`NoDefaultStoreSet` if :py:gattr:`default_store_id` is 

5131 undefined. 

5132 ''' 

5133 

5134 if store_id is None: 

5135 store_id = self.effective_default_store_id() 

5136 

5137 if store_id not in self._open_stores: 

5138 store_dir = self.get_store_dir(store_id) 

5139 self._open_stores[store_id] = store.Store(store_dir) 

5140 

5141 return self._open_stores[store_id] 

5142 

5143 def get_store_config(self, store_id): 

5144 store = self.get_store(store_id) 

5145 return store.config 

5146 

5147 def get_store_extra(self, store_id, key): 

5148 store = self.get_store(store_id) 

5149 return store.get_extra(key) 

5150 

5151 def close_cashed_stores(self): 

5152 ''' 

5153 Close and remove ids from cashed stores. 

5154 ''' 

5155 store_ids = [] 

5156 for store_id, store_ in self._open_stores.items(): 

5157 store_.close() 

5158 store_ids.append(store_id) 

5159 

5160 for store_id in store_ids: 

5161 self._open_stores.pop(store_id) 

5162 

5163 def get_rule(self, source, target): 

5164 cprovided = self.get_store(target.store_id).get_provided_components() 

5165 

5166 if isinstance(target, StaticTarget): 

5167 quantity = target.quantity 

5168 available_rules = static_rules 

5169 elif isinstance(target, Target): 

5170 quantity = target.effective_quantity() 

5171 available_rules = channel_rules 

5172 

5173 try: 

5174 for rule in available_rules[quantity]: 

5175 cneeded = rule.required_components(target) 

5176 if all(c in cprovided for c in cneeded): 

5177 return rule 

5178 

5179 except KeyError: 

5180 pass 

5181 

5182 raise BadRequest( 

5183 'No rule to calculate "%s" with GFs from store "%s" ' 

5184 'for source model "%s".' % ( 

5185 target.effective_quantity(), 

5186 target.store_id, 

5187 source.__class__.__name__)) 

5188 

5189 def _cached_discretize_basesource(self, source, store, cache, target): 

5190 if (source, store) not in cache: 

5191 cache[source, store] = source.discretize_basesource(store, target) 

5192 

5193 return cache[source, store] 

5194 

5195 def base_seismograms(self, source, targets, components, dsource_cache, 

5196 nthreads=0): 

5197 

5198 target = targets[0] 

5199 

5200 interp = set([t.interpolation for t in targets]) 

5201 if len(interp) > 1: 

5202 raise BadRequest('Targets have different interpolation schemes.') 

5203 

5204 rates = set([t.sample_rate for t in targets]) 

5205 if len(rates) > 1: 

5206 raise BadRequest('Targets have different sample rates.') 

5207 

5208 store_ = self.get_store(target.store_id) 

5209 receivers = [t.receiver(store_) for t in targets] 

5210 

5211 if target.sample_rate is not None: 

5212 deltat = 1. / target.sample_rate 

5213 rate = target.sample_rate 

5214 else: 

5215 deltat = None 

5216 rate = store_.config.sample_rate 

5217 

5218 tmin = num.fromiter( 

5219 (t.tmin for t in targets), dtype=float, count=len(targets)) 

5220 tmax = num.fromiter( 

5221 (t.tmax for t in targets), dtype=float, count=len(targets)) 

5222 

5223 mask = num.logical_and(num.isfinite(tmin), num.isfinite(tmax)) 

5224 

5225 itmin = num.zeros_like(tmin, dtype=num.int64) 

5226 itmax = num.zeros_like(tmin, dtype=num.int64) 

5227 nsamples = num.full_like(tmin, -1, dtype=num.int64) 

5228 

5229 itmin[mask] = num.floor(tmin[mask] * rate).astype(num.int64) 

5230 itmax[mask] = num.ceil(tmax[mask] * rate).astype(num.int64) 

5231 nsamples = itmax - itmin + 1 

5232 nsamples[num.logical_not(mask)] = -1 

5233 

5234 base_source = self._cached_discretize_basesource( 

5235 source, store_, dsource_cache, target) 

5236 

5237 base_seismograms = store_.calc_seismograms( 

5238 base_source, receivers, components, 

5239 deltat=deltat, 

5240 itmin=itmin, nsamples=nsamples, 

5241 interpolation=target.interpolation, 

5242 optimization=target.optimization, 

5243 nthreads=nthreads) 

5244 

5245 for i, base_seismogram in enumerate(base_seismograms): 

5246 base_seismograms[i] = store.make_same_span(base_seismogram) 

5247 

5248 return base_seismograms 

5249 

5250 def base_seismogram(self, source, target, components, dsource_cache, 

5251 nthreads): 

5252 

5253 tcounters = [xtime()] 

5254 

5255 store_ = self.get_store(target.store_id) 

5256 receiver = target.receiver(store_) 

5257 

5258 if target.tmin and target.tmax is not None: 

5259 rate = store_.config.sample_rate 

5260 itmin = int(num.floor(target.tmin * rate)) 

5261 itmax = int(num.ceil(target.tmax * rate)) 

5262 nsamples = itmax - itmin + 1 

5263 else: 

5264 itmin = None 

5265 nsamples = None 

5266 

5267 tcounters.append(xtime()) 

5268 base_source = self._cached_discretize_basesource( 

5269 source, store_, dsource_cache, target) 

5270 

5271 tcounters.append(xtime()) 

5272 

5273 if target.sample_rate is not None: 

5274 deltat = 1. / target.sample_rate 

5275 else: 

5276 deltat = None 

5277 

5278 base_seismogram = store_.seismogram( 

5279 base_source, receiver, components, 

5280 deltat=deltat, 

5281 itmin=itmin, nsamples=nsamples, 

5282 interpolation=target.interpolation, 

5283 optimization=target.optimization, 

5284 nthreads=nthreads) 

5285 

5286 tcounters.append(xtime()) 

5287 

5288 base_seismogram = store.make_same_span(base_seismogram) 

5289 

5290 tcounters.append(xtime()) 

5291 

5292 return base_seismogram, tcounters 

5293 

5294 def base_statics(self, source, target, components, nthreads): 

5295 tcounters = [xtime()] 

5296 store_ = self.get_store(target.store_id) 

5297 

5298 if target.tsnapshot is not None: 

5299 rate = store_.config.sample_rate 

5300 itsnapshot = int(num.floor(target.tsnapshot * rate)) 

5301 else: 

5302 itsnapshot = None 

5303 tcounters.append(xtime()) 

5304 

5305 base_source = source.discretize_basesource(store_, target=target) 

5306 

5307 tcounters.append(xtime()) 

5308 

5309 base_statics = store_.statics( 

5310 base_source, 

5311 target, 

5312 itsnapshot, 

5313 components, 

5314 target.interpolation, 

5315 nthreads) 

5316 

5317 tcounters.append(xtime()) 

5318 

5319 return base_statics, tcounters 

5320 

    def _post_process_dynamic(self, base_seismogram, source, target):
        '''
        Convert a raw base seismogram into the target's final trace.

        Applies the component conversion rule, the combined source/target
        scaling factor and the source's post-stacking STF, then hands the
        result to the target's ``post_process`` hook.
        '''
        # All component traces share deltat/itmin; peek at any one of them.
        base_any = next(iter(base_seismogram.values()))
        deltat = base_any.deltat
        itmin = base_any.itmin

        rule = self.get_rule(source, target)
        data = rule.apply_(target, base_seismogram)

        factor = source.get_factor() * target.get_factor()
        if factor != 1.0:
            data = data * factor

        stf = source.effective_stf_post()

        times, amplitudes = stf.discretize_t(
            deltat, 0.0)

        # repeat end point to prevent boundary effects
        padded_data = num.empty(data.size + amplitudes.size, dtype=float)
        padded_data[:data.size] = data
        padded_data[data.size:] = data[-1]
        data = num.convolve(amplitudes, padded_data)

        # Shift the trace start by the STF's leading time offset.
        tmin = itmin * deltat + times[0]

        tr = meta.SeismosizerTrace(
            codes=target.codes,
            # Trim the convolution tail introduced by the padding above.
            data=data[:-amplitudes.size],
            deltat=deltat,
            tmin=tmin)

        return target.post_process(self, source, tr)

5353 

    def _post_process_statics(self, base_statics, source, starget):
        '''
        Scale raw static results and run the target's post-processing.
        '''
        rule = self.get_rule(source, starget)
        data = rule.apply_(starget, base_statics)

        factor = source.get_factor()
        if factor != 1.0:
            # In-place scaling: if ``rule.apply_`` returns views into
            # ``base_statics``, this also scales the values passed on
            # below.
            for v in data.values():
                v *= factor

        # NOTE(review): ``base_statics`` (not the scaled ``data`` mapping)
        # is handed to ``post_process`` — correct only if the in-place
        # scaling above propagates into ``base_statics``; confirm against
        # the rule implementations.
        return starget.post_process(self, source, base_statics)

5364 

5365 def process(self, *args, **kwargs): 

5366 ''' 

5367 Process a request. 

5368 

5369 :: 

5370 

5371 process(**kwargs) 

5372 process(request, **kwargs) 

5373 process(sources, targets, **kwargs) 

5374 

5375 The request can be given a a :py:class:`Request` object, or such an 

5376 object is created using ``Request(**kwargs)`` for convenience. 

5377 

5378 :returns: :py:class:`Response` object 

5379 ''' 

5380 

5381 if len(args) not in (0, 1, 2): 

5382 raise BadRequest('Invalid arguments.') 

5383 

5384 if len(args) == 1: 

5385 kwargs['request'] = args[0] 

5386 

5387 elif len(args) == 2: 

5388 kwargs.update(Request.args2kwargs(args)) 

5389 

5390 request = kwargs.pop('request', None) 

5391 status_callback = kwargs.pop('status_callback', None) 

5392 calc_timeseries = kwargs.pop('calc_timeseries', True) 

5393 

5394 nprocs = kwargs.pop('nprocs', None) 

5395 nthreads = kwargs.pop('nthreads', 1) 

5396 if nprocs is not None: 

5397 nthreads = nprocs 

5398 

5399 if request is None: 

5400 request = Request(**kwargs) 

5401 

5402 if resource: 

5403 rs0 = resource.getrusage(resource.RUSAGE_SELF) 

5404 rc0 = resource.getrusage(resource.RUSAGE_CHILDREN) 

5405 tt0 = xtime() 

5406 

5407 # make sure stores are open before fork() 

5408 store_ids = set(target.store_id for target in request.targets) 

5409 for store_id in store_ids: 

5410 self.get_store(store_id) 

5411 

5412 source_index = dict((x, i) for (i, x) in 

5413 enumerate(request.sources)) 

5414 target_index = dict((x, i) for (i, x) in 

5415 enumerate(request.targets)) 

5416 

5417 m = request.subrequest_map() 

5418 

5419 skeys = sorted(m.keys(), key=cmp_to_key(cmp_none_aware)) 

5420 results_list = [] 

5421 

5422 for i in range(len(request.sources)): 

5423 results_list.append([None] * len(request.targets)) 

5424 

5425 tcounters_dyn_list = [] 

5426 tcounters_static_list = [] 

5427 nsub = len(skeys) 

5428 isub = 0 

5429 

5430 # Processing dynamic targets through 

5431 # parimap(process_subrequest_dynamic) 

5432 

5433 if calc_timeseries: 

5434 _process_dynamic = process_dynamic_timeseries 

5435 else: 

5436 _process_dynamic = process_dynamic 

5437 

5438 if request.has_dynamic: 

5439 work_dynamic = [ 

5440 (i, nsub, 

5441 [source_index[source] for source in m[k][0]], 

5442 [target_index[target] for target in m[k][1] 

5443 if not isinstance(target, StaticTarget)]) 

5444 for (i, k) in enumerate(skeys)] 

5445 

5446 for ii_results, tcounters_dyn in _process_dynamic( 

5447 work_dynamic, request.sources, request.targets, self, 

5448 nthreads): 

5449 

5450 tcounters_dyn_list.append(num.diff(tcounters_dyn)) 

5451 isource, itarget, result = ii_results 

5452 results_list[isource][itarget] = result 

5453 

5454 if status_callback: 

5455 status_callback(isub, nsub) 

5456 

5457 isub += 1 

5458 

5459 # Processing static targets through process_static 

5460 if request.has_statics: 

5461 work_static = [ 

5462 (i, nsub, 

5463 [source_index[source] for source in m[k][0]], 

5464 [target_index[target] for target in m[k][1] 

5465 if isinstance(target, StaticTarget)]) 

5466 for (i, k) in enumerate(skeys)] 

5467 

5468 for ii_results, tcounters_static in process_static( 

5469 work_static, request.sources, request.targets, self, 

5470 nthreads=nthreads): 

5471 

5472 tcounters_static_list.append(num.diff(tcounters_static)) 

5473 isource, itarget, result = ii_results 

5474 results_list[isource][itarget] = result 

5475 

5476 if status_callback: 

5477 status_callback(isub, nsub) 

5478 

5479 isub += 1 

5480 

5481 if status_callback: 

5482 status_callback(nsub, nsub) 

5483 

5484 tt1 = time.time() 

5485 if resource: 

5486 rs1 = resource.getrusage(resource.RUSAGE_SELF) 

5487 rc1 = resource.getrusage(resource.RUSAGE_CHILDREN) 

5488 

5489 s = ProcessingStats() 

5490 

5491 if request.has_dynamic: 

5492 tcumu_dyn = num.sum(num.vstack(tcounters_dyn_list), axis=0) 

5493 t_dyn = float(num.sum(tcumu_dyn)) 

5494 perc_dyn = map(float, tcumu_dyn / t_dyn * 100.) 

5495 (s.t_perc_get_store_and_receiver, 

5496 s.t_perc_discretize_source, 

5497 s.t_perc_make_base_seismogram, 

5498 s.t_perc_make_same_span, 

5499 s.t_perc_post_process) = perc_dyn 

5500 else: 

5501 t_dyn = 0. 

5502 

5503 if request.has_statics: 

5504 tcumu_static = num.sum(num.vstack(tcounters_static_list), axis=0) 

5505 t_static = num.sum(tcumu_static) 

5506 perc_static = map(float, tcumu_static / t_static * 100.) 

5507 (s.t_perc_static_get_store, 

5508 s.t_perc_static_discretize_basesource, 

5509 s.t_perc_static_sum_statics, 

5510 s.t_perc_static_post_process) = perc_static 

5511 

5512 s.t_wallclock = tt1 - tt0 

5513 if resource: 

5514 s.t_cpu = ( 

5515 (rs1.ru_utime + rs1.ru_stime + rc1.ru_utime + rc1.ru_stime) - 

5516 (rs0.ru_utime + rs0.ru_stime + rc0.ru_utime + rc0.ru_stime)) 

5517 s.n_read_blocks = ( 

5518 (rs1.ru_inblock + rc1.ru_inblock) - 

5519 (rs0.ru_inblock + rc0.ru_inblock)) 

5520 

5521 n_records_stacked = 0. 

5522 for results in results_list: 

5523 for result in results: 

5524 if not isinstance(result, meta.Result): 

5525 continue 

5526 shr = float(result.n_shared_stacking) 

5527 n_records_stacked += result.n_records_stacked / shr 

5528 s.t_perc_optimize += result.t_optimize / shr 

5529 s.t_perc_stack += result.t_stack / shr 

5530 s.n_records_stacked = int(n_records_stacked) 

5531 if t_dyn != 0.: 

5532 s.t_perc_optimize /= t_dyn * 100 

5533 s.t_perc_stack /= t_dyn * 100 

5534 

5535 return Response( 

5536 request=request, 

5537 results_list=results_list, 

5538 stats=s) 

5539 

5540 

class RemoteEngine(Engine):
    '''
    Client for remote synthetic seismogram calculator.
    '''

    # Web service selection: named site shortcut and explicit service URL.
    site = String.T(default=ws.g_default_site, optional=True)
    url = String.T(default=ws.g_url, optional=True)

    def process(self, request=None, status_callback=None, **kwargs):
        '''
        Process a request through the remote web service.

        If no ``request`` is given, one is built as ``Request(**kwargs)``.

        :returns: result of ``ws.seismosizer``

        .. note:: ``status_callback`` is accepted for interface
            compatibility with the local engine but is not used here.
        '''

        if request is None:
            request = Request(**kwargs)

        return ws.seismosizer(url=self.url, site=self.site, request=request)

5555 

5556 

# Shared engine singleton, created lazily by get_engine().
g_engine = None


def get_engine(store_superdirs=None):
    '''
    Get the shared global :py:class:`LocalEngine` instance.

    The engine is created lazily on first use, configured from environment
    variables and the user's config file.

    :param store_superdirs: additional store superdirectories to register
        on the shared engine (optional)
    '''
    global g_engine
    if g_engine is None:
        g_engine = LocalEngine(use_env=True, use_config=True)

    # ``None`` default instead of a mutable ``[]`` default argument.
    for d in store_superdirs or []:
        if d not in g_engine.store_superdirs:
            g_engine.store_superdirs.append(d)

    return g_engine

5570 

5571 

class SourceGroup(Object):
    '''
    Base class for groups of sources.

    Attribute access on the group gathers the corresponding attribute from
    each contained source into a float array.  Subclasses must provide
    iteration and length.
    '''

    def __getattr__(self, k):
        values = [getattr(source, k) for source in self]
        return num.array(values, dtype=float)

    def __iter__(self):
        raise NotImplementedError(
            'This method should be implemented in subclass.')

    def __len__(self):
        raise NotImplementedError(
            'This method should be implemented in subclass.')

5585 

5586 

class SourceList(SourceGroup):
    '''
    Plain list of sources.
    '''

    # Guts-declared container; kept as-is since it defines the serialized
    # schema.
    sources = List.T(Source.T())

    def append(self, s):
        '''
        Add source ``s`` to the end of the list.
        '''
        self.sources.append(s)

    def __iter__(self):
        return iter(self.sources)

    def __len__(self):
        return len(self.sources)

5598 

5599 

class SourceGrid(SourceGroup):
    '''
    Grid of sources, derived from a base source by varying parameters.
    '''

    base = Source.T()
    variables = Dict.T(String.T(), Range.T())
    order = List.T(String.T())

    def __len__(self):
        '''
        Total number of sources on the grid.
        '''
        total = 1
        for _, values in self.make_coords(self.base):
            total *= len(list(values))

        return total

    def __iter__(self):
        for combination in permudef(self.make_coords(self.base)):
            src = self.base.clone(**dict(combination))
            src.regularize()
            yield src

    def ordered_params(self):
        '''
        Yield the variable parameter names in canonical order.

        Order is taken from ``order`` first, then from the base source's
        parameter order.  Any leftover variable name is invalid for the
        base source type.
        '''
        remaining = list(self.variables.keys())
        for name in self.order + list(self.base.keys()):
            if name in remaining:
                yield name
                remaining.remove(name)

        if remaining:
            raise Exception('Invalid parameter "%s" for source type "%s".' %
                            (remaining[0], self.base.__class__.__name__))

    def make_coords(self, base):
        '''
        Build (parameter, values) pairs spanning the grid.
        '''
        return [(param, self.variables[param].make(base=base[param]))
                for param in self.ordered_params()]

5632 

5633 

# All source model classes exposed by this module; also used below to
# extend ``__all__``.
source_classes = [
    Source,
    SourceWithMagnitude,
    SourceWithDerivedMagnitude,
    ExplosionSource,
    RectangularExplosionSource,
    DCSource,
    CLVDSource,
    VLVDSource,
    MTSource,
    RectangularSource,
    PseudoDynamicRupture,
    DoubleDCSource,
    RingfaultSource,
    CombiSource,
    SFSource,
    PorePressurePointSource,
    PorePressureLineSource,
]

5653 

# All source-time-function classes exposed by this module; also used below
# to extend ``__all__``.
stf_classes = [
    STF,
    BoxcarSTF,
    TriangularSTF,
    HalfSinusoidSTF,
    ResonatorSTF,
]

5661 

# Public API of this module: explicitly listed names plus the names of all
# source and STF classes declared above.
__all__ = '''
SeismosizerError
BadRequest
NoSuchStore
DerivedMagnitudeError
STFMode
'''.split() + [S.__name__ for S in source_classes + stf_classes] + '''
Request
ProcessingStats
Response
Engine
LocalEngine
RemoteEngine
source_classes
get_engine
Range
SourceGroup
SourceList
SourceGrid
map_anchor
'''.split()