1# http://pyrocko.org - GPLv3 

2# 

3# The Pyrocko Developers, 21st Century 

4# ---|P------/S----------~Lg---------- 

5 

6''' 

7 

8 

9 

10.. _coordinate-system-names: 

11 

12Coordinate systems 

13.................. 

14 

15Coordinate system names commonly used in source models. 

16 

17================= ============================================ 

18Name Description 

19================= ============================================ 

20``'xyz'`` northing, easting, depth in [m] 

21``'xy'`` northing, easting in [m] 

22``'latlon'`` latitude, longitude in [deg] 

23``'lonlat'`` longitude, latitude in [deg] 

24``'latlondepth'`` latitude, longitude in [deg], depth in [m] 

25================= ============================================ 

26''' 

27 

28 

29from __future__ import absolute_import, division, print_function 

30 

31from collections import defaultdict 

32from functools import cmp_to_key 

33import time 

34import math 

35import os 

36import re 

37import logging 

38try: 

39 import resource 

40except ImportError: 

41 resource = None 

42from hashlib import sha1 

43 

44import numpy as num 

45from scipy.interpolate import RegularGridInterpolator 

46 

47from pyrocko.guts import (Object, Float, String, StringChoice, List, 

48 Timestamp, Int, SObject, ArgumentError, Dict, 

49 ValidationError, Bool) 

50from pyrocko.guts_array import Array 

51 

52from pyrocko import moment_tensor as pmt 

53from pyrocko import trace, util, config, model, eikonal_ext 

54from pyrocko.orthodrome import ne_to_latlon 

55from pyrocko.model import Location 

56from pyrocko.modelling import OkadaSource, make_okada_coefficient_matrix, \ 

57 okada_ext, invert_fault_dislocations_bem 

58 

59from . import meta, store, ws 

60from .tractions import TractionField, DirectedTractions 

61from .targets import Target, StaticTarget, SatelliteTarget 

62 

63pjoin = os.path.join 

64 

65guts_prefix = 'pf' 

66 

67d2r = math.pi / 180. 

68r2d = 180. / math.pi 

69km = 1e3 

70 

71logger = logging.getLogger('pyrocko.gf.seismosizer') 

72 

73 

def cmp_none_aware(a, b):
    '''
    Three-way comparison that tolerates ``None`` values.

    ``None`` sorts before any other value; tuples are compared
    element-wise, recursively.

    :returns: ``-1``, ``0`` or ``1``
    '''
    if isinstance(a, tuple) and isinstance(b, tuple):
        for ea, eb in zip(a, b):
            result = cmp_none_aware(ea, eb)
            if result:
                return result

        return 0

    if a is None:
        return 0 if b is None else -1

    if b is None:
        return 1

    return int(a > b) - int(a < b)

96 

97 

def xtime():
    '''
    Return the current wall-clock time in seconds since the epoch.

    Thin indirection over :py:func:`time.time` (single point to patch in
    tests).
    '''
    now = time.time()
    return now

100 

101 

class SeismosizerError(Exception):
    '''
    Base exception for errors raised by the seismosizer engine.
    '''
    pass

104 

105 

class BadRequest(SeismosizerError):
    '''
    Raised when an invalid request is made to the seismosizer.
    '''
    pass

108 

109 

class DuplicateStoreId(Exception):
    '''
    Raised when two GF stores with the same id are encountered.
    '''
    pass

112 

113 

class NoDefaultStoreSet(Exception):
    '''
    Raised when a default GF store is required but none is configured.
    '''
    pass

116 

117 

class ConversionError(Exception):
    '''
    Raised when a conversion between source representations fails.
    '''
    pass

120 

121 

class NoSuchStore(BadRequest):
    '''
    Raised when a GF store with the requested id cannot be located.
    '''

    def __init__(self, store_id=None, dirs=None):
        BadRequest.__init__(self)
        self.store_id = store_id
        self.dirs = dirs

    def __str__(self):
        if self.store_id is None:
            message = 'GF store not found.'
        else:
            message = 'no GF store with id "%s" found.' % self.store_id

        if self.dirs is not None:
            message += ' Searched folders:\n %s' % '\n '.join(
                sorted(self.dirs))

        return message

138 

139 

def ufloat(s):
    '''
    Convert a string to ``float``, allowing metric unit suffixes.

    Recognized suffixes: ``k`` (1e3) and ``M`` (1e6), e.g. ``'10k'`` ->
    ``10000.0``.

    :param s: number string, optionally with a trailing unit character
    :returns: parsed value as ``float``
    :raises ValueError: if the string consists only of a unit character,
        or is not a valid number
    '''
    units = {
        'k': 1e3,
        'M': 1e6,
    }

    # Work on a copy so the original input is still available for error
    # reporting; the old code formatted the truncated (empty) string into
    # the message, producing an uninformative error.
    text = s
    factor = 1.0
    if text and text[-1] in units:
        factor = units[text[-1]]
        text = text[:-1]
        if not text:
            raise ValueError('unit without a number: \'%s\'' % s)

    return float(text) * factor

154 

155 

def ufloat_or_none(s):
    '''
    Like :py:func:`ufloat`, but map empty or ``None`` input to ``None``.
    '''
    return ufloat(s) if s else None

161 

162 

def int_or_none(s):
    '''
    Convert a string to ``int``, mapping empty or ``None`` input to
    ``None``.
    '''
    return int(s) if s else None

168 

169 

def nonzero(x, eps=1e-15):
    '''
    Tell whether ``x`` deviates from zero by more than ``eps``.
    '''
    return eps < abs(x)

172 

173 

def permudef(ln, j=0):
    '''
    Generate all parameter combinations from a list of
    ``(key, values)`` pairs.

    ``ln`` is mutated in place while iterating: position ``j`` is
    temporarily replaced by ``(key, single_value)`` and the same list
    object is yielded for each combination. After exhaustion the
    original ``(key, values)`` entry is restored.
    '''
    if j >= len(ln):
        yield ln
        return

    key, choices = ln[j]
    for choice in choices:
        ln[j] = key, choice
        for combination in permudef(ln, j + 1):
            yield combination

    ln[j] = key, choices

186 

187 

def arr(x):
    '''
    Coerce ``x`` to a numpy array with at least one dimension.
    '''
    a = num.asarray(x)
    if a.ndim == 0:
        # Promote scalars to one-element 1-d arrays.
        a = a[num.newaxis]

    return a

190 

191 

def discretize_rect_source(deltas, deltat, time, north, east, depth,
                           strike, dip, length, width,
                           anchor, velocity=None, stf=None,
                           nucleation_x=None, nucleation_y=None,
                           decimation_factor=1, pointsonly=False,
                           plane_coords=False,
                           aggressive_oversampling=False):
    '''
    Discretize a rectangular fault plane into a regular grid of point
    sources.

    :param deltas: GF store grid spacings, used to choose the sub-source
        spacing
    :param deltat: sampling interval for the time discretization
    :param time: source origin time
    :param velocity: rupture velocity; required unless ``pointsonly``
    :param stf: source time function, defaults to a unit pulse
    :param nucleation_x, nucleation_y: rupture nucleation point in plane
        coordinates (used to compute rupture onset times)
    :param pointsonly: return only the point geometry, no times/amplitudes
    :param plane_coords: return points in unrotated plane coordinates
    :returns: ``(points, dl, dw, nl, nw)`` in geometry-only modes,
        otherwise ``(points2, times2, amplitudes2, dl, dw, nl, nw)``
    :raises AttributeError: if ``velocity`` is missing in time mode
    '''

    if stf is None:
        stf = STF()

    if not velocity and not pointsonly:
        raise AttributeError('velocity is required in time mode')

    mindeltagf = float(num.min(deltas))
    if velocity:
        # Also resolve the rupture front in time: one sub-source per
        # distance travelled in one sample.
        mindeltagf = min(mindeltagf, deltat * velocity)

    ln = length
    wd = width

    if aggressive_oversampling:
        nl = int((2. / decimation_factor) * num.ceil(ln / mindeltagf)) + 1
        nw = int((2. / decimation_factor) * num.ceil(wd / mindeltagf)) + 1
    else:
        nl = int((1. / decimation_factor) * num.ceil(ln / mindeltagf)) + 1
        nw = int((1. / decimation_factor) * num.ceil(wd / mindeltagf)) + 1

    n = int(nl * nw)

    dl = ln / nl
    dw = wd / nw

    xl = num.linspace(-0.5 * (ln - dl), 0.5 * (ln - dl), nl)
    xw = num.linspace(-0.5 * (wd - dw), 0.5 * (wd - dw), nw)

    # Bug fix: num.float was a deprecated alias removed in NumPy 1.24;
    # use the builtin float type.
    points = num.zeros((n, 3), dtype=float)
    points[:, 0] = num.tile(xl, nw)
    points[:, 1] = num.repeat(xw, nl)

    if nucleation_x is not None:
        dist_x = num.abs(nucleation_x - points[:, 0])
    else:
        dist_x = num.zeros(n)

    if nucleation_y is not None:
        dist_y = num.abs(nucleation_y - points[:, 1])
    else:
        dist_y = num.zeros(n)

    dist = num.sqrt(dist_x**2 + dist_y**2)

    # Bug fix: only compute rupture onset times when a velocity is given.
    # In the geometry-only modes (pointsonly/plane_coords), velocity may
    # legitimately be None (see check above) and the old unconditional
    # division raised a TypeError.
    times = dist / velocity if velocity else None

    anch_x, anch_y = map_anchor[anchor]

    points[:, 0] -= anch_x * 0.5 * length
    points[:, 1] -= anch_y * 0.5 * width

    if plane_coords:
        return points, dl, dw, nl, nw

    rotmat = num.asarray(
        pmt.euler_to_matrix(dip * d2r, strike * d2r, 0.0))
    points = num.dot(rotmat.T, points.T).T

    points[:, 0] += north
    points[:, 1] += east
    points[:, 2] += depth

    if pointsonly:
        return points, dl, dw, nl, nw

    # Convolve the rupture-onset times with the STF's discretization:
    # every sub-source is replicated for each STF sample.
    xtau, amplitudes = stf.discretize_t(deltat, time)
    nt = xtau.size

    points2 = num.repeat(points, nt, axis=0)
    times2 = (times[:, num.newaxis] + xtau[num.newaxis, :]).ravel()
    amplitudes2 = num.tile(amplitudes, n)

    return points2, times2, amplitudes2, dl, dw, nl, nw

272 

273 

def check_rect_source_discretisation(points2, nl, nw, store):
    '''
    Check whether a rectangular-source discretization is fine enough for
    the given GF store.

    Assumes a non-rotated fault plane. Compares the spacing between
    neighbouring sub-source rows against half the minimum S-wavelength
    resolvable at the store's sampling rate.

    :returns: ``True`` if the discretization is acceptable (or trivially
        small), ``False`` otherwise
    '''
    N_CRITICAL = 8

    grid = points2.T.reshape((3, nl, nw))
    if grid.size <= N_CRITICAL:
        logger.warning('RectangularSource is defined by only %d sub-sources!'
                       % grid.size)
        return True

    # Distance between the first two rows of sub-sources, per column.
    row_deltas = grid[:, 0, :] - grid[:, 1, :]
    distances = num.sqrt(num.sum(row_deltas**2, axis=0))

    depths = grid[2, 0, :]
    vs_profile = store.config.get_vs(
        lat=0., lon=0.,
        points=num.repeat(depths[:, num.newaxis], 3, axis=1),
        interpolation='multilinear')

    min_wavelength = vs_profile * (store.config.deltat * 2)
    return bool(num.all(min_wavelength > distances / 2))

298 

299 

def outline_rect_source(strike, dip, length, width, anchor):
    '''
    Compute the closed outline (5 corner points, first repeated last) of
    a rectangular source in NED coordinates.

    The rectangle is shifted according to the ``anchor`` point, then
    rotated by ``strike`` and ``dip``.
    '''
    hl = 0.5 * length
    hw = 0.5 * width

    corners = num.array(
        [[-hl, -hw, 0.],
         [hl, -hw, 0.],
         [hl, hw, 0.],
         [-hl, hw, 0.],
         [-hl, -hw, 0.]])

    anch_x, anch_y = map_anchor[anchor]
    corners[:, 0] -= anch_x * hl
    corners[:, 1] -= anch_y * hw

    rotmat = num.asarray(
        pmt.euler_to_matrix(dip * d2r, strike * d2r, 0.0))

    return num.dot(rotmat.T, corners.T).T

318 

319 

def from_plane_coords(
        strike, dip, length, width, depth, x_plane_coords, y_plane_coords,
        lat=0., lon=0.,
        north_shift=0, east_shift=0,
        anchor='top', cs='xy'):
    '''
    Convert relative plane coordinates to absolute coordinates.

    ``x_plane_coords``/``y_plane_coords`` are fractional positions on the
    fault plane (scalars or lists). The plane is anchored, rotated by
    ``strike``/``dip``, shifted by ``north_shift``/``east_shift``/``depth``
    and expressed in the coordinate system ``cs``
    (see :ref:`coordinate-system-names`).

    :returns: pair of lists ``(x_abs, y_abs)``; for ``cs='lonlat'`` the
        pair is returned in swapped order (see final branch)
    '''

    ln = length
    wd = width
    x_abs = []
    y_abs = []
    # Scalars are promoted to one-element lists; assumes x and y inputs
    # are either both lists or both scalars -- TODO confirm.
    if not isinstance(x_plane_coords, list):
        x_plane_coords = [x_plane_coords]
        y_plane_coords = [y_plane_coords]

    for x_plane, y_plane in zip(x_plane_coords, y_plane_coords):
        points = num.array(
            [[-0.5 * ln * x_plane, -0.5 * wd * y_plane, 0.],
             [0.5 * ln * x_plane, -0.5 * wd * y_plane, 0.],
             [0.5 * ln * x_plane, 0.5 * wd * y_plane, 0.],
             [-0.5 * ln * x_plane, 0.5 * wd * y_plane, 0.],
             [-0.5 * ln * x_plane, -0.5 * wd * y_plane, 0.]])

        anch_x, anch_y = map_anchor[anchor]
        points[:, 0] -= anch_x * 0.5 * length
        points[:, 1] -= anch_y * 0.5 * width

        # Rotate from plane coordinates into NED, then translate.
        rotmat = num.asarray(
            pmt.euler_to_matrix(dip * d2r, strike * d2r, 0.0))

        points = num.dot(rotmat.T, points.T).T
        points[:, 0] += north_shift
        points[:, 1] += east_shift
        points[:, 2] += depth
        if cs in ('latlon', 'lonlat'):
            latlon = ne_to_latlon(lat, lon,
                                  points[:, 0], points[:, 1])
            latlon = num.array(latlon).T
            # NOTE(review): only single rows (1 resp. 2) of the corner
            # array are extracted here -- presumably intentional, but the
            # asymmetry looks suspicious; verify against callers.
            x_abs.append(latlon[1:2, 1])
            y_abs.append(latlon[2:3, 0])
        if cs == 'xy':
            x_abs.append(points[1:2, 1])
            y_abs.append(points[2:3, 0])

    if cs == 'lonlat':
        return y_abs, x_abs
    else:
        return x_abs, y_abs

367 

368 

def points_on_rect_source(
        strike, dip, length, width, anchor,
        discretized_basesource=None, points_x=None, points_y=None):
    '''
    Compute rotated positions of points on a rectangular source.

    Either pass ``discretized_basesource`` to obtain the grid of patch
    corner points, or pass normalized plane coordinates
    ``points_x``/``points_y`` (scalars, lists or arrays in ``[-1., 1.]``).

    :returns: ``(npoints, 3)`` array of point coordinates, anchored and
        rotated by ``strike``/``dip``
    '''

    ln = length
    wd = width

    if isinstance(points_x, list) or isinstance(points_x, float):
        points_x = num.array([points_x])
    if isinstance(points_y, list) or isinstance(points_y, float):
        points_y = num.array([points_y])

    if discretized_basesource:
        ds = discretized_basesource

        # Corner grid: one more node than patches in each direction.
        nl_patches = ds.nl + 1
        nw_patches = ds.nw + 1

        npoints = nl_patches * nw_patches
        points = num.zeros((npoints, 3))
        ln_patches = num.array([il for il in range(nl_patches)])
        wd_patches = num.array([iw for iw in range(nw_patches)])

        # Normalize node indices to [-1., 1.] along length and width.
        points_ln =\
            2 * ((ln_patches - num.min(ln_patches)) / num.ptp(ln_patches)) - 1
        points_wd =\
            2 * ((wd_patches - num.min(wd_patches)) / num.ptp(wd_patches)) - 1

        for il in range(nl_patches):
            for iw in range(nw_patches):
                points[il * nw_patches + iw, :] = num.array([
                    points_ln[il] * ln * 0.5,
                    points_wd[iw] * wd * 0.5, 0.0])

    elif points_x.any() and points_y.any():
        points = num.zeros(shape=((len(points_x), 3)))
        for i, (x, y) in enumerate(zip(points_x, points_y)):
            points[i, :] = num.array(
                [x * 0.5 * ln, y * 0.5 * wd, 0.0])

    # NOTE(review): if neither branch above applies (no basesource and
    # missing/all-zero point coordinates), ``points`` is unbound and the
    # code below raises -- callers presumably always supply one of the
    # two inputs; confirm.
    anch_x, anch_y = map_anchor[anchor]

    points[:, 0] -= anch_x * 0.5 * ln
    points[:, 1] -= anch_y * 0.5 * wd

    rotmat = num.asarray(
        pmt.euler_to_matrix(dip * d2r, strike * d2r, 0.0))

    return num.dot(rotmat.T, points.T).T

418 

419 

class InvalidGridDef(Exception):
    '''
    Raised when a grid or range specification string cannot be parsed.
    '''
    pass

422 

423 

class Range(SObject):
    '''
    Convenient range specification.

    Equivalent ways to specify the range [ 0., 1000., ... 10000. ]::

      Range('0 .. 10k : 1k')
      Range(start=0., stop=10e3, step=1e3)
      Range(0, 10e3, 1e3)
      Range('0 .. 10k @ 11')
      Range(start=0., stop=10*km, n=11)

      Range(0, 10e3, n=11)
      Range(values=[x*1e3 for x in range(11)])

    Depending on the use context, it can be possible to omit any part of the
    specification. E.g. in the context of extracting a subset of an already
    existing range, the existing range's specification values would be filled
    in where missing.

    The values are distributed with equal spacing, unless the ``spacing``
    argument is modified. The values can be created offset or relative to an
    external base value with the ``relative`` argument if the use context
    supports this.

    The range specification can be expressed with a short string
    representation::

        'start .. stop @ num | spacing, relative'
        'start .. stop : step | spacing, relative'

    most parts of the expression can be omitted if not needed. Whitespace is
    allowed for readability but can also be omitted.
    '''

    start = Float.T(optional=True)
    stop = Float.T(optional=True)
    step = Float.T(optional=True)
    n = Int.T(optional=True)
    values = Array.T(optional=True, dtype=float, shape=(None,))

    spacing = StringChoice.T(
        choices=['lin', 'log', 'symlog'],
        default='lin',
        optional=True)

    relative = StringChoice.T(
        choices=['', 'add', 'mult'],
        default='',
        optional=True)

    # Matches 'start .. stop', then optionally '@n' or ':step', then
    # optionally '| stuff' (spacing/relative flags).
    pattern = re.compile(r'^((?P<start>.*)\.\.(?P<stop>[^@|:]*))?'
                         r'(@(?P<n>[^|]+)|:(?P<step>[^|]+))?'
                         r'(\|(?P<stuff>.+))?$')

    def __init__(self, *args, **kwargs):
        # Accepts a single shorthand string, (start, stop[, step])
        # positionals, or plain keyword arguments.
        d = {}
        if len(args) == 1:
            d = self.parse(args[0])
        elif len(args) in (2, 3):
            d['start'], d['stop'] = [float(x) for x in args[:2]]
            if len(args) == 3:
                d['step'] = float(args[2])

        for k, v in kwargs.items():
            if k in d:
                raise ArgumentError('%s specified more than once' % k)

            d[k] = v

        SObject.__init__(self, **d)

    def __str__(self):
        # Render back to the short string representation.
        def sfloat(x):
            if x is not None:
                return '%g' % x
            else:
                return ''

        if self.values:
            return ','.join('%g' % x for x in self.values)

        if self.start is None and self.stop is None:
            s0 = ''
        else:
            s0 = '%s .. %s' % (sfloat(self.start), sfloat(self.stop))

        s1 = ''
        if self.step is not None:
            s1 = [' : %g', ':%g'][s0 == ''] % self.step
        elif self.n is not None:
            s1 = [' @ %i', '@%i'][s0 == ''] % self.n

        if self.spacing == 'lin' and self.relative == '':
            s2 = ''
        else:
            x = []
            if self.spacing != 'lin':
                x.append(self.spacing)

            if self.relative != '':
                x.append(self.relative)

            s2 = ' | %s' % ','.join(x)

        return s0 + s1 + s2

    @classmethod
    def parse(cls, s):
        '''
        Parse a range shorthand string into a dict of constructor
        arguments.

        :raises InvalidGridDef: if the string is not a valid range
            specification
        '''
        s = re.sub(r'\s+', '', s)
        m = cls.pattern.match(s)
        if not m:
            # Fall back to a plain comma-separated list of values.
            try:
                vals = [ufloat(x) for x in s.split(',')]
            except Exception:
                raise InvalidGridDef(
                    '"%s" is not a valid range specification' % s)

            return dict(values=num.array(vals, dtype=float))

        d = m.groupdict()
        try:
            start = ufloat_or_none(d['start'])
            stop = ufloat_or_none(d['stop'])
            step = ufloat_or_none(d['step'])
            n = int_or_none(d['n'])
        except Exception:
            raise InvalidGridDef(
                '"%s" is not a valid range specification' % s)

        spacing = 'lin'
        relative = ''

        if d['stuff'] is not None:
            t = d['stuff'].split(',')
            for x in t:
                if x in cls.spacing.choices:
                    spacing = x
                elif x and x in cls.relative.choices:
                    relative = x
                else:
                    raise InvalidGridDef(
                        '"%s" is not a valid range specification' % s)

        return dict(start=start, stop=stop, step=step, n=n, spacing=spacing,
                    relative=relative)

    def make(self, mi=None, ma=None, inc=None, base=None, eps=1e-5):
        '''
        Create the list of values described by this range.

        :param mi: fallback minimum used when ``start`` is unset
        :param ma: fallback maximum used when ``stop`` is unset
        :param inc: fallback increment used when ``step`` is unset
        :param base: base value for relative ranges (``'add'``/``'mult'``)
        :param eps: tolerance when checking step/stop consistency
        :returns: list of floats
        :raises InvalidGridDef: on incomplete or inconsistent
            specifications
        '''
        if self.values:
            return self.values

        start = self.start
        stop = self.stop
        step = self.step
        n = self.n

        # A negative step means counting downwards: swap the fallback
        # bounds accordingly.
        swap = step is not None and step < 0.
        if start is None:
            start = [mi, ma][swap]
        if stop is None:
            stop = [ma, mi][swap]
        if step is None and inc is not None:
            step = [inc, -inc][ma < mi]

        if start is None or stop is None:
            raise InvalidGridDef(
                'Cannot use range specification "%s" without start '
                'and stop in this context' % self)

        if step is None and n is None:
            step = stop - start

        if n is None:
            if (step < 0) != (stop - start < 0):
                raise InvalidGridDef(
                    'Range specification "%s" has inconsistent ordering '
                    '(step < 0 => stop > start)' % self)

            n = int(round((stop - start) / step)) + 1
            stop2 = start + (n - 1) * step
            if abs(stop - stop2) > eps:
                # Step does not evenly divide the interval: shorten the
                # range to the last full step.
                n = int(math.floor((stop - start) / step)) + 1
                stop = start + (n - 1) * step
            else:
                stop = stop2

        if start == stop:
            n = 1

        if self.spacing == 'lin':
            vals = num.linspace(start, stop, n)

        elif self.spacing in ('log', 'symlog'):
            if start > 0. and stop > 0.:
                vals = num.exp(num.linspace(num.log(start),
                                            num.log(stop), n))
            elif start < 0. and stop < 0.:
                vals = -num.exp(num.linspace(num.log(-start),
                                             num.log(-stop), n))
            else:
                raise InvalidGridDef(
                    'Log ranges should not include or cross zero '
                    '(in range specification "%s").' % self)

            if self.spacing == 'symlog':
                # Mirror the values around zero.
                nvals = - vals
                vals = num.concatenate((nvals[::-1], vals))

        if self.relative in ('add', 'mult') and base is None:
            raise InvalidGridDef(
                'Cannot use relative range specification in this context.')

        vals = self.make_relative(base, vals)

        return list(map(float, vals))

    def make_relative(self, base, vals):
        '''
        Apply the ``'add'``/``'mult'`` relative transformation to ``vals``
        (in place, for array input).
        '''
        if self.relative == 'add':
            vals += base

        if self.relative == 'mult':
            vals *= base

        return vals

648 

649 

class GridDefElement(Object):
    '''
    Single element of a grid definition: a parameter name paired with a
    :py:class:`Range`, parseable from a ``'param = range'`` shorthand.
    '''

    param = meta.StringID.T()
    rs = Range.T()

    def __init__(self, shorthand=None, **kwargs):
        if shorthand is not None:
            parts = shorthand.split('=')
            if len(parts) != 2:
                raise InvalidGridDef(
                    'Invalid grid specification element: %s' % shorthand)

            kwargs['param'] = parts[0].strip()
            kwargs['rs'] = Range(parts[1].strip())

        Object.__init__(self, **kwargs)

    def shorthand(self):
        '''
        Render back to the ``'param = range'`` shorthand form.
        '''
        return self.param + ' = ' + str(self.rs)

671 

672 

class GridDef(Object):
    '''
    Grid definition: a collection of parameter ranges, parseable from a
    shorthand string with elements separated by ``';'`` or newlines.
    '''

    elements = List.T(GridDefElement.T())

    def __init__(self, shorthand=None, **kwargs):
        if shorthand is not None:
            t = shorthand.splitlines()
            tt = []
            for x in t:
                x = x.strip()
                if x:
                    tt.extend(x.split(';'))

            elements = []
            for se in tt:
                # Bug fix: each 'param = range' chunk is a grid *element*.
                # The old code constructed GridDef(se), producing objects
                # of the wrong type (violating the List.T(GridDefElement.T())
                # declaration above) and failing to parse the chunk.
                elements.append(GridDefElement(se))

            kwargs['elements'] = elements

        Object.__init__(self, **kwargs)

    def shorthand(self):
        # NOTE(review): joins str(x) rather than x.shorthand(); kept as-is
        # to preserve existing output -- confirm intended.
        return '; '.join(str(x) for x in self.elements)

696 

697 

class Cloneable(object):
    '''
    Mixin providing dict-like access to guts properties and cloning.
    '''

    def __iter__(self):
        # Iterate over the guts property names declared on the class.
        return iter(self.T.propnames)

    def __getitem__(self, k):
        if k not in self.keys():
            raise KeyError(k)

        return getattr(self, k)

    def __setitem__(self, k, v):
        if k not in self.keys():
            raise KeyError(k)

        return setattr(self, k, v)

    def clone(self, **kwargs):
        '''
        Make a copy of the object.

        A new object of the same class is created and initialized with the
        parameters of the object on which this method is called on. If
        ``kwargs`` are given, these are used to override any of the
        initialization parameters.
        '''

        d = dict(self)
        for k in d:
            v = d[k]
            if isinstance(v, Cloneable):
                # Recursively copy nested cloneable values so the clone is
                # independent of the original.
                d[k] = v.clone()

        d.update(kwargs)
        return self.__class__(**d)

    @classmethod
    def keys(cls):
        '''
        Get list of the source model's parameter names.
        '''

        return cls.T.propnames

741 

742 

class STF(Object, Cloneable):

    '''
    Base class for source time functions.
    '''

    def __init__(self, effective_duration=None, **kwargs):
        # Allow construction from an effective duration; subclasses define
        # the conversion factor to their native ``duration`` parameter.
        if effective_duration is not None:
            kwargs['duration'] = effective_duration / \
                self.factor_duration_to_effective()

        Object.__init__(self, **kwargs)

    @classmethod
    def factor_duration_to_effective(cls):
        '''
        Ratio of effective duration to the ``duration`` parameter.
        '''
        return 1.0

    def centroid_time(self, tref):
        '''
        Centroid time of the STF for reference time ``tref``.
        '''
        return tref

    @property
    def effective_duration(self):
        return self.duration * self.factor_duration_to_effective()

    def discretize_t(self, deltat, tref):
        '''
        Discretize the STF on a regular time grid.

        Base implementation: a unit pulse at ``tref``, split between the
        two neighbouring grid nodes by linear interpolation.

        :param deltat: sampling interval
        :param tref: reference (source) time
        :returns: ``(times, amplitudes)`` arrays; amplitudes sum to 1
        '''
        tl = math.floor(tref / deltat) * deltat
        th = math.ceil(tref / deltat) * deltat
        if tl == th:
            return num.array([tl], dtype=float), num.ones(1)
        else:
            return (
                num.array([tl, th], dtype=float),
                num.array([th - tref, tref - tl], dtype=float) / deltat)

    def base_key(self):
        '''
        Hashable tuple identifying this STF configuration.
        '''
        return (type(self).__name__,)

779 

780 

# Module-level default STF: a unit impulse at the source time.
g_unit_pulse = STF()

782 

783 

def sshift(times, amplitudes, tshift, deltat):
    '''
    Shift a sampled time series by ``tshift``.

    A shift which is an exact multiple of ``deltat`` only moves the time
    axis; otherwise each sample's amplitude is split between the two
    neighbouring grid nodes by linear interpolation, extending the
    series by one sample.

    :returns: ``(times, amplitudes)`` of the shifted series
    '''
    t_lo = math.floor(tshift / deltat) * deltat
    t_hi = math.ceil(tshift / deltat) * deltat

    if t_lo == t_hi:
        # On-grid shift: nothing to interpolate. NOTE: the time axis is
        # intentionally left unchanged here, as in the original.
        return times, amplitudes

    shifted = num.zeros(amplitudes.size + 1, dtype=float)
    shifted[:-1] += (t_hi - tshift) / deltat * amplitudes
    shifted[1:] += (tshift - t_lo) / deltat * amplitudes

    new_times = num.arange(times.size + 1, dtype=float) * \
        deltat + times[0] + t_lo

    return new_times, shifted

800 

801 

class BoxcarSTF(STF):

    '''
    Boxcar type source time function.

    .. figure :: /static/stf-BoxcarSTF.svg
        :width: 40%
        :align: center
        :alt: boxcar source time function
    '''

    duration = Float.T(
        default=0.0,
        help='duration of the boxcar')

    anchor = Float.T(
        default=0.0,
        help='anchor point with respect to source.time: ('
             '-1.0: left -> source duration [0, T] ~ hypocenter time, '
             ' 0.0: center -> source duration [-T/2, T/2] ~ centroid time, '
             '+1.0: right -> source duration [-T, 0] ~ rupture end time)')

    @classmethod
    def factor_duration_to_effective(cls):
        return 1.0

    def centroid_time(self, tref):
        return tref - 0.5 * self.duration * self.anchor

    def discretize_t(self, deltat, tref):
        '''
        Discretize the boxcar on a regular time grid.

        :returns: ``(times, amplitudes)``; amplitudes sum to 1
        '''
        # Snap the boxcar's extent to the sampling grid.
        tmin_stf = tref - self.duration * (self.anchor + 1.) * 0.5
        tmax_stf = tref + self.duration * (1. - self.anchor) * 0.5
        tmin = round(tmin_stf / deltat) * deltat
        tmax = round(tmax_stf / deltat) * deltat
        nt = int(round((tmax - tmin) / deltat)) + 1
        times = num.linspace(tmin, tmax, nt)
        amplitudes = num.ones_like(times)
        if times.size > 1:
            # Integrate the boxcar (as a piecewise-linear function) over
            # each sample cell, then normalize to unit total.
            t_edges = num.linspace(
                tmin - 0.5 * deltat, tmax + 0.5 * deltat, nt + 1)
            t = tmin_stf + self.duration * num.array(
                [0.0, 0.0, 1.0, 1.0], dtype=float)
            f = num.array([0., 1., 1., 0.], dtype=float)
            amplitudes = util.plf_integrate_piecewise(t_edges, t, f)
            amplitudes /= num.sum(amplitudes)

        # Compensate the centroid offset introduced by grid snapping.
        tshift = (num.sum(amplitudes * times) - self.centroid_time(tref))

        return sshift(times, amplitudes, -tshift, deltat)

    def base_key(self):
        return (type(self).__name__, self.duration, self.anchor)

854 

855 

class TriangularSTF(STF):

    '''
    Triangular type source time function.

    .. figure :: /static/stf-TriangularSTF.svg
        :width: 40%
        :align: center
        :alt: triangular source time function
    '''

    duration = Float.T(
        default=0.0,
        help='baseline of the triangle')

    peak_ratio = Float.T(
        default=0.5,
        help='fraction of time compared to duration, '
             'when the maximum amplitude is reached')

    anchor = Float.T(
        default=0.0,
        help='anchor point with respect to source.time: ('
             '-1.0: left -> source duration [0, T] ~ hypocenter time, '
             ' 0.0: center -> source duration [-T/2, T/2] ~ centroid time, '
             '+1.0: right -> source duration [-T, 0] ~ rupture end time)')

    @classmethod
    def factor_duration_to_effective(cls, peak_ratio=None):
        '''
        Ratio of effective duration to baseline for given ``peak_ratio``
        (defaults to the class default).
        '''
        if peak_ratio is None:
            peak_ratio = cls.peak_ratio.default()

        return math.sqrt((peak_ratio**2 - peak_ratio + 1.0) * 2.0 / 3.0)

    def __init__(self, effective_duration=None, **kwargs):
        # Conversion factor depends on peak_ratio, so the base-class
        # effective_duration handling is overridden here.
        if effective_duration is not None:
            kwargs['duration'] = effective_duration / \
                self.factor_duration_to_effective(
                    kwargs.get('peak_ratio', None))

        STF.__init__(self, **kwargs)

    @property
    def centroid_ratio(self):
        # Centroid position of the triangle as a fraction of duration.
        ra = self.peak_ratio
        rb = 1.0 - ra
        return self.peak_ratio + (rb**2 / 3. - ra**2 / 3.) / (ra + rb)

    def centroid_time(self, tref):
        ca = self.centroid_ratio
        cb = 1.0 - ca
        if self.anchor <= 0.:
            return tref - ca * self.duration * self.anchor
        else:
            return tref - cb * self.duration * self.anchor

    @property
    def effective_duration(self):
        return self.duration * self.factor_duration_to_effective(
            self.peak_ratio)

    def tminmax_stf(self, tref):
        '''
        Start and end time of the STF for reference time ``tref``.
        '''
        ca = self.centroid_ratio
        cb = 1.0 - ca
        if self.anchor <= 0.:
            tmin_stf = tref - ca * self.duration * (self.anchor + 1.)
            tmax_stf = tmin_stf + self.duration
        else:
            tmax_stf = tref + cb * self.duration * (1. - self.anchor)
            tmin_stf = tmax_stf - self.duration

        return tmin_stf, tmax_stf

    def discretize_t(self, deltat, tref):
        '''
        Discretize the triangle on a regular time grid.

        :returns: ``(times, amplitudes)``; amplitudes sum to 1
        '''
        tmin_stf, tmax_stf = self.tminmax_stf(tref)

        # Snap the extent to the sampling grid.
        tmin = round(tmin_stf / deltat) * deltat
        tmax = round(tmax_stf / deltat) * deltat
        nt = int(round((tmax - tmin) / deltat)) + 1
        if nt > 1:
            # Integrate the triangle over each sample cell, then
            # normalize to unit total.
            t_edges = num.linspace(
                tmin - 0.5 * deltat, tmax + 0.5 * deltat, nt + 1)
            t = tmin_stf + self.duration * num.array(
                [0.0, self.peak_ratio, 1.0], dtype=float)
            f = num.array([0., 1., 0.], dtype=float)
            amplitudes = util.plf_integrate_piecewise(t_edges, t, f)
            amplitudes /= num.sum(amplitudes)
        else:
            amplitudes = num.ones(1)

        times = num.linspace(tmin, tmax, nt)
        return times, amplitudes

    def base_key(self):
        return (
            type(self).__name__, self.duration, self.peak_ratio, self.anchor)

952 

953 

class HalfSinusoidSTF(STF):

    '''
    Half sinusoid type source time function.

    .. figure :: /static/stf-HalfSinusoidSTF.svg
        :width: 40%
        :align: center
        :alt: half-sinusouid source time function
    '''

    duration = Float.T(
        default=0.0,
        help='duration of the half-sinusoid (baseline)')

    anchor = Float.T(
        default=0.0,
        help='anchor point with respect to source.time: ('
             '-1.0: left -> source duration [0, T] ~ hypocenter time, '
             ' 0.0: center -> source duration [-T/2, T/2] ~ centroid time, '
             '+1.0: right -> source duration [-T, 0] ~ rupture end time)')

    exponent = Int.T(
        default=1,
        help='set to 2 to use square of the half-period sinusoidal function.')

    def __init__(self, effective_duration=None, **kwargs):
        # Conversion factor depends on exponent, so the base-class
        # effective_duration handling is overridden here.
        if effective_duration is not None:
            kwargs['duration'] = effective_duration / \
                self.factor_duration_to_effective(
                    kwargs.get('exponent', 1))

        STF.__init__(self, **kwargs)

    @classmethod
    def factor_duration_to_effective(cls, exponent):
        '''
        Ratio of effective duration to baseline for given ``exponent``.

        :raises ValueError: if ``exponent`` is not 1 or 2
        '''
        if exponent == 1:
            return math.sqrt(3.0 * math.pi**2 - 24.0) / math.pi
        elif exponent == 2:
            return math.sqrt(math.pi**2 - 6) / math.pi
        else:
            raise ValueError('Exponent for HalfSinusoidSTF must be 1 or 2.')

    @property
    def effective_duration(self):
        return self.duration * self.factor_duration_to_effective(self.exponent)

    def centroid_time(self, tref):
        return tref - 0.5 * self.duration * self.anchor

    def discretize_t(self, deltat, tref):
        '''
        Discretize the (squared) half-sinusoid on a regular time grid.

        :returns: ``(times, amplitudes)``; amplitudes sum to 1
        :raises ValueError: if ``exponent`` is not 1 or 2
        '''
        # Snap the extent to the sampling grid.
        tmin_stf = tref - self.duration * (self.anchor + 1.) * 0.5
        tmax_stf = tref + self.duration * (1. - self.anchor) * 0.5
        tmin = round(tmin_stf / deltat) * deltat
        tmax = round(tmax_stf / deltat) * deltat
        nt = int(round((tmax - tmin) / deltat)) + 1
        if nt > 1:
            # Sample cell edges, clipped to the STF's support.
            t_edges = num.maximum(tmin_stf, num.minimum(tmax_stf, num.linspace(
                tmin - 0.5 * deltat, tmax + 0.5 * deltat, nt + 1)))

            # Evaluate the antiderivative at the cell edges; cell
            # integrals follow by differencing below.
            if self.exponent == 1:
                fint = -num.cos(
                    (t_edges - tmin_stf) * (math.pi / self.duration))

            elif self.exponent == 2:
                fint = (t_edges - tmin_stf) / self.duration \
                    - 1.0 / (2.0 * math.pi) * num.sin(
                        (t_edges - tmin_stf) * (2.0 * math.pi / self.duration))
            else:
                raise ValueError(
                    'Exponent for HalfSinusoidSTF must be 1 or 2.')

            amplitudes = fint[1:] - fint[:-1]
            amplitudes /= num.sum(amplitudes)
        else:
            amplitudes = num.ones(1)

        times = num.linspace(tmin, tmax, nt)
        return times, amplitudes

    def base_key(self):
        return (type(self).__name__, self.duration, self.anchor)

1036 

1037 

class SmoothRampSTF(STF):
    '''
    Smooth-ramp type source time function for near-field displacement.
    Based on moment function of double-couple point source proposed by Bruestle
    and Mueller (PEPI, 1983).

    .. [1] W. Bruestle, G. Mueller (1983), Moment and duration of shallow
        earthquakes from Love-wave modelling for regional distances, PEPI 32,
        312-324.

    .. figure :: /static/stf-SmoothRampSTF.svg
        :width: 40%
        :alt: smooth ramp source time function
    '''
    duration = Float.T(
        default=0.0,
        help='duration of the ramp (baseline)')

    rise_ratio = Float.T(
        default=0.5,
        help='fraction of time compared to duration, '
             'when the maximum amplitude is reached')

    anchor = Float.T(
        default=0.0,
        help='anchor point with respect to source.time: ('
             '-1.0: left -> source duration ``[0, T]`` ~ hypocenter time, '
             '0.0: center -> source duration ``[-T/2, T/2]`` ~ centroid time, '
             '+1.0: right -> source duration ``[-T, 0]`` ~ rupture end time)')

    def discretize_t(self, deltat, tref):
        '''
        Discretize the smooth ramp on a regular time grid.

        :returns: ``(times, amplitudes)``; amplitudes sum to 1
        '''
        # Snap the extent to the sampling grid.
        tmin_stf = tref - self.duration * (self.anchor + 1.) * 0.5
        tmax_stf = tref + self.duration * (1. - self.anchor) * 0.5
        tmin = round(tmin_stf / deltat) * deltat
        tmax = round(tmax_stf / deltat) * deltat
        D = round((tmax - tmin) / deltat) * deltat
        nt = int(round(D / deltat)) + 1
        times = num.linspace(tmin, tmax, nt)
        if nt > 1:
            rise_time = self.rise_ratio * self.duration
            amplitudes = num.ones_like(times)
            tp = tmin + rise_time
            # Smooth rise up to tp (cosine blend, eq. after Bruestle &
            # Mueller 1983); constant level afterwards.
            ii = num.where(times <= tp)
            t_inc = times[ii]
            a = num.cos(num.pi * (t_inc - tmin_stf) / rise_time)
            b = num.cos(3 * num.pi * (t_inc - tmin_stf) / rise_time) - 1.0
            amplitudes[ii] = (9. / 16.) * (1 - a + (1. / 9.) * b)

            amplitudes /= num.sum(amplitudes)
        else:
            amplitudes = num.ones(1)

        return times, amplitudes

    def base_key(self):
        return (type(self).__name__,
                self.duration, self.rise_ratio, self.anchor)

1095 

1096 

class ResonatorSTF(STF):
    '''
    Simple resonator like source time function.

    .. math ::

        f(t) = 0 for t < 0
        f(t) = e^{-t/tau} * sin(2 * pi * f * t)


    .. figure :: /static/stf-SmoothRampSTF.svg
        :width: 40%
        :alt: smooth ramp source time function

    '''

    duration = Float.T(
        default=0.0,
        help='decay time')

    frequency = Float.T(
        default=1.0,
        help='resonance frequency')

    def discretize_t(self, deltat, tref):
        # Sample the decaying sinusoid over three decay times, on a grid
        # aligned to multiples of deltat.
        tmin = math.floor(tref / deltat) * deltat
        tmax = math.ceil((tref + self.duration * 3) / deltat) * deltat
        times = util.arange2(tmin, tmax, deltat)

        trel = times - tref
        envelope = num.exp(-trel / self.duration)
        oscillation = num.sin(2.0 * num.pi * self.frequency * trel)

        return times, envelope * oscillation

    def base_key(self):
        return (type(self).__name__, self.duration, self.frequency)

1135 

1136 

class STFMode(StringChoice):
    '''
    When to apply the source time function: ``'pre'`` during source
    discretization (before stacking of the Green's functions), ``'post'``
    in post-processing of the stacked synthetic seismogram.
    '''
    choices = ['pre', 'post']

1139 

1140 

class Source(Location, Cloneable):
    '''
    Base class for all source models.
    '''

    name = String.T(optional=True, default='')

    time = Timestamp.T(
        default=Timestamp.D('1970-01-01 00:00:00'),
        help='source origin time.')

    stf = STF.T(
        optional=True,
        help='source time function.')

    stf_mode = STFMode.T(
        default='post',
        help='whether to apply source time function in pre or '
             'post-processing.')

    def __init__(self, **kwargs):
        Location.__init__(self, **kwargs)

    def update(self, **kwargs):
        '''
        Change some of the source models parameters.

        Example::

            >>> from pyrocko import gf
            >>> s = gf.DCSource()
            >>> s.update(strike=66., dip=33.)
            >>> print(s)
            --- !pf.DCSource
            depth: 0.0
            time: 1970-01-01 00:00:00
            magnitude: 6.0
            strike: 66.0
            dip: 33.0
            rake: 0.0

        '''

        for (k, v) in kwargs.items():
            self[k] = v

    def grid(self, **variables):
        '''
        Create grid of source model variations.

        :returns: :py:class:`SourceGrid` instance.

        Example (illustrative -- check :py:class:`SourceGrid` for the exact
        range specification syntax)::

            >>> from pyrocko import gf
            >>> base = DCSource()
            >>> R = gf.Range
            >>> for s in base.grid(strike=R(0., 90., 30.)):
            ...     print(s.strike)

        '''
        return SourceGrid(base=self, variables=variables)

    def base_key(self):
        '''
        Get key to decide about source discretization / GF stack sharing.

        When two source models differ only in amplitude and origin time, the
        discretization and the GF stacking can be done only once for a unit
        amplitude and a zero origin time and the amplitude and origin times of
        the seismograms can be applied during post-processing of the synthetic
        seismogram.

        For any derived parameterized source model, this method is called to
        decide if discretization and stacking of the source should be shared.
        When two source models return an equal vector of values discretization
        is shared.
        '''
        return (self.depth, self.lat, self.north_shift,
                self.lon, self.east_shift, self.time, type(self).__name__) + \
            self.effective_stf_pre().base_key()

    def get_factor(self):
        '''
        Get the scaling factor to be applied during post-processing.

        Discretization of the base seismogram is usually done for a unit
        amplitude, because a common factor can be efficiently multiplied to
        final seismograms. This eliminates the need to repeat the stacking
        when creating seismograms for a series of source models only
        differing in amplitude.

        This method should return the scaling factor to apply in the
        post-processing (often this is simply the scalar moment of the source).
        '''

        return 1.0

    def effective_stf_pre(self):
        '''
        Return the STF applied before stacking of the Green's functions.

        This STF is used during discretization of the parameterized source
        models, i.e. to produce a temporal distribution of point sources.

        Handling of the STF before stacking of the GFs is less efficient but
        allows to use different source time functions for different parts of
        the source.
        '''

        if self.stf is not None and self.stf_mode == 'pre':
            return self.stf
        else:
            # Unit impulse: effectively no STF applied in this stage.
            return g_unit_pulse

    def effective_stf_post(self):
        '''
        Return the STF applied after stacking of the Green's functions.

        This STF is used in the post-processing of the synthetic seismograms.

        Handling of the STF after stacking of the GFs is usually more efficient
        but is only possible when a common STF is used for all subsources.
        '''

        if self.stf is not None and self.stf_mode == 'post':
            return self.stf
        else:
            # Unit impulse: effectively no STF applied in this stage.
            return g_unit_pulse

    def _dparams_base(self):
        # Common keyword arguments for the Discretized*Source constructors:
        # a single point at the source position and origin time.
        return dict(times=arr(self.time),
                    lat=self.lat, lon=self.lon,
                    north_shifts=arr(self.north_shift),
                    east_shifts=arr(self.east_shift),
                    depths=arr(self.depth))

    def _hash(self):
        # Hexdigest identifying the discretization-relevant state, derived
        # from base_key().
        sha = sha1()
        for k in self.base_key():
            sha.update(str(k).encode())
        return sha.hexdigest()

    def _dparams_base_repeated(self, times):
        # Like _dparams_base, but with the position entries repeated to
        # match the number of time samples of a discretized pre-STF.
        if times is None:
            return self._dparams_base()

        nt = times.size
        north_shifts = num.repeat(self.north_shift, nt)
        east_shifts = num.repeat(self.east_shift, nt)
        depths = num.repeat(self.depth, nt)
        return dict(times=times,
                    lat=self.lat, lon=self.lon,
                    north_shifts=north_shifts,
                    east_shifts=east_shifts,
                    depths=depths)

    def pyrocko_event(self, store=None, target=None, **kwargs):
        '''
        Convert the source model to a :py:class:`pyrocko.model.Event`.

        The effective duration of the source's STF (if set) is carried over
        to the event's ``duration`` attribute.
        '''
        duration = None
        if self.stf:
            duration = self.stf.effective_duration

        return model.Event(
            lat=self.lat,
            lon=self.lon,
            north_shift=self.north_shift,
            east_shift=self.east_shift,
            time=self.time,
            name=self.name,
            depth=self.depth,
            duration=duration,
            **kwargs)

    def outline(self, cs='xyz'):
        '''
        Get the source's outline (a single point for a point source).

        :param cs: output coordinate system, see
            :ref:`coordinate-system-names`
        '''
        points = num.atleast_2d(num.zeros([1, 3]))

        points[:, 0] += self.north_shift
        points[:, 1] += self.east_shift
        points[:, 2] += self.depth
        if cs == 'xyz':
            return points
        elif cs == 'xy':
            return points[:, :2]
        elif cs in ('latlon', 'lonlat'):
            latlon = ne_to_latlon(
                self.lat, self.lon, points[:, 0], points[:, 1])

            latlon = num.array(latlon).T
            if cs == 'latlon':
                return latlon
            else:
                return latlon[:, ::-1]

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        '''
        Create a source model from a :py:class:`pyrocko.model.Event`.

        :raises ConversionError: if the event carries no depth information.
        '''
        if ev.depth is None:
            raise ConversionError(
                'Cannot convert event object to source object: '
                'no depth information available')

        stf = None
        if ev.duration is not None:
            stf = HalfSinusoidSTF(effective_duration=ev.duration)

        d = dict(
            name=ev.name,
            time=ev.time,
            lat=ev.lat,
            lon=ev.lon,
            north_shift=ev.north_shift,
            east_shift=ev.east_shift,
            depth=ev.depth,
            stf=stf)
        d.update(kwargs)
        return cls(**d)

    def get_magnitude(self):
        # Subclasses carrying or deriving a magnitude override this.
        raise NotImplementedError(
            '%s does not implement get_magnitude()'
            % self.__class__.__name__)

1360 

1361 

class SourceWithMagnitude(Source):
    '''
    Base class for sources containing a moment magnitude.
    '''

    magnitude = Float.T(
        default=6.0,
        help='Moment magnitude Mw as in [Hanks and Kanamori, 1979]')

    def __init__(self, **kwargs):
        # Accept a scalar moment [Nm] as alternative to the magnitude; an
        # explicitly given magnitude takes precedence.
        if 'moment' in kwargs:
            mom = kwargs.pop('moment')
            if 'magnitude' not in kwargs:
                kwargs['magnitude'] = float(pmt.moment_to_magnitude(mom))

        Source.__init__(self, **kwargs)

    @property
    def moment(self):
        '''
        Scalar seismic moment [Nm], derived from ``magnitude``.
        '''
        return float(pmt.magnitude_to_moment(self.magnitude))

    @moment.setter
    def moment(self, value):
        self.magnitude = float(pmt.moment_to_magnitude(value))

    def pyrocko_event(self, store=None, target=None, **kwargs):
        return Source.pyrocko_event(
            self, store, target,
            magnitude=self.magnitude,
            **kwargs)

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        d = {}
        # Explicit None check: a valid magnitude of 0.0 is falsy and would
        # otherwise be silently dropped.
        if ev.magnitude is not None:
            d.update(magnitude=ev.magnitude)

        d.update(kwargs)
        return super(SourceWithMagnitude, cls).from_pyrocko_event(ev, **d)

    def get_magnitude(self):
        return self.magnitude

1404 

1405 

class DerivedMagnitudeError(ValidationError):
    '''
    Raised when a source magnitude cannot be derived or when source
    parameters controlling the magnitude are in conflict.
    '''
    pass

1408 

1409 

class SourceWithDerivedMagnitude(Source):
    '''
    Base class for sources whose magnitude is derived from other parameters.
    '''

    class __T(Source.T):
        # Guts type hook: run the subclass' conflict check as part of
        # object validation.

        def validate_extra(self, val):
            Source.T.validate_extra(self, val)
            val.check_conflicts()

    def check_conflicts(self):
        '''
        Check for parameter conflicts.

        To be overloaded in subclasses. Raises :py:exc:`DerivedMagnitudeError`
        on conflicts.
        '''
        pass

    def get_magnitude(self, store=None, target=None):
        # Subclasses derive the magnitude from their parameters.
        raise DerivedMagnitudeError('No magnitude set.')

    def get_moment(self, store=None, target=None):
        # Scalar moment [Nm] derived from the (possibly derived) magnitude.
        return float(pmt.magnitude_to_moment(
            self.get_magnitude(store, target)))

    def pyrocko_moment_tensor(self, store=None, target=None):
        raise NotImplementedError(
            '%s does not implement pyrocko_moment_tensor()'
            % self.__class__.__name__)

    def pyrocko_event(self, store=None, target=None, **kwargs):
        '''
        Convert to a :py:class:`pyrocko.model.Event`.

        Moment tensor and magnitude are left unset when they cannot be
        derived (e.g. no GF store available for the conversion).
        '''
        try:
            mt = self.pyrocko_moment_tensor(store, target)
            magnitude = self.get_magnitude()
        except (DerivedMagnitudeError, NotImplementedError):
            mt = None
            magnitude = None

        return Source.pyrocko_event(
            self, store, target,
            moment_tensor=mt,
            magnitude=magnitude,
            **kwargs)

1452 

1453 

class ExplosionLineSource(SourceWithMagnitude):
    '''
    Line of explosion point sources, optionally with a finite propagation
    speed of the explosion front along the line.
    '''

    length = Float.T(
        default=0.0,
        help='length of the line source [m]')

    azimuth = Float.T(
        default=0.0,
        help='azimuth direction, clockwise from north [deg]')

    dip = Float.T(
        default=0.,
        help='dip direction, downward from horizontal [deg]')

    anchor = StringChoice.T(
        default='center',
        choices=['center', 'start'],
        help='anchor point of the line source, default \'center\''
    )

    subsource_oversampling = Int.T(
        default=1,
        help='Spatial oversampling of the subsources in space'
    )

    v_start = Float.T(
        optional=True,
        help='Propagation Speed of the explosion line in [m/s]'
    )

    v_end = Float.T(
        optional=True,
        help='propagation speed at the end of the line in [m/s]. Defaults '
             'to v_start. Values larger (smaller) than v_start give an '
             'accelerating (decelerating) explosion front.'
    )

    acceleration_exp = Float.T(
        default=1.,
        help='exponent for acceleration or deceleration'
             ' between v_start and v_end. 1.0 is linear.'
    )

    def base_key(self):
        # Include all geometry and kinematics parameters so that GF stacks
        # are not erroneously shared between differing line sources.
        return SourceWithMagnitude.base_key(self) + (
            self.length, self.azimuth, self.dip, self.anchor,
            self.subsource_oversampling, self.v_start, self.v_end,
            self.acceleration_exp)

    def get_magnitude(self, store=None, target=None):
        return self.magnitude

    def get_moment(self, store=None, target=None):
        return pmt.magnitude_to_moment(self.magnitude)

    def pyrocko_moment_tensor(self, store=None, target=None):
        # Isotropic moment tensor, scaled so that the MT's scalar moment
        # matches the source's moment.
        a = self.get_moment(store, target) * math.sqrt(2. / 3.)
        return pmt.MomentTensor(m=pmt.symmat6(a, a, a, 0., 0., 0.))

    def discretize_basesource(self, store, target=None):
        '''
        Discretize the line into a chain of explosion point sources.
        '''
        # Subsource spacing: the GF store's spatial sampling, optionally
        # refined by subsource_oversampling.
        delta_spatial = num.min(store.config.deltas) \
            / self.subsource_oversampling
        nsources = 2 * int(math.ceil(self.length / delta_spatial)) + 1

        line_points_asc = num.linspace(0, self.length, nsources)

        if self.anchor == 'center':
            line_points = num.linspace(
                -0.5 * self.length, 0.5 * self.length, nsources)
        elif self.anchor == 'start':
            line_points = line_points_asc
        else:
            raise ValueError('Bad anchor %s' % self.anchor)

        sin_azi = math.sin(self.azimuth * d2r)
        cos_azi = math.cos(self.azimuth * d2r)
        sin_dip = math.sin(self.dip * d2r)
        cos_dip = math.cos(self.dip * d2r)

        points = num.zeros((nsources, 3))
        points[:, 0] = self.north_shift + line_points * cos_azi * cos_dip
        points[:, 1] = self.east_shift + line_points * sin_azi * cos_dip
        points[:, 2] = self.depth + line_points * sin_dip

        times = num.zeros(nsources)

        if self.v_start:
            v_start = self.v_start
            v_end = self.v_end or v_start

            # Velocity profile along the line, shaped by acceleration_exp.
            v_weight = num.linspace(0, 1., nsources) \
                ** self.acceleration_exp

            v_gradient = v_start * (1. - v_weight) + v_end * v_weight
            # Travel-time increments of the front between subsources.
            v_points = num.gradient(line_points_asc) / v_gradient
            times = num.cumsum(v_points)

        times += self.time

        # Total moment distributed equally onto the subsources.
        amplitudes = num.full(nsources, self.moment / nsources)

        return meta.DiscretizedExplosionSource(
            times=times,
            lat=self.lat,
            lon=self.lon,
            north_shifts=points[:, 0],
            east_shifts=points[:, 1],
            depths=points[:, 2],
            m0s=amplitudes)

1556 

1557 

class ExplosionSource(SourceWithDerivedMagnitude):
    '''
    An isotropic explosion point source.
    '''

    magnitude = Float.T(
        optional=True,
        help='moment magnitude Mw as in [Hanks and Kanamori, 1979]')

    volume_change = Float.T(
        optional=True,
        help='volume change of the explosion/implosion or '
             'the contracting/extending magmatic source. [m^3]')

    discretized_source_class = meta.DiscretizedExplosionSource

    def __init__(self, **kwargs):
        # Accept a scalar moment [Nm] as alternative to the magnitude; an
        # explicitly given magnitude takes precedence.
        if 'moment' in kwargs:
            mom = kwargs.pop('moment')
            if 'magnitude' not in kwargs:
                kwargs['magnitude'] = float(pmt.moment_to_magnitude(mom))

        SourceWithDerivedMagnitude.__init__(self, **kwargs)

    def base_key(self):
        return SourceWithDerivedMagnitude.base_key(self) + \
            (self.volume_change,)

    def check_conflicts(self):
        # Magnitude and volume change are alternative parameterizations;
        # only one of them may be given.
        if self.magnitude is not None and self.volume_change is not None:
            raise DerivedMagnitudeError(
                'Magnitude and volume_change are both defined.')

    def get_magnitude(self, store=None, target=None):
        '''
        Get the moment magnitude, derived from ``volume_change`` if needed.
        '''
        self.check_conflicts()

        if self.magnitude is not None:
            return self.magnitude

        elif self.volume_change is not None:
            moment = self.volume_change * \
                self.get_moment_to_volume_change_ratio(store, target)

            return float(pmt.moment_to_magnitude(abs(moment)))
        else:
            # Neither given: fall back to a unit moment of 1 Nm.
            return float(pmt.moment_to_magnitude(1.0))

    def get_volume_change(self, store=None, target=None):
        '''
        Get the volume change, derived from ``magnitude`` if needed.
        '''
        self.check_conflicts()

        if self.volume_change is not None:
            return self.volume_change

        elif self.magnitude is not None:
            moment = float(pmt.magnitude_to_moment(self.magnitude))
            return moment / self.get_moment_to_volume_change_ratio(
                store, target)

        else:
            # Pass target along so the same interpolation settings are used
            # as in the other branches (was previously omitted here).
            return 1.0 / self.get_moment_to_volume_change_ratio(
                store, target)

    def get_moment_to_volume_change_ratio(self, store, target=None):
        '''
        Moment per unit volume change [Nm/m^3] at the source position.

        Uses three times the shear modulus from the GF store's earth model.

        :raises DerivedMagnitudeError: if no store is given or the shear
            modulus cannot be obtained at the source position.
        '''
        if store is None:
            raise DerivedMagnitudeError(
                'Need earth model to convert between volume change and '
                'magnitude.')

        points = num.array(
            [[self.north_shift, self.east_shift, self.depth]], dtype=float)

        interpolation = target.interpolation if target else 'multilinear'
        try:
            shear_moduli = store.config.get_shear_moduli(
                self.lat, self.lon,
                points=points,
                interpolation=interpolation)[0]
        except meta.OutOfBounds:
            raise DerivedMagnitudeError(
                'Could not get shear modulus at source position.')

        return float(3. * shear_moduli)

    def get_factor(self):
        # Scaling happens during discretization; no post-processing factor.
        return 1.0

    def discretize_basesource(self, store, target=None):
        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)

        amplitudes *= self.get_moment(store, target) * math.sqrt(2. / 3.)

        # A negative volume change flips the sign of the isotropic source.
        if self.volume_change is not None:
            if self.volume_change < 0.:
                amplitudes *= -1

        return meta.DiscretizedExplosionSource(
            m0s=amplitudes,
            **self._dparams_base_repeated(times))

    def pyrocko_moment_tensor(self, store=None, target=None):
        a = self.get_moment(store, target) * math.sqrt(2. / 3.)
        return pmt.MomentTensor(m=pmt.symmat6(a, a, a, 0., 0., 0.))

1660 

1661 

class RectangularExplosionSource(ExplosionSource):
    '''
    Rectangular or line explosion source.
    '''

    discretized_source_class = meta.DiscretizedExplosionSource

    strike = Float.T(
        default=0.0,
        help='strike direction in [deg], measured clockwise from north')

    dip = Float.T(
        default=90.0,
        help='dip angle in [deg], measured downward from horizontal')

    length = Float.T(
        default=0.,
        help='length of rectangular source area [m]')

    width = Float.T(
        default=0.,
        help='width of rectangular source area [m]')

    anchor = StringChoice.T(
        choices=['top', 'top_left', 'top_right', 'center', 'bottom',
                 'bottom_left', 'bottom_right'],
        default='center',
        optional=True,
        help='Anchor point for positioning the plane, can be: ``top, '
             'center, bottom, top_left, top_right, bottom_left or '
             'bottom_right``.')

    nucleation_x = Float.T(
        optional=True,
        help='horizontal position of rupture nucleation in normalized fault '
             'plane coordinates (-1 = left edge, +1 = right edge)')

    nucleation_y = Float.T(
        optional=True,
        help='down-dip position of rupture nucleation in normalized fault '
             'plane coordinates (-1 = upper edge, +1 = lower edge)')

    velocity = Float.T(
        default=3500.,
        help='speed of explosion front [m/s]')

    aggressive_oversampling = Bool.T(
        default=False,
        help='Aggressive oversampling for basesource discretization. '
             'When using \'multilinear\' interpolation oversampling has'
             ' practically no effect.')

    def base_key(self):
        return Source.base_key(self) + (self.strike, self.dip, self.length,
                                        self.width, self.nucleation_x,
                                        self.nucleation_y, self.velocity,
                                        self.anchor)

    def discretize_basesource(self, store, target=None):
        '''
        Discretize the plane into a grid of explosion point sources.

        Subsource times follow the expansion of the explosion front from
        the nucleation point (if given) with the configured ``velocity``.
        '''
        # Convert normalized nucleation coordinates to offsets in [m].
        if self.nucleation_x is not None:
            nucx = self.nucleation_x * 0.5 * self.length
        else:
            nucx = None

        if self.nucleation_y is not None:
            nucy = self.nucleation_y * 0.5 * self.width
        else:
            nucy = None

        stf = self.effective_stf_pre()

        points, times, amplitudes, dl, dw, nl, nw = discretize_rect_source(
            store.config.deltas, store.config.deltat,
            self.time, self.north_shift, self.east_shift, self.depth,
            self.strike, self.dip, self.length, self.width, self.anchor,
            self.velocity, stf=stf, nucleation_x=nucx, nucleation_y=nucy)

        # Normalize, then scale so the subsources sum to the total moment.
        amplitudes /= num.sum(amplitudes)
        amplitudes *= self.get_moment(store, target)

        return meta.DiscretizedExplosionSource(
            lat=self.lat,
            lon=self.lon,
            times=times,
            north_shifts=points[:, 0],
            east_shifts=points[:, 1],
            depths=points[:, 2],
            m0s=amplitudes)

    def outline(self, cs='xyz'):
        '''
        Get the corner points of the rectangular plane.

        :param cs: output coordinate system, see
            :ref:`coordinate-system-names`
        '''
        points = outline_rect_source(self.strike, self.dip, self.length,
                                     self.width, self.anchor)

        points[:, 0] += self.north_shift
        points[:, 1] += self.east_shift
        points[:, 2] += self.depth
        if cs == 'xyz':
            return points
        elif cs == 'xy':
            return points[:, :2]
        elif cs in ('latlon', 'lonlat'):
            latlon = ne_to_latlon(
                self.lat, self.lon, points[:, 0], points[:, 1])

            latlon = num.array(latlon).T
            if cs == 'latlon':
                return latlon
            else:
                return latlon[:, ::-1]

    def get_nucleation_abs_coord(self, cs='xy'):
        '''
        Get the absolute coordinates of the nucleation point.

        Returns ``(None, None)`` when no nucleation point is set.
        '''
        if self.nucleation_x is None:
            return None, None

        coords = from_plane_coords(self.strike, self.dip, self.length,
                                   self.width, self.depth, self.nucleation_x,
                                   self.nucleation_y, lat=self.lat,
                                   lon=self.lon, north_shift=self.north_shift,
                                   east_shift=self.east_shift, cs=cs)
        return coords

1784 

1785 

class DCSource(SourceWithMagnitude):
    '''
    A double-couple point source.
    '''

    strike = Float.T(
        default=0.0,
        help='strike direction in [deg], measured clockwise from north')

    dip = Float.T(
        default=90.0,
        help='dip angle in [deg], measured downward from horizontal')

    rake = Float.T(
        default=0.0,
        help='rake angle in [deg], '
             'measured counter-clockwise from right-horizontal '
             'in on-plane view')

    discretized_source_class = meta.DiscretizedMTSource

    def base_key(self):
        # The mechanism angles decide whether GF stacks may be shared.
        return Source.base_key(self) + (self.strike, self.dip, self.rake)

    def get_factor(self):
        # Post-processing amplitude factor: the scalar moment from Mw.
        return float(pmt.magnitude_to_moment(self.magnitude))

    def discretize_basesource(self, store, target=None):
        '''
        Discretize into a temporal distribution of moment tensor sources.
        '''
        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)

        mt = pmt.MomentTensor(
            strike=self.strike, dip=self.dip, rake=self.rake)

        m6s = mt.m6()[num.newaxis, :] * amplitudes[:, num.newaxis]
        return meta.DiscretizedMTSource(
            m6s=m6s,
            **self._dparams_base_repeated(times))

    def pyrocko_moment_tensor(self, store=None, target=None):
        '''
        Get the full moment tensor of this double-couple.
        '''
        return pmt.MomentTensor(
            strike=self.strike,
            dip=self.dip,
            rake=self.rake,
            scalar_moment=self.moment)

    def pyrocko_event(self, store=None, target=None, **kwargs):
        return SourceWithMagnitude.pyrocko_event(
            self, store, target,
            moment_tensor=self.pyrocko_moment_tensor(store, target),
            **kwargs)

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        '''
        Create a DCSource from a pyrocko event, taking mechanism and
        magnitude from the event's moment tensor when available.
        '''
        params = {}
        mt = ev.moment_tensor
        if mt:
            (strike, dip, rake), _ = mt.both_strike_dip_rake()
            params['strike'] = float(strike)
            params['dip'] = float(dip)
            params['rake'] = float(rake)
            params['magnitude'] = float(mt.moment_magnitude())

        params.update(kwargs)
        return super(DCSource, cls).from_pyrocko_event(ev, **params)

1850 

1851 

class CLVDSource(SourceWithMagnitude):
    '''
    A pure CLVD point source.
    '''

    discretized_source_class = meta.DiscretizedMTSource

    azimuth = Float.T(
        default=0.0,
        help='azimuth direction of largest dipole, clockwise from north [deg]')

    dip = Float.T(
        default=90.,
        help='dip direction of largest dipole, downward from horizontal [deg]')

    def base_key(self):
        return Source.base_key(self) + (self.azimuth, self.dip)

    def get_factor(self):
        # Post-processing amplitude factor: the scalar moment from Mw.
        return float(pmt.magnitude_to_moment(self.magnitude))

    @property
    def m6(self):
        '''
        6-component moment tensor of the CLVD, rotated into the configured
        symmetry-axis orientation (factor applied).
        '''
        a = math.sqrt(4. / 3.) * self.get_factor()
        m = pmt.symmat6(-0.5 * a, -0.5 * a, a, 0., 0., 0.)
        rotmat1 = pmt.euler_to_matrix(
            d2r * (self.dip - 90.),
            d2r * (self.azimuth - 90.),
            0.)
        m = rotmat1.T * m * rotmat1
        return pmt.to6(m)

    @property
    def m6_astuple(self):
        return tuple(self.m6.tolist())

    def discretize_basesource(self, store, target=None):
        # The factor is divided out here because it is re-applied in
        # post-processing via get_factor().
        factor = self.get_factor()
        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)
        return meta.DiscretizedMTSource(
            m6s=self.m6[num.newaxis, :] * amplitudes[:, num.newaxis] / factor,
            **self._dparams_base_repeated(times))

    def pyrocko_moment_tensor(self, store=None, target=None):
        return pmt.MomentTensor(m=pmt.symmat6(*self.m6_astuple))

    def pyrocko_event(self, store=None, target=None, **kwargs):
        # Reuse the computed moment tensor instead of rebuilding it (the
        # m6 property recomputes the rotation on every access).
        mt = self.pyrocko_moment_tensor(store, target)
        return Source.pyrocko_event(
            self, store, target,
            moment_tensor=mt,
            magnitude=float(mt.moment_magnitude()),
            **kwargs)

1906 

1907 

class VLVDSource(SourceWithDerivedMagnitude):
    '''
    Volumetric linear vector dipole source.

    This source is a parameterization for a restricted moment tensor point
    source, useful to represent dyke or sill like inflation or deflation
    sources. The restriction is such that the moment tensor is rotational
    symmetric. It can be represented by a superposition of a linear vector
    dipole (here we use a CLVD for convenience) and an isotropic component.
    The restricted moment tensor has 4 degrees of freedom: 2 independent
    eigenvalues and 2 rotation angles orienting the symmetry axis.

    In this parameterization, the isotropic component is controlled by
    ``volume_change``. To define the moment tensor, it must be converted to
    the scalar moment of the MT's isotropic component. For the conversion,
    the shear modulus at the source's position must be known. This value is
    extracted from the earth model defined in the GF store in use.

    The CLVD part is controlled by its scalar moment :math:`M_0`:
    ``clvd_moment``. The sign of ``clvd_moment`` is used to switch between a
    positive or negative CLVD (the sign of the largest eigenvalue).
    '''

    discretized_source_class = meta.DiscretizedMTSource

    azimuth = Float.T(
        default=0.0,
        help='azimuth direction of symmetry axis, clockwise from north [deg].')

    dip = Float.T(
        default=90.,
        help='dip direction of symmetry axis, downward from horizontal [deg].')

    volume_change = Float.T(
        default=0.,
        help='volume change of the inflation/deflation [m^3].')

    clvd_moment = Float.T(
        default=0.,
        help='scalar moment :math:`M_0` of the CLVD component [Nm]. The sign '
             'controls the sign of the CLVD (the sign of its largest '
             'eigenvalue).')

    def get_moment_to_volume_change_ratio(self, store, target):
        '''
        Moment per unit volume change [Nm/m^3] at the source position.

        :raises DerivedMagnitudeError: if store or target is missing, or the
            shear modulus cannot be obtained at the source position.
        '''
        if store is None or target is None:
            raise DerivedMagnitudeError(
                'Need earth model to convert between volume change and '
                'magnitude.')

        points = num.array(
            [[self.north_shift, self.east_shift, self.depth]], dtype=float)

        try:
            shear_moduli = store.config.get_shear_moduli(
                self.lat, self.lon,
                points=points,
                interpolation=target.interpolation)[0]
        except meta.OutOfBounds:
            raise DerivedMagnitudeError(
                'Could not get shear modulus at source position.')

        return float(3. * shear_moduli)

    def base_key(self):
        return Source.base_key(self) + \
            (self.azimuth, self.dip, self.volume_change, self.clvd_moment)

    def get_magnitude(self, store=None, target=None):
        mt = self.pyrocko_moment_tensor(store, target)
        return float(pmt.moment_to_magnitude(mt.moment))

    def get_m6(self, store, target):
        '''
        Get the combined (CLVD + isotropic) 6-component moment tensor.
        '''
        a = math.sqrt(4. / 3.) * self.clvd_moment
        m_clvd = pmt.symmat6(-0.5 * a, -0.5 * a, a, 0., 0., 0.)

        rotmat1 = pmt.euler_to_matrix(
            d2r * (self.dip - 90.),
            d2r * (self.azimuth - 90.),
            0.)
        m_clvd = rotmat1.T * m_clvd * rotmat1

        m_iso = self.volume_change * \
            self.get_moment_to_volume_change_ratio(store, target)

        m_iso = pmt.symmat6(m_iso, m_iso, m_iso, 0.,
                            0., 0.,) * math.sqrt(2. / 3)

        m = pmt.to6(m_clvd) + pmt.to6(m_iso)
        return m

    def get_moment(self, store=None, target=None):
        return float(pmt.magnitude_to_moment(
            self.get_magnitude(store, target)))

    def get_m6_astuple(self, store, target):
        m6 = self.get_m6(store, target)
        return tuple(m6.tolist())

    def discretize_basesource(self, store, target=None):
        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)

        m6 = self.get_m6(store, target)

        # Broadcast to shape (nt, 6): one scaled tensor per STF sample,
        # consistent with the repeated times/positions. The previous
        # in-place ``m6 *= amplitudes / factor`` broke for pre-STFs with
        # more than one sample (shape (6,) vs. (nt,)).
        m6s = m6[num.newaxis, :] * amplitudes[:, num.newaxis] \
            / self.get_factor()

        return meta.DiscretizedMTSource(
            m6s=m6s,
            **self._dparams_base_repeated(times))

    def pyrocko_moment_tensor(self, store=None, target=None):
        m6_astuple = self.get_m6_astuple(store, target)
        return pmt.MomentTensor(m=pmt.symmat6(*m6_astuple))

2020 

2021 

class MTSource(Source):
    '''
    A moment tensor point source.
    '''

    discretized_source_class = meta.DiscretizedMTSource

    mnn = Float.T(
        default=1.,
        help='north-north component of moment tensor in [Nm]')

    mee = Float.T(
        default=1.,
        help='east-east component of moment tensor in [Nm]')

    mdd = Float.T(
        default=1.,
        help='down-down component of moment tensor in [Nm]')

    mne = Float.T(
        default=0.,
        help='north-east component of moment tensor in [Nm]')

    mnd = Float.T(
        default=0.,
        help='north-down component of moment tensor in [Nm]')

    med = Float.T(
        default=0.,
        help='east-down component of moment tensor in [Nm]')

    def __init__(self, **kwargs):
        # Accept a 6-component vector as alternative to the individual
        # components.
        if 'm6' in kwargs:
            for (k, v) in zip('mnn mee mdd mne mnd med'.split(),
                              kwargs.pop('m6')):
                kwargs[k] = float(v)

        Source.__init__(self, **kwargs)

    @property
    def m6(self):
        '''
        Moment tensor as 6-component array (mnn, mee, mdd, mne, mnd, med).
        '''
        return num.array(self.m6_astuple)

    @property
    def m6_astuple(self):
        return (self.mnn, self.mee, self.mdd, self.mne, self.mnd, self.med)

    @m6.setter
    def m6(self, value):
        self.mnn, self.mee, self.mdd, self.mne, self.mnd, self.med = value

    def base_key(self):
        return Source.base_key(self) + self.m6_astuple

    def discretize_basesource(self, store, target=None):
        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)
        return meta.DiscretizedMTSource(
            m6s=self.m6[num.newaxis, :] * amplitudes[:, num.newaxis],
            **self._dparams_base_repeated(times))

    def get_magnitude(self, store=None, target=None):
        # Mw from the Frobenius-norm based scalar moment of the tensor;
        # wrapped in float for consistency with the sibling classes.
        m6 = self.m6
        return float(pmt.moment_to_magnitude(
            math.sqrt(num.sum(m6[0:3]**2) + 2.0 * num.sum(m6[3:6]**2)) /
            math.sqrt(2.)))

    def pyrocko_moment_tensor(self, store=None, target=None):
        return pmt.MomentTensor(m=pmt.symmat6(*self.m6_astuple))

    def pyrocko_event(self, store=None, target=None, **kwargs):
        # Reuse the computed moment tensor instead of rebuilding it.
        mt = self.pyrocko_moment_tensor(store, target)
        return Source.pyrocko_event(
            self, store, target,
            moment_tensor=mt,
            magnitude=float(mt.moment_magnitude()),
            **kwargs)

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        '''
        Create an MTSource from a pyrocko event. Without a moment tensor
        on the event, fall back to an isotropic tensor matching the
        event's magnitude (if any).
        '''
        d = {}
        mt = ev.moment_tensor
        if mt:
            d.update(m6=tuple(map(float, mt.m6())))
        else:
            if ev.magnitude is not None:
                mom = pmt.magnitude_to_moment(ev.magnitude)
                v = math.sqrt(2. / 3.) * mom
                d.update(m6=(v, v, v, 0., 0., 0.))

        d.update(kwargs)
        return super(MTSource, cls).from_pyrocko_event(ev, **d)

2114 

2115 

# Normalized in-plane coordinates (x, y) of the nine anchor positions on a
# rectangular source plane: x runs -1 (left) .. +1 (right) along strike,
# y runs -1 (top) .. +1 (bottom) down dip.
map_anchor = dict(
    center=(0.0, 0.0),
    center_left=(-1.0, 0.0),
    center_right=(1.0, 0.0),
    top=(0.0, -1.0),
    top_left=(-1.0, -1.0),
    top_right=(1.0, -1.0),
    bottom=(0.0, 1.0),
    bottom_left=(-1.0, 1.0),
    bottom_right=(1.0, 1.0))

2126 

2127 

class RectangularSource(SourceWithDerivedMagnitude):
    '''
    Classical Haskell source model modified for bilateral rupture.
    '''

    discretized_source_class = meta.DiscretizedMTSource

    magnitude = Float.T(
        optional=True,
        help='moment magnitude Mw as in [Hanks and Kanamori, 1979]')

    strike = Float.T(
        default=0.0,
        help='strike direction in [deg], measured clockwise from north')

    dip = Float.T(
        default=90.0,
        help='dip angle in [deg], measured downward from horizontal')

    rake = Float.T(
        default=0.0,
        help='rake angle in [deg], '
             'measured counter-clockwise from right-horizontal '
             'in on-plane view')

    length = Float.T(
        default=0.,
        help='length of rectangular source area [m]')

    width = Float.T(
        default=0.,
        help='width of rectangular source area [m]')

    # 'center_left'/'center_right' added to the choices: the help text
    # already documented them and map_anchor provides their coordinates.
    anchor = StringChoice.T(
        choices=['top', 'top_left', 'top_right', 'center', 'center_left',
                 'center_right', 'bottom', 'bottom_left', 'bottom_right'],
        default='center',
        optional=True,
        help='Anchor point for positioning the plane, can be: ``top, center, '
             'bottom, top_left, top_right, bottom_left, '
             'bottom_right, center_left, center_right``.')

    nucleation_x = Float.T(
        optional=True,
        help='horizontal position of rupture nucleation in normalized fault '
             'plane coordinates (``-1.`` = left edge, ``+1.`` = right edge)')

    nucleation_y = Float.T(
        optional=True,
        help='down-dip position of rupture nucleation in normalized fault '
             'plane coordinates (``-1.`` = upper edge, ``+1.`` = lower edge)')

    velocity = Float.T(
        default=3500.,
        help='speed of rupture front [m/s]')

    slip = Float.T(
        optional=True,
        help='Slip on the rectangular source area [m]')

    opening_fraction = Float.T(
        default=0.,
        help='Determines fraction of slip related to opening. '
             '(``-1``: pure tensile closing, '
             '``0``: pure shear, '
             '``1``: pure tensile opening)')

    decimation_factor = Int.T(
        optional=True,
        default=1,
        help='Sub-source decimation factor, a larger decimation will'
             ' make the result inaccurate but shorten the necessary'
             ' computation time (use for testing purposes only).')

    aggressive_oversampling = Bool.T(
        default=False,
        help='Aggressive oversampling for basesource discretization. '
             'When using \'multilinear\' interpolation oversampling has'
             ' practically no effect.')

    def __init__(self, **kwargs):
        '''
        Initialize, translating a ``moment`` argument into ``magnitude``.

        A ``moment`` keyword (scalar moment in [Nm]) is accepted for
        convenience and converted to a moment magnitude, unless
        ``magnitude`` is given explicitly.
        '''
        if 'moment' in kwargs:
            mom = kwargs.pop('moment')
            if 'magnitude' not in kwargs:
                kwargs['magnitude'] = float(pmt.moment_to_magnitude(mom))

        SourceWithDerivedMagnitude.__init__(self, **kwargs)

    def base_key(self):
        '''Key tuple identifying this source for Green's function caching.'''
        return SourceWithDerivedMagnitude.base_key(self) + (
            self.magnitude,
            self.slip,
            self.strike,
            self.dip,
            self.rake,
            self.length,
            self.width,
            self.nucleation_x,
            self.nucleation_y,
            self.velocity,
            self.decimation_factor,
            self.anchor)

    def check_conflicts(self):
        '''
        Raise if the mutually exclusive ``magnitude`` and ``slip`` are
        both set.

        :raises DerivedMagnitudeError: if both attributes are given.
        '''
        if self.magnitude is not None and self.slip is not None:
            raise DerivedMagnitudeError(
                'Magnitude and slip are both defined.')

    def get_magnitude(self, store=None, target=None):
        '''
        Get the moment magnitude, deriving it from slip if necessary.

        When only ``slip`` is given, ``store`` and ``target`` are required
        to look up elastic moduli along the fault plane.
        '''
        self.check_conflicts()
        if self.magnitude is not None:
            return self.magnitude

        elif self.slip is not None:
            if None in (store, target):
                raise DerivedMagnitudeError(
                    'Magnitude for a rectangular source with slip defined '
                    'can only be derived when earth model and target '
                    'interpolation method are available.')

            amplitudes = self._discretize(store, target)[2]
            if amplitudes.ndim == 2:
                # CLVD component has no net moment, leave out
                return float(pmt.moment_to_magnitude(
                    num.sum(num.abs(amplitudes[0:2, :]).sum())))
            else:
                return float(pmt.moment_to_magnitude(num.sum(amplitudes)))

        else:
            return float(pmt.moment_to_magnitude(1.0))

    def get_factor(self):
        '''Scaling factor applied to the discretized source (always 1.).'''
        return 1.0

    def get_slip_tensile(self):
        '''Tensile (opening) part of the slip in [m].'''
        return self.slip * self.opening_fraction

    def get_slip_shear(self):
        '''Shear part of the slip in [m].'''
        # Fixed: the tensile-slip method must be *called* here; previously
        # the bound method object itself was passed to abs(), raising
        # TypeError whenever this method was used.
        return self.slip - abs(self.get_slip_tensile())

    def _discretize(self, store, target):
        '''
        Discretize the fault plane into sub-source contributions.

        :returns:
            Point coordinates, rupture onset times, moment amplitudes
            (one row per point; with additional iso/CLVD rows for tensile
            sources), patch dimensions ``dl``, ``dw`` and patch counts
            ``nl``, ``nw``.
        '''
        if self.nucleation_x is not None:
            nucx = self.nucleation_x * 0.5 * self.length
        else:
            nucx = None

        if self.nucleation_y is not None:
            nucy = self.nucleation_y * 0.5 * self.width
        else:
            nucy = None

        stf = self.effective_stf_pre()

        points, times, amplitudes, dl, dw, nl, nw = discretize_rect_source(
            store.config.deltas, store.config.deltat,
            self.time, self.north_shift, self.east_shift, self.depth,
            self.strike, self.dip, self.length, self.width, self.anchor,
            self.velocity, stf=stf, nucleation_x=nucx, nucleation_y=nucy,
            decimation_factor=self.decimation_factor,
            aggressive_oversampling=self.aggressive_oversampling)

        if self.slip is not None:
            if target is not None:
                interpolation = target.interpolation
            else:
                interpolation = 'nearest_neighbor'
                # logger.warn is deprecated in the logging module, use
                # logger.warning
                logger.warning(
                    'no target information available, will use '
                    '"nearest_neighbor" interpolation when extracting shear '
                    'modulus from earth model')

            shear_moduli = store.config.get_shear_moduli(
                self.lat, self.lon,
                points=points,
                interpolation=interpolation)

            tensile_slip = self.get_slip_tensile()
            shear_slip = self.slip - abs(tensile_slip)

            amplitudes_total = [shear_moduli * shear_slip]
            if tensile_slip != 0:
                bulk_moduli = store.config.get_bulk_moduli(
                    self.lat, self.lon,
                    points=points,
                    interpolation=interpolation)

                tensile_iso = bulk_moduli * tensile_slip
                tensile_clvd = (2. / 3.) * shear_moduli * tensile_slip

                amplitudes_total.extend([tensile_iso, tensile_clvd])

            amplitudes_total = num.vstack(amplitudes_total).squeeze() * \
                amplitudes * dl * dw

        else:
            # normalization to retain total moment
            amplitudes_norm = amplitudes / num.sum(amplitudes)
            moment = self.get_moment(store, target)

            amplitudes_total = [
                amplitudes_norm * moment * (1 - abs(self.opening_fraction))]
            if self.opening_fraction != 0.:
                amplitudes_total.append(
                    amplitudes_norm * self.opening_fraction * moment)

            amplitudes_total = num.vstack(amplitudes_total).squeeze()

        return points, times, num.atleast_1d(amplitudes_total), dl, dw, nl, nw

    def discretize_basesource(self, store, target=None):
        '''
        Discretize to a :py:class:`meta.DiscretizedMTSource`.

        The amplitude rows produced by :py:meth:`_discretize` are combined
        with the double-couple (and, for tensile sources, rotated iso/CLVD)
        moment tensor components.
        '''
        points, times, amplitudes, dl, dw, nl, nw = self._discretize(
            store, target)

        mot = pmt.MomentTensor(
            strike=self.strike, dip=self.dip, rake=self.rake)
        m6s = num.repeat(mot.m6()[num.newaxis, :], times.size, axis=0)

        if amplitudes.ndim == 1:
            m6s[:, :] *= amplitudes[:, num.newaxis]
        elif amplitudes.ndim == 2:
            # shear MT components
            rotmat1 = pmt.euler_to_matrix(
                d2r * self.dip, d2r * self.strike, d2r * -self.rake)
            m6s[:, :] *= amplitudes[0, :][:, num.newaxis]

            if amplitudes.shape[0] == 2:
                # tensile MT components - moment/magnitude input
                tensile = pmt.symmat6(1., 1., 3., 0., 0., 0.)
                rot_tensile = pmt.to6(rotmat1.T * tensile * rotmat1)

                m6s_tensile = rot_tensile[
                    num.newaxis, :] * amplitudes[1, :][:, num.newaxis]
                m6s += m6s_tensile

            elif amplitudes.shape[0] == 3:
                # tensile MT components - slip input
                iso = pmt.symmat6(1., 1., 1., 0., 0., 0.)
                clvd = pmt.symmat6(-1., -1., 2., 0., 0., 0.)

                rot_iso = pmt.to6(rotmat1.T * iso * rotmat1)
                rot_clvd = pmt.to6(rotmat1.T * clvd * rotmat1)

                m6s_iso = rot_iso[
                    num.newaxis, :] * amplitudes[1, :][:, num.newaxis]
                m6s_clvd = rot_clvd[
                    num.newaxis, :] * amplitudes[2, :][:, num.newaxis]
                m6s += m6s_iso + m6s_clvd
            else:
                raise ValueError('Unknown amplitudes shape!')
        else:
            raise ValueError(
                'Unexpected dimension of {}'.format(amplitudes.ndim))

        ds = meta.DiscretizedMTSource(
            lat=self.lat,
            lon=self.lon,
            times=times,
            north_shifts=points[:, 0],
            east_shifts=points[:, 1],
            depths=points[:, 2],
            m6s=m6s,
            dl=dl,
            dw=dw,
            nl=nl,
            nw=nw)

        return ds

    def outline(self, cs='xyz'):
        '''
        Get source outline corner coordinates.

        :param cs:
            :ref:`Output coordinate system <coordinate-system-names>`.
        :type cs:
            optional, str

        :returns:
            Corner points in desired coordinate system.
        :rtype:
            :py:class:`~numpy.ndarray`: ``(5, [2, 3])``.
        '''
        points = outline_rect_source(self.strike, self.dip, self.length,
                                     self.width, self.anchor)

        points[:, 0] += self.north_shift
        points[:, 1] += self.east_shift
        points[:, 2] += self.depth
        if cs == 'xyz':
            return points
        elif cs == 'xy':
            return points[:, :2]
        elif cs in ('latlon', 'lonlat', 'latlondepth'):
            latlon = ne_to_latlon(
                self.lat, self.lon, points[:, 0], points[:, 1])

            latlon = num.array(latlon).T
            if cs == 'latlon':
                return latlon
            elif cs == 'lonlat':
                return latlon[:, ::-1]
            else:
                return num.concatenate(
                    (latlon, points[:, 2].reshape((len(points), 1))),
                    axis=1)

    def points_on_source(self, cs='xyz', **kwargs):
        '''
        Convert relative plane coordinates to geographical coordinates.

        Coordinates need to be given as :py:class:`~numpy.ndarray`
        arguments ``points_x`` and ``points_y`` (normalized -1. .. 1.).

        :param cs:
            :ref:`Output coordinate system <coordinate-system-names>`.
        :type cs:
            optional, str

        :returns:
            Point coordinates in desired coordinate system.
        :rtype:
            :py:class:`~numpy.ndarray`: ``(n_points, [2, 3])``.
        '''
        points = points_on_rect_source(
            self.strike, self.dip, self.length, self.width,
            self.anchor, **kwargs)

        points[:, 0] += self.north_shift
        points[:, 1] += self.east_shift
        points[:, 2] += self.depth
        if cs == 'xyz':
            return points
        elif cs == 'xy':
            return points[:, :2]
        elif cs in ('latlon', 'lonlat', 'latlondepth'):
            latlon = ne_to_latlon(
                self.lat, self.lon, points[:, 0], points[:, 1])

            latlon = num.array(latlon).T
            if cs == 'latlon':
                return latlon
            elif cs == 'lonlat':
                return latlon[:, ::-1]
            else:
                return num.concatenate(
                    (latlon, points[:, 2].reshape((len(points), 1))),
                    axis=1)

    def get_nucleation_abs_coord(self, cs='xy'):
        '''
        Absolute coordinates of the nucleation point.

        Returns ``(None, None)`` when no nucleation point is set.
        '''
        if self.nucleation_x is None:
            return None, None

        coords = from_plane_coords(self.strike, self.dip, self.length,
                                   self.width, self.depth, self.nucleation_x,
                                   self.nucleation_y, lat=self.lat,
                                   lon=self.lon, north_shift=self.north_shift,
                                   east_shift=self.east_shift, cs=cs)
        return coords

    def pyrocko_moment_tensor(self, store=None, target=None):
        '''Return source as :py:class:`pyrocko.moment_tensor.MomentTensor`.'''
        return pmt.MomentTensor(
            strike=self.strike,
            dip=self.dip,
            rake=self.rake,
            scalar_moment=self.get_moment(store, target))

    def pyrocko_event(self, store=None, target=None, **kwargs):
        '''Convert to a :py:class:`pyrocko.model.Event`.'''
        return SourceWithDerivedMagnitude.pyrocko_event(
            self, store, target,
            **kwargs)

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        '''
        Build source from a :py:class:`pyrocko.model.Event`.

        Strike, dip, rake and magnitude are taken from the event's moment
        tensor when available.
        '''
        d = {}
        mt = ev.moment_tensor
        if mt:
            (strike, dip, rake), _ = mt.both_strike_dip_rake()
            d.update(
                strike=float(strike),
                dip=float(dip),
                rake=float(rake),
                magnitude=float(mt.moment_magnitude()))

        d.update(kwargs)
        return super(RectangularSource, cls).from_pyrocko_event(ev, **d)

2487 

2488 

class PseudoDynamicRupture(SourceWithDerivedMagnitude):
    '''
    Combined Eikonal and Okada quasi-dynamic rupture model.

    Details are described in :doc:`/topics/pseudo-dynamic-rupture`.
    Note: attribute `stf` is not used so far, but kept for future applications.
    '''

    discretized_source_class = meta.DiscretizedMTSource

    strike = Float.T(
        default=0.0,
        help='Strike direction in [deg], measured clockwise from north.')

    dip = Float.T(
        default=0.0,
        help='Dip angle in [deg], measured downward from horizontal.')

    length = Float.T(
        default=10. * km,
        help='Length of rectangular source area in [m].')

    width = Float.T(
        default=5. * km,
        help='Width of rectangular source area in [m].')

    # 'center_left'/'center_right' added to the choices: the help text
    # already documented them and map_anchor provides their coordinates.
    anchor = StringChoice.T(
        choices=['top', 'top_left', 'top_right', 'center', 'center_left',
                 'center_right', 'bottom', 'bottom_left', 'bottom_right'],
        default='center',
        optional=True,
        help='Anchor point for positioning the plane, can be: ``top, center, '
             'bottom, top_left, top_right, bottom_left, '
             'bottom_right, center_left, center_right``.')

    # NOTE: dtype uses the builtin float/bool instead of the deprecated
    # num.float/num.bool aliases (removed in NumPy >= 1.24); num.float and
    # num.bool were plain aliases of the builtins, so behavior is unchanged.
    nucleation_x__ = Array.T(
        default=num.array([0.]),
        dtype=float,
        serialize_as='list',
        help='Horizontal position of rupture nucleation in normalized fault '
             'plane coordinates (``-1.`` = left edge, ``+1.`` = right edge).')

    nucleation_y__ = Array.T(
        default=num.array([0.]),
        dtype=float,
        serialize_as='list',
        help='Down-dip position of rupture nucleation in normalized fault '
             'plane coordinates (``-1.`` = upper edge, ``+1.`` = lower edge).')

    nucleation_time__ = Array.T(
        optional=True,
        help='Time in [s] after origin, when nucleation points defined by '
             '``nucleation_x`` and ``nucleation_y`` rupture.',
        dtype=float,
        serialize_as='list')

    gamma = Float.T(
        default=0.8,
        help='Scaling factor between rupture velocity and S-wave velocity: '
             r':math:`v_r = \gamma * v_s`.')

    nx = Int.T(
        default=2,
        help='Number of discrete source patches in x direction (along '
             'strike).')

    ny = Int.T(
        default=2,
        help='Number of discrete source patches in y direction (down dip).')

    slip = Float.T(
        optional=True,
        help='Maximum slip of the rectangular source [m]. '
             'Setting the slip the tractions/stress field '
             'will be normalized to accommodate the desired maximum slip.')

    rake = Float.T(
        optional=True,
        help='Rake angle in [deg], '
             'measured counter-clockwise from right-horizontal '
             'in on-plane view. Rake is translated into homogeneous tractions '
             'in strike and up-dip direction. ``rake`` is mutually exclusive '
             'with tractions parameter.')

    patches = List.T(
        OkadaSource.T(),
        optional=True,
        help='List of all boundary elements/sub faults/fault patches.')

    patch_mask__ = Array.T(
        dtype=bool,
        serialize_as='list',
        shape=(None,),
        optional=True,
        help='Mask for all boundary elements/sub faults/fault patches. True '
             'leaves the patch in the calculation, False excludes the patch.')

    tractions = TractionField.T(
        optional=True,
        help='Traction field the rupture plane is exposed to. See the'
             ':py:mod:`pyrocko.gf.tractions` module for more details. '
             'If ``tractions=None`` and ``rake`` is given'
             ' :py:class:`~pyrocko.gf.tractions.DirectedTractions` will'
             ' be used.')

    coef_mat = Array.T(
        optional=True,
        help='Coefficient matrix linking traction and dislocation field.',
        dtype=float,
        shape=(None, None))

    eikonal_decimation = Int.T(
        optional=True,
        default=1,
        help='Sub-source eikonal factor, a smaller eikonal factor will'
             ' increase the accuracy of rupture front calculation but'
             ' increases also the computation time.')

    decimation_factor = Int.T(
        optional=True,
        default=1,
        help='Sub-source decimation factor, a larger decimation will'
             ' make the result inaccurate but shorten the necessary'
             ' computation time (use for testing purposes only).')

    nthreads = Int.T(
        optional=True,
        default=1,
        help='Number of threads for Okada forward modelling, '
             'matrix inversion and calculation of point subsources. '
             'Note: for small/medium matrices 1 thread is most efficient.')

    pure_shear = Bool.T(
        optional=True,
        default=False,
        help='Calculate only shear tractions and omit tensile tractions.')

    smooth_rupture = Bool.T(
        default=True,
        help='Smooth the tractions by weighting partially ruptured'
             ' fault patches.')

    aggressive_oversampling = Bool.T(
        default=False,
        help='Aggressive oversampling for basesource discretization. '
             'When using \'multilinear\' interpolation oversampling has'
             ' practically no effect.')

2636 

    def __init__(self, **kwargs):
        # Cache for lazily-built rupture front interpolators; conflicting
        # parameter combinations (tractions vs. rake) are rejected right
        # after construction.
        SourceWithDerivedMagnitude.__init__(self, **kwargs)
        self._interpolators = {}
        self.check_conflicts()

2641 

2642 @property 

2643 def nucleation_x(self): 

2644 return self.nucleation_x__ 

2645 

2646 @nucleation_x.setter 

2647 def nucleation_x(self, nucleation_x): 

2648 if isinstance(nucleation_x, list): 

2649 nucleation_x = num.array(nucleation_x) 

2650 

2651 elif not isinstance( 

2652 nucleation_x, num.ndarray) and nucleation_x is not None: 

2653 

2654 nucleation_x = num.array([nucleation_x]) 

2655 self.nucleation_x__ = nucleation_x 

2656 

2657 @property 

2658 def nucleation_y(self): 

2659 return self.nucleation_y__ 

2660 

2661 @nucleation_y.setter 

2662 def nucleation_y(self, nucleation_y): 

2663 if isinstance(nucleation_y, list): 

2664 nucleation_y = num.array(nucleation_y) 

2665 

2666 elif not isinstance(nucleation_y, num.ndarray) \ 

2667 and nucleation_y is not None: 

2668 nucleation_y = num.array([nucleation_y]) 

2669 

2670 self.nucleation_y__ = nucleation_y 

2671 

2672 @property 

2673 def nucleation(self): 

2674 nucl_x, nucl_y = self.nucleation_x, self.nucleation_y 

2675 

2676 if (nucl_x is None) or (nucl_y is None): 

2677 return None 

2678 

2679 assert nucl_x.shape[0] == nucl_y.shape[0] 

2680 

2681 return num.concatenate( 

2682 (nucl_x[:, num.newaxis], nucl_y[:, num.newaxis]), axis=1) 

2683 

2684 @nucleation.setter 

2685 def nucleation(self, nucleation): 

2686 if isinstance(nucleation, list): 

2687 nucleation = num.array(nucleation) 

2688 

2689 assert nucleation.shape[1] == 2 

2690 

2691 self.nucleation_x = nucleation[:, 0] 

2692 self.nucleation_y = nucleation[:, 1] 

2693 

2694 @property 

2695 def nucleation_time(self): 

2696 return self.nucleation_time__ 

2697 

2698 @nucleation_time.setter 

2699 def nucleation_time(self, nucleation_time): 

2700 if not isinstance(nucleation_time, num.ndarray) \ 

2701 and nucleation_time is not None: 

2702 nucleation_time = num.array([nucleation_time]) 

2703 

2704 self.nucleation_time__ = nucleation_time 

2705 

2706 @property 

2707 def patch_mask(self): 

2708 if (self.patch_mask__ is not None and 

2709 self.patch_mask__.shape == (self.nx * self.ny,)): 

2710 

2711 return self.patch_mask__ 

2712 else: 

2713 return num.ones(self.nx * self.ny, dtype=bool) 

2714 

2715 @patch_mask.setter 

2716 def patch_mask(self, patch_mask): 

2717 if isinstance(patch_mask, list): 

2718 patch_mask = num.array(patch_mask) 

2719 

2720 self.patch_mask__ = patch_mask 

2721 

2722 def get_tractions(self): 

2723 ''' 

2724 Get source traction vectors. 

2725 

2726 If :py:attr:`rake` is given, unit length directed traction vectors 

2727 (:py:class:`~pyrocko.gf.tractions.DirectedTractions`) are returned, 

2728 else the given :py:attr:`tractions` are used. 

2729 

2730 :returns: 

2731 Traction vectors per patch. 

2732 :rtype: 

2733 :py:class:`~numpy.ndarray`: ``(n_patches, 3)``. 

2734 ''' 

2735 

2736 if self.rake is not None: 

2737 if num.isnan(self.rake): 

2738 raise ValueError('Rake must be a real number, not NaN.') 

2739 

2740 logger.warning( 

2741 'Tractions are derived based on the given source rake.') 

2742 tractions = DirectedTractions(rake=self.rake) 

2743 else: 

2744 tractions = self.tractions 

2745 return tractions.get_tractions(self.nx, self.ny, self.patches) 

2746 

2747 def base_key(self): 

2748 return SourceWithDerivedMagnitude.base_key(self) + ( 

2749 self.slip, 

2750 self.strike, 

2751 self.dip, 

2752 self.rake, 

2753 self.length, 

2754 self.width, 

2755 float(self.nucleation_x.mean()), 

2756 float(self.nucleation_y.mean()), 

2757 self.decimation_factor, 

2758 self.anchor, 

2759 self.pure_shear, 

2760 self.gamma, 

2761 tuple(self.patch_mask)) 

2762 

2763 def check_conflicts(self): 

2764 if self.tractions and self.rake: 

2765 raise AttributeError( 

2766 'Tractions and rake are mutually exclusive.') 

2767 if self.tractions is None and self.rake is None: 

2768 self.rake = 0. 

2769 

2770 def get_magnitude(self, store=None, target=None): 

2771 self.check_conflicts() 

2772 if self.slip is not None or self.tractions is not None: 

2773 if store is None: 

2774 raise DerivedMagnitudeError( 

2775 'Magnitude for a rectangular source with slip or ' 

2776 'tractions defined can only be derived when earth model ' 

2777 'is set.') 

2778 

2779 moment_rate, calc_times = self.discretize_basesource( 

2780 store, target=target).get_moment_rate(store.config.deltat) 

2781 

2782 deltat = num.concatenate(( 

2783 (num.diff(calc_times)[0],), 

2784 num.diff(calc_times))) 

2785 

2786 return float(pmt.moment_to_magnitude( 

2787 num.sum(moment_rate * deltat))) 

2788 

2789 else: 

2790 return float(pmt.moment_to_magnitude(1.0)) 

2791 

    def get_factor(self):
        # Scaling factor applied to the discretized source; always 1. for
        # this source type.
        return 1.0

2794 

2795 def outline(self, cs='xyz'): 

2796 ''' 

2797 Get source outline corner coordinates. 

2798 

2799 :param cs: 

2800 :ref:`Output coordinate system <coordinate-system-names>`. 

2801 :type cs: 

2802 optional, str 

2803 

2804 :returns: 

2805 Corner points in desired coordinate system. 

2806 :rtype: 

2807 :py:class:`~numpy.ndarray`: ``(5, [2, 3])``. 

2808 ''' 

2809 points = outline_rect_source(self.strike, self.dip, self.length, 

2810 self.width, self.anchor) 

2811 

2812 points[:, 0] += self.north_shift 

2813 points[:, 1] += self.east_shift 

2814 points[:, 2] += self.depth 

2815 if cs == 'xyz': 

2816 return points 

2817 elif cs == 'xy': 

2818 return points[:, :2] 

2819 elif cs in ('latlon', 'lonlat', 'latlondepth'): 

2820 latlon = ne_to_latlon( 

2821 self.lat, self.lon, points[:, 0], points[:, 1]) 

2822 

2823 latlon = num.array(latlon).T 

2824 if cs == 'latlon': 

2825 return latlon 

2826 elif cs == 'lonlat': 

2827 return latlon[:, ::-1] 

2828 else: 

2829 return num.concatenate( 

2830 (latlon, points[:, 2].reshape((len(points), 1))), 

2831 axis=1) 

2832 

2833 def points_on_source(self, cs='xyz', **kwargs): 

2834 ''' 

2835 Convert relative plane coordinates to geographical coordinates. 

2836 

2837 Given x and y coordinates (relative source coordinates between -1. 

2838 and 1.) are converted to desired geographical coordinates. Coordinates 

2839 need to be given as :py:class:`~numpy.ndarray` arguments ``points_x`` 

2840 and ``points_y``. 

2841 

2842 :param cs: 

2843 :ref:`Output coordinate system <coordinate-system-names>`. 

2844 :type cs: 

2845 optional, str 

2846 

2847 :returns: 

2848 Point coordinates in desired coordinate system. 

2849 :rtype: 

2850 :py:class:`~numpy.ndarray`: ``(n_points, [2, 3])``. 

2851 ''' 

2852 points = points_on_rect_source( 

2853 self.strike, self.dip, self.length, self.width, 

2854 self.anchor, **kwargs) 

2855 

2856 points[:, 0] += self.north_shift 

2857 points[:, 1] += self.east_shift 

2858 points[:, 2] += self.depth 

2859 if cs == 'xyz': 

2860 return points 

2861 elif cs == 'xy': 

2862 return points[:, :2] 

2863 elif cs in ('latlon', 'lonlat', 'latlondepth'): 

2864 latlon = ne_to_latlon( 

2865 self.lat, self.lon, points[:, 0], points[:, 1]) 

2866 

2867 latlon = num.array(latlon).T 

2868 if cs == 'latlon': 

2869 return latlon 

2870 elif cs == 'lonlat': 

2871 return latlon[:, ::-1] 

2872 else: 

2873 return num.concatenate( 

2874 (latlon, points[:, 2].reshape((len(points), 1))), 

2875 axis=1) 

2876 

2877 def pyrocko_moment_tensor(self, store=None, target=None): 

2878 # TODO: Now this should be slip, then it depends on the store. 

2879 # TODO: default to tractions is store is not given? 

2880 tractions = self.get_tractions() 

2881 tractions = tractions.mean(axis=0) 

2882 rake = num.arctan2(tractions[1], tractions[0]) # arctan2(dip, slip) 

2883 

2884 return pmt.MomentTensor( 

2885 strike=self.strike, 

2886 dip=self.dip, 

2887 rake=rake, 

2888 scalar_moment=self.get_moment(store, target)) 

2889 

2890 def pyrocko_event(self, store=None, target=None, **kwargs): 

2891 return SourceWithDerivedMagnitude.pyrocko_event( 

2892 self, store, target, 

2893 **kwargs) 

2894 

2895 @classmethod 

2896 def from_pyrocko_event(cls, ev, **kwargs): 

2897 d = {} 

2898 mt = ev.moment_tensor 

2899 if mt: 

2900 (strike, dip, rake), _ = mt.both_strike_dip_rake() 

2901 d.update( 

2902 strike=float(strike), 

2903 dip=float(dip), 

2904 rake=float(rake)) 

2905 

2906 d.update(kwargs) 

2907 return super(PseudoDynamicRupture, cls).from_pyrocko_event(ev, **d) 

2908 

    def _discretize_points(self, store, *args, **kwargs):
        '''
        Discretize source plane with equal vertical and horizontal spacing.

        Additional ``*args`` and ``**kwargs`` are passed to
        :py:meth:`points_on_source`.

        :param store:
            Green's function database (needs to cover whole region of the
            source).
        :type store:
            :py:class:`~pyrocko.gf.store.Store`

        :returns:
            Number of points in strike and dip direction, distance
            between adjacent points, coordinates (latlondepth) and coordinates
            (xy on fault) for discrete points.
        :rtype:
            (int, int, float, :py:class:`~numpy.ndarray`,
            :py:class:`~numpy.ndarray`).
        '''
        anch_x, anch_y = map_anchor[self.anchor]

        # Sample a coarse (~1 km spaced) line down dip through the anchor
        # to probe S-wave velocities over the fault's depth extent.
        npoints = int(self.width // km) + 1
        points = num.zeros((npoints, 3))
        points[:, 1] = num.linspace(-1., 1., npoints)
        points[:, 1] = (points[:, 1] - anch_y) * self.width/2

        # Rotate the down-dip line into geographic orientation and shift
        # it to the source depth.
        rotmat = num.asarray(
            pmt.euler_to_matrix(self.dip*d2r, self.strike*d2r, 0.0))
        points = num.dot(rotmat.T, points.T).T
        points[:, 2] += self.depth

        # Minimum rupture velocity: slowest sampled vs (floored at
        # 0.5 km/s) scaled by gamma.
        vs_min = store.config.get_vs(
            self.lat, self.lon, points,
            interpolation='nearest_neighbor')
        vr_min = max(vs_min.min(), .5*km) * self.gamma

        oversampling = 10.
        delta_l = self.length / (self.nx * oversampling)
        delta_w = self.width / (self.ny * oversampling)

        # Grid spacing: limited by GF time sampling, patch oversampling
        # and the store's spatial deltas; optionally coarsened by
        # eikonal_decimation.
        delta = self.eikonal_decimation * num.min([
            store.config.deltat * vr_min / oversampling,
            delta_l, delta_w] + [
            deltas for deltas in store.config.deltas])

        # Snap the spacing so the fault width is an integer number of cells.
        delta = delta_w / num.ceil(delta_w / delta)

        nx = int(num.ceil(self.length / delta)) + 1
        ny = int(num.ceil(self.width / delta)) + 1

        # The along-strike extent may overshoot the fault length slightly;
        # widen the normalized x range accordingly.
        rem_l = (nx-1)*delta - self.length
        lim_x = rem_l / self.length

        points_xy = num.zeros((nx * ny, 2))
        points_xy[:, 0] = num.repeat(
            num.linspace(-1.-lim_x, 1.+lim_x, nx), ny)
        points_xy[:, 1] = num.tile(
            num.linspace(-1., 1., ny), nx)

        points = self.points_on_source(
            points_x=points_xy[:, 0],
            points_y=points_xy[:, 1],
            **kwargs)

        return nx, ny, delta, points, points_xy

2976 

2977 def _discretize_rupture_v(self, store, interpolation='nearest_neighbor', 

2978 points=None): 

2979 ''' 

2980 Get rupture velocity for discrete points on source plane. 

2981 

2982 :param store: 

2983 Green's function database (needs to cover the whole region of the 

2984 source) 

2985 :type store: 

2986 optional, :py:class:`~pyrocko.gf.store.Store` 

2987 

2988 :param interpolation: 

2989 Interpolation method to use (choose between ``'nearest_neighbor'`` 

2990 and ``'multilinear'``). 

2991 :type interpolation: 

2992 optional, str 

2993 

2994 :param points: 

2995 Coordinates on fault (-1.:1.) of discrete points. 

2996 :type points: 

2997 optional, :py:class:`~numpy.ndarray`: ``(n_points, 2)`` 

2998 

2999 :returns: 

3000 Rupture velocity assumed as :math:`v_s * \\gamma` for discrete 

3001 points. 

3002 :rtype: 

3003 :py:class:`~numpy.ndarray`: ``(n_points, )``. 

3004 ''' 

3005 

3006 if points is None: 

3007 _, _, _, points, _ = self._discretize_points(store, cs='xyz') 

3008 

3009 return store.config.get_vs( 

3010 self.lat, self.lon, 

3011 points=points, 

3012 interpolation=interpolation) * self.gamma 

3013 

3014 def discretize_time( 

3015 self, store, interpolation='nearest_neighbor', 

3016 vr=None, times=None, *args, **kwargs): 

3017 ''' 

3018 Get rupture start time for discrete points on source plane. 

3019 

3020 :param store: 

3021 Green's function database (needs to cover whole region of the 

3022 source) 

3023 :type store: 

3024 :py:class:`~pyrocko.gf.store.Store` 

3025 

3026 :param interpolation: 

3027 Interpolation method to use (choose between ``'nearest_neighbor'`` 

3028 and ``'multilinear'``). 

3029 :type interpolation: 

3030 optional, str 

3031 

3032 :param vr: 

3033 Array, containing rupture user defined rupture velocity values. 

3034 :type vr: 

3035 optional, :py:class:`~numpy.ndarray` 

3036 

3037 :param times: 

3038 Array, containing zeros, where rupture is starting, real positive 

3039 numbers at later secondary nucleation points and -1, where time 

3040 will be calculated. If not given, rupture starts at nucleation_x, 

3041 nucleation_y. Times are given for discrete points with equal 

3042 horizontal and vertical spacing. 

3043 :type times: 

3044 optional, :py:class:`~numpy.ndarray` 

3045 

3046 :returns: 

3047 Coordinates (latlondepth), coordinates (xy), rupture velocity, 

3048 rupture propagation time of discrete points. 

3049 :rtype: 

3050 :py:class:`~numpy.ndarray`: ``(n_points, 3)``, 

3051 :py:class:`~numpy.ndarray`: ``(n_points, 2)``, 

3052 :py:class:`~numpy.ndarray`: ``(n_points_dip, n_points_strike)``, 

3053 :py:class:`~numpy.ndarray`: ``(n_points_dip, n_points_strike)``. 

3054 ''' 

3055 nx, ny, delta, points, points_xy = self._discretize_points( 

3056 store, cs='xyz') 

3057 

3058 if vr is None or vr.shape != tuple((nx, ny)): 

3059 if vr: 

3060 logger.warning( 

3061 'Given rupture velocities are not in right shape: ' 

3062 '(%i, %i), but needed is (%i, %i).', *vr.shape + (nx, ny)) 

3063 vr = self._discretize_rupture_v(store, interpolation, points)\ 

3064 .reshape(nx, ny) 

3065 

3066 if vr.shape != tuple((nx, ny)): 

3067 logger.warning( 

3068 'Given rupture velocities are not in right shape. Therefore' 

3069 ' standard rupture velocity array is used.') 

3070 

3071 def initialize_times(): 

3072 nucl_x, nucl_y = self.nucleation_x, self.nucleation_y 

3073 

3074 if nucl_x.shape != nucl_y.shape: 

3075 raise ValueError( 

3076 'Nucleation coordinates have different shape.') 

3077 

3078 dist_points = num.array([ 

3079 num.linalg.norm(points_xy - num.array([x, y]).ravel(), axis=1) 

3080 for x, y in zip(nucl_x, nucl_y)]) 

3081 nucl_indices = num.argmin(dist_points, axis=1) 

3082 

3083 if self.nucleation_time is None: 

3084 nucl_times = num.zeros_like(nucl_indices) 

3085 else: 

3086 if self.nucleation_time.shape == nucl_x.shape: 

3087 nucl_times = self.nucleation_time 

3088 else: 

3089 raise ValueError( 

3090 'Nucleation coordinates and times have different ' 

3091 'shapes') 

3092 

3093 t = num.full(nx * ny, -1.) 

3094 t[nucl_indices] = nucl_times 

3095 return t.reshape(nx, ny) 

3096 

3097 if times is None: 

3098 times = initialize_times() 

3099 elif times.shape != tuple((nx, ny)): 

3100 times = initialize_times() 

3101 logger.warning( 

3102 'Given times are not in right shape. Therefore standard time' 

3103 ' array is used.') 

3104 

3105 eikonal_ext.eikonal_solver_fmm_cartesian( 

3106 speeds=vr, times=times, delta=delta) 

3107 

3108 return points, points_xy, vr, times 

3109 

3110 def get_vr_time_interpolators( 

3111 self, store, interpolation='nearest_neighbor', force=False, 

3112 **kwargs): 

3113 ''' 

3114 Get interpolators for rupture velocity and rupture time. 

3115 

3116 Additional ``**kwargs`` are passed to :py:meth:`discretize_time`. 

3117 

3118 :param store: 

3119 Green's function database (needs to cover whole region of the 

3120 source). 

3121 :type store: 

3122 :py:class:`~pyrocko.gf.store.Store` 

3123 

3124 :param interpolation: 

3125 Interpolation method to use (choose between ``'nearest_neighbor'`` 

3126 and ``'multilinear'``). 

3127 :type interpolation: 

3128 optional, str 

3129 

3130 :param force: 

3131 Force recalculation of the interpolators (e.g. after change of 

3132 nucleation point locations/times). Default is ``False``. 

3133 :type force: 

3134 optional, bool 

3135 ''' 

3136 interp_map = {'multilinear': 'linear', 'nearest_neighbor': 'nearest'} 

3137 if interpolation not in interp_map: 

3138 raise TypeError( 

3139 'Interpolation method %s not available' % interpolation) 

3140 

3141 if not self._interpolators.get(interpolation, False) or force: 

3142 _, points_xy, vr, times = self.discretize_time( 

3143 store, **kwargs) 

3144 

3145 if self.length <= 0.: 

3146 raise ValueError( 

3147 'length must be larger then 0. not %g' % self.length) 

3148 

3149 if self.width <= 0.: 

3150 raise ValueError( 

3151 'width must be larger then 0. not %g' % self.width) 

3152 

3153 nx, ny = times.shape 

3154 anch_x, anch_y = map_anchor[self.anchor] 

3155 

3156 points_xy[:, 0] = (points_xy[:, 0] - anch_x) * self.length / 2. 

3157 points_xy[:, 1] = (points_xy[:, 1] - anch_y) * self.width / 2. 

3158 

3159 self._interpolators[interpolation] = ( 

3160 nx, ny, times, vr, 

3161 RegularGridInterpolator( 

3162 (points_xy[::ny, 0], points_xy[:ny, 1]), times, 

3163 method=interp_map[interpolation]), 

3164 RegularGridInterpolator( 

3165 (points_xy[::ny, 0], points_xy[:ny, 1]), vr, 

3166 method=interp_map[interpolation])) 

3167 return self._interpolators[interpolation] 

3168 

3169 def discretize_patches( 

3170 self, store, interpolation='nearest_neighbor', force=False, 

3171 grid_shape=(), 

3172 **kwargs): 

3173 ''' 

3174 Get rupture start time and OkadaSource elements for points on rupture. 

3175 

3176 All source elements and their corresponding center points are 

3177 calculated and stored in the :py:attr:`patches` attribute. 

3178 

3179 Additional ``**kwargs`` are passed to :py:meth:`discretize_time`. 

3180 

3181 :param store: 

3182 Green's function database (needs to cover whole region of the 

3183 source). 

3184 :type store: 

3185 :py:class:`~pyrocko.gf.store.Store` 

3186 

3187 :param interpolation: 

3188 Interpolation method to use (choose between ``'nearest_neighbor'`` 

3189 and ``'multilinear'``). 

3190 :type interpolation: 

3191 optional, str 

3192 

3193 :param force: 

3194 Force recalculation of the vr and time interpolators ( e.g. after 

3195 change of nucleation point locations/times). Default is ``False``. 

3196 :type force: 

3197 optional, bool 

3198 

3199 :param grid_shape: 

3200 Desired sub fault patch grid size (nlength, nwidth). Either factor 

3201 or grid_shape should be set. 

3202 :type grid_shape: 

3203 optional, tuple of int 

3204 ''' 

3205 nx, ny, times, vr, time_interpolator, vr_interpolator = \ 

3206 self.get_vr_time_interpolators( 

3207 store, 

3208 interpolation=interpolation, force=force, **kwargs) 

3209 anch_x, anch_y = map_anchor[self.anchor] 

3210 

3211 al = self.length / 2. 

3212 aw = self.width / 2. 

3213 al1 = -(al + anch_x * al) 

3214 al2 = al - anch_x * al 

3215 aw1 = -aw + anch_y * aw 

3216 aw2 = aw + anch_y * aw 

3217 assert num.abs([al1, al2]).sum() == self.length 

3218 assert num.abs([aw1, aw2]).sum() == self.width 

3219 

3220 def get_lame(*a, **kw): 

3221 shear_mod = store.config.get_shear_moduli(*a, **kw) 

3222 lamb = store.config.get_vp(*a, **kw)**2 \ 

3223 * store.config.get_rho(*a, **kw) - 2. * shear_mod 

3224 return shear_mod, lamb / (2. * (lamb + shear_mod)) 

3225 

3226 shear_mod, poisson = get_lame( 

3227 self.lat, self.lon, 

3228 num.array([[self.north_shift, self.east_shift, self.depth]]), 

3229 interpolation=interpolation) 

3230 

3231 okada_src = OkadaSource( 

3232 lat=self.lat, lon=self.lon, 

3233 strike=self.strike, dip=self.dip, 

3234 north_shift=self.north_shift, east_shift=self.east_shift, 

3235 depth=self.depth, 

3236 al1=al1, al2=al2, aw1=aw1, aw2=aw2, 

3237 poisson=poisson.mean(), 

3238 shearmod=shear_mod.mean(), 

3239 opening=kwargs.get('opening', 0.)) 

3240 

3241 if not (self.nx and self.ny): 

3242 if grid_shape: 

3243 self.nx, self.ny = grid_shape 

3244 else: 

3245 self.nx = nx 

3246 self.ny = ny 

3247 

3248 source_disc, source_points = okada_src.discretize(self.nx, self.ny) 

3249 

3250 shear_mod, poisson = get_lame( 

3251 self.lat, self.lon, 

3252 num.array([src.source_patch()[:3] for src in source_disc]), 

3253 interpolation=interpolation) 

3254 

3255 if (self.nx, self.ny) != (nx, ny): 

3256 times_interp = time_interpolator(source_points[:, :2]) 

3257 vr_interp = vr_interpolator(source_points[:, :2]) 

3258 else: 

3259 times_interp = times.T.ravel() 

3260 vr_interp = vr.T.ravel() 

3261 

3262 for isrc, src in enumerate(source_disc): 

3263 src.vr = vr_interp[isrc] 

3264 src.time = times_interp[isrc] + self.time 

3265 

3266 self.patches = source_disc 

3267 

3268 def discretize_basesource(self, store, target=None): 

3269 ''' 

3270 Prepare source for synthetic waveform calculation. 

3271 

3272 :param store: 

3273 Green's function database (needs to cover whole region of the 

3274 source). 

3275 :type store: 

3276 :py:class:`~pyrocko.gf.store.Store` 

3277 

3278 :param target: 

3279 Target information. 

3280 :type target: 

3281 optional, :py:class:`~pyrocko.gf.targets.Target` 

3282 

3283 :returns: 

3284 Source discretized by a set of moment tensors and times. 

3285 :rtype: 

3286 :py:class:`~pyrocko.gf.meta.DiscretizedMTSource` 

3287 ''' 

3288 if not target: 

3289 interpolation = 'nearest_neighbor' 

3290 else: 

3291 interpolation = target.interpolation 

3292 

3293 if not self.patches: 

3294 self.discretize_patches(store, interpolation) 

3295 

3296 if self.coef_mat is None: 

3297 self.calc_coef_mat() 

3298 

3299 delta_slip, slip_times = self.get_delta_slip(store) 

3300 npatches = self.nx * self.ny 

3301 ntimes = slip_times.size 

3302 

3303 anch_x, anch_y = map_anchor[self.anchor] 

3304 

3305 pln = self.length / self.nx 

3306 pwd = self.width / self.ny 

3307 

3308 patch_coords = num.array([ 

3309 (p.ix, p.iy) 

3310 for p in self.patches]).reshape(self.nx, self.ny, 2) 

3311 

3312 # boundary condition is zero-slip 

3313 # is not valid to avoid unwished interpolation effects 

3314 slip_grid = num.zeros((self.nx + 2, self.ny + 2, ntimes, 3)) 

3315 slip_grid[1:-1, 1:-1, :, :] = \ 

3316 delta_slip.reshape(self.nx, self.ny, ntimes, 3) 

3317 

3318 slip_grid[0, 0, :, :] = slip_grid[1, 1, :, :] 

3319 slip_grid[0, -1, :, :] = slip_grid[1, -2, :, :] 

3320 slip_grid[-1, 0, :, :] = slip_grid[-2, 1, :, :] 

3321 slip_grid[-1, -1, :, :] = slip_grid[-2, -2, :, :] 

3322 

3323 slip_grid[1:-1, 0, :, :] = slip_grid[1:-1, 1, :, :] 

3324 slip_grid[1:-1, -1, :, :] = slip_grid[1:-1, -2, :, :] 

3325 slip_grid[0, 1:-1, :, :] = slip_grid[1, 1:-1, :, :] 

3326 slip_grid[-1, 1:-1, :, :] = slip_grid[-2, 1:-1, :, :] 

3327 

3328 def make_grid(patch_parameter): 

3329 grid = num.zeros((self.nx + 2, self.ny + 2)) 

3330 grid[1:-1, 1:-1] = patch_parameter.reshape(self.nx, self.ny) 

3331 

3332 grid[0, 0] = grid[1, 1] 

3333 grid[0, -1] = grid[1, -2] 

3334 grid[-1, 0] = grid[-2, 1] 

3335 grid[-1, -1] = grid[-2, -2] 

3336 

3337 grid[1:-1, 0] = grid[1:-1, 1] 

3338 grid[1:-1, -1] = grid[1:-1, -2] 

3339 grid[0, 1:-1] = grid[1, 1:-1] 

3340 grid[-1, 1:-1] = grid[-2, 1:-1] 

3341 

3342 return grid 

3343 

3344 lamb = self.get_patch_attribute('lamb') 

3345 mu = self.get_patch_attribute('shearmod') 

3346 

3347 lamb_grid = make_grid(lamb) 

3348 mu_grid = make_grid(mu) 

3349 

3350 coords_x = num.zeros(self.nx + 2) 

3351 coords_x[1:-1] = patch_coords[:, 0, 0] 

3352 coords_x[0] = coords_x[1] - pln / 2 

3353 coords_x[-1] = coords_x[-2] + pln / 2 

3354 

3355 coords_y = num.zeros(self.ny + 2) 

3356 coords_y[1:-1] = patch_coords[0, :, 1] 

3357 coords_y[0] = coords_y[1] - pwd / 2 

3358 coords_y[-1] = coords_y[-2] + pwd / 2 

3359 

3360 slip_interp = RegularGridInterpolator( 

3361 (coords_x, coords_y, slip_times), 

3362 slip_grid, method='nearest') 

3363 

3364 lamb_interp = RegularGridInterpolator( 

3365 (coords_x, coords_y), 

3366 lamb_grid, method='nearest') 

3367 

3368 mu_interp = RegularGridInterpolator( 

3369 (coords_x, coords_y), 

3370 mu_grid, method='nearest') 

3371 

3372 # discretize basesources 

3373 mindeltagf = min(tuple( 

3374 (self.length / self.nx, self.width / self.ny) + 

3375 tuple(store.config.deltas))) 

3376 

3377 nl = int((1. / self.decimation_factor) * 

3378 num.ceil(pln / mindeltagf)) + 1 

3379 nw = int((1. / self.decimation_factor) * 

3380 num.ceil(pwd / mindeltagf)) + 1 

3381 nsrc_patch = int(nl * nw) 

3382 dl = pln / nl 

3383 dw = pwd / nw 

3384 

3385 patch_area = dl * dw 

3386 

3387 xl = num.linspace(-0.5 * (pln - dl), 0.5 * (pln - dl), nl) 

3388 xw = num.linspace(-0.5 * (pwd - dw), 0.5 * (pwd - dw), nw) 

3389 

3390 base_coords = num.zeros((nsrc_patch, 3), dtype=num.float) 

3391 base_coords[:, 0] = num.tile(xl, nw) 

3392 base_coords[:, 1] = num.repeat(xw, nl) 

3393 base_coords = num.tile(base_coords, (npatches, 1)) 

3394 

3395 center_coords = num.zeros((npatches, 3)) 

3396 center_coords[:, 0] = num.repeat( 

3397 num.arange(self.nx) * pln + pln / 2, self.ny) - self.length / 2 

3398 center_coords[:, 1] = num.tile( 

3399 num.arange(self.ny) * pwd + pwd / 2, self.nx) - self.width / 2 

3400 

3401 base_coords -= center_coords.repeat(nsrc_patch, axis=0) 

3402 nbaselocs = base_coords.shape[0] 

3403 

3404 base_interp = base_coords.repeat(ntimes, axis=0) 

3405 

3406 base_times = num.tile(slip_times, nbaselocs) 

3407 base_interp[:, 0] -= anch_x * self.length / 2 

3408 base_interp[:, 1] -= anch_y * self.width / 2 

3409 base_interp[:, 2] = base_times 

3410 

3411 _, _, _, _, time_interpolator, _ = self.get_vr_time_interpolators( 

3412 store, interpolation=interpolation) 

3413 

3414 time_eikonal_max = time_interpolator.values.max() 

3415 

3416 nbasesrcs = base_interp.shape[0] 

3417 delta_slip = slip_interp(base_interp).reshape(nbaselocs, ntimes, 3) 

3418 lamb = lamb_interp(base_interp[:, :2]).ravel() 

3419 mu = mu_interp(base_interp[:, :2]).ravel() 

3420 

3421 if False: 

3422 try: 

3423 import matplotlib.pyplot as plt 

3424 coords = base_coords.copy() 

3425 norm = num.sum(num.linalg.norm(delta_slip, axis=2), axis=1) 

3426 plt.scatter(coords[:, 0], coords[:, 1], c=norm) 

3427 plt.show() 

3428 except AttributeError: 

3429 pass 

3430 

3431 base_interp[:, 2] = 0. 

3432 rotmat = num.asarray( 

3433 pmt.euler_to_matrix(self.dip * d2r, self.strike * d2r, 0.0)) 

3434 base_interp = num.dot(rotmat.T, base_interp.T).T 

3435 base_interp[:, 0] += self.north_shift 

3436 base_interp[:, 1] += self.east_shift 

3437 base_interp[:, 2] += self.depth 

3438 

3439 slip_strike = delta_slip[:, :, 0].ravel() 

3440 slip_dip = delta_slip[:, :, 1].ravel() 

3441 slip_norm = delta_slip[:, :, 2].ravel() 

3442 

3443 slip_shear = num.linalg.norm([slip_strike, slip_dip], axis=0) 

3444 slip_rake = r2d * num.arctan2(slip_dip, slip_strike) 

3445 

3446 m6s = okada_ext.patch2m6( 

3447 strikes=num.full(nbasesrcs, self.strike, dtype=num.float), 

3448 dips=num.full(nbasesrcs, self.dip, dtype=num.float), 

3449 rakes=slip_rake, 

3450 disl_shear=slip_shear, 

3451 disl_norm=slip_norm, 

3452 lamb=lamb, 

3453 mu=mu, 

3454 nthreads=self.nthreads) 

3455 

3456 m6s *= patch_area 

3457 

3458 dl = -self.patches[0].al1 + self.patches[0].al2 

3459 dw = -self.patches[0].aw1 + self.patches[0].aw2 

3460 

3461 base_times[base_times > time_eikonal_max] = time_eikonal_max 

3462 

3463 ds = meta.DiscretizedMTSource( 

3464 lat=self.lat, 

3465 lon=self.lon, 

3466 times=base_times + self.time, 

3467 north_shifts=base_interp[:, 0], 

3468 east_shifts=base_interp[:, 1], 

3469 depths=base_interp[:, 2], 

3470 m6s=m6s, 

3471 dl=dl, 

3472 dw=dw, 

3473 nl=self.nx, 

3474 nw=self.ny) 

3475 

3476 return ds 

3477 

3478 def calc_coef_mat(self): 

3479 ''' 

3480 Calculate coefficients connecting tractions and dislocations. 

3481 ''' 

3482 if not self.patches: 

3483 raise ValueError( 

3484 'Patches are needed. Please calculate them first.') 

3485 

3486 self.coef_mat = make_okada_coefficient_matrix( 

3487 self.patches, nthreads=self.nthreads, pure_shear=self.pure_shear) 

3488 

3489 def get_patch_attribute(self, attr): 

3490 ''' 

3491 Get patch attributes. 

3492 

3493 :param attr: 

3494 Name of selected attribute (see 

3495 :py:class`pyrocko.modelling.okada.OkadaSource`). 

3496 :type attr: 

3497 str 

3498 

3499 :returns: 

3500 Array with attribute value for each fault patch. 

3501 :rtype: 

3502 :py:class:`~numpy.ndarray` 

3503 

3504 ''' 

3505 if not self.patches: 

3506 raise ValueError( 

3507 'Patches are needed. Please calculate them first.') 

3508 return num.array([getattr(p, attr) for p in self.patches]) 

3509 

3510 def get_slip( 

3511 self, 

3512 time=None, 

3513 scale_slip=True, 

3514 interpolation='nearest_neighbor', 

3515 **kwargs): 

3516 ''' 

3517 Get slip per subfault patch for given time after rupture start. 

3518 

3519 :param time: 

3520 Time after origin [s], for which slip is computed. If not 

3521 given, final static slip is returned. 

3522 :type time: 

3523 optional, float > 0. 

3524 

3525 :param scale_slip: 

3526 If ``True`` and :py:attr:`slip` given, all slip values are scaled 

3527 to fit the given maximum slip. 

3528 :type scale_slip: 

3529 optional, bool 

3530 

3531 :param interpolation: 

3532 Interpolation method to use (choose between ``'nearest_neighbor'`` 

3533 and ``'multilinear'``). 

3534 :type interpolation: 

3535 optional, str 

3536 

3537 :returns: 

3538 Inverted dislocations (:math:`u_{strike}, u_{dip}, u_{tensile}`) 

3539 for each source patch. 

3540 :rtype: 

3541 :py:class:`~numpy.ndarray`: ``(n_sources, 3)`` 

3542 ''' 

3543 

3544 if self.patches is None: 

3545 raise ValueError( 

3546 'Please discretize the source first (discretize_patches())') 

3547 npatches = len(self.patches) 

3548 tractions = self.get_tractions() 

3549 time_patch_max = self.get_patch_attribute('time').max() - self.time 

3550 

3551 time_patch = time 

3552 if time is None: 

3553 time_patch = time_patch_max 

3554 

3555 if self.coef_mat is None: 

3556 self.calc_coef_mat() 

3557 

3558 if tractions.shape != (npatches, 3): 

3559 raise AttributeError( 

3560 'The traction vector is of invalid shape.' 

3561 ' Required shape is (npatches, 3)') 

3562 

3563 patch_mask = num.ones(npatches, dtype=num.bool) 

3564 if self.patch_mask is not None: 

3565 patch_mask = self.patch_mask 

3566 

3567 times = self.get_patch_attribute('time') - self.time 

3568 times[~patch_mask] = time_patch + 1. # exlcude unmasked patches 

3569 relevant_sources = num.nonzero(times <= time_patch)[0] 

3570 disloc_est = num.zeros_like(tractions) 

3571 

3572 if self.smooth_rupture: 

3573 patch_activation = num.zeros(npatches) 

3574 

3575 nx, ny, times, vr, time_interpolator, vr_interpolator = \ 

3576 self.get_vr_time_interpolators( 

3577 store, interpolation=interpolation) 

3578 

3579 # Getting the native Eikonal grid, bit hackish 

3580 points_x = num.round(time_interpolator.grid[0], decimals=2) 

3581 points_y = num.round(time_interpolator.grid[1], decimals=2) 

3582 times_eikonal = time_interpolator.values 

3583 

3584 time_max = time 

3585 if time is None: 

3586 time_max = times_eikonal.max() 

3587 

3588 for ip, p in enumerate(self.patches): 

3589 ul = num.round((p.ix + p.al1, p.iy + p.aw1), decimals=2) 

3590 lr = num.round((p.ix + p.al2, p.iy + p.aw2), decimals=2) 

3591 

3592 idx_length = num.logical_and( 

3593 points_x >= ul[0], points_x <= lr[0]) 

3594 idx_width = num.logical_and( 

3595 points_y >= ul[1], points_y <= lr[1]) 

3596 

3597 times_patch = times_eikonal[num.ix_(idx_length, idx_width)] 

3598 if times_patch.size == 0: 

3599 raise AttributeError('could not use smooth_rupture') 

3600 

3601 patch_activation[ip] = \ 

3602 (times_patch <= time_max).sum() / times_patch.size 

3603 

3604 if time_patch == 0 and time_patch != time_patch_max: 

3605 patch_activation[ip] = 0. 

3606 

3607 patch_activation[~patch_mask] = 0. # exlcude unmasked patches 

3608 

3609 relevant_sources = num.nonzero(patch_activation > 0.)[0] 

3610 

3611 if relevant_sources.size == 0: 

3612 return disloc_est 

3613 

3614 indices_disl = num.repeat(relevant_sources * 3, 3) 

3615 indices_disl[1::3] += 1 

3616 indices_disl[2::3] += 2 

3617 

3618 disloc_est[relevant_sources] = invert_fault_dislocations_bem( 

3619 stress_field=tractions[relevant_sources, :].ravel(), 

3620 coef_mat=self.coef_mat[indices_disl, :][:, indices_disl], 

3621 pure_shear=self.pure_shear, nthreads=self.nthreads, 

3622 epsilon=None, 

3623 **kwargs) 

3624 

3625 if self.smooth_rupture: 

3626 disloc_est *= patch_activation[:, num.newaxis] 

3627 

3628 if scale_slip and self.slip is not None: 

3629 disloc_tmax = num.zeros(npatches) 

3630 

3631 indices_disl = num.repeat(num.nonzero(patch_mask)[0] * 3, 3) 

3632 indices_disl[1::3] += 1 

3633 indices_disl[2::3] += 2 

3634 

3635 disloc_tmax[patch_mask] = num.linalg.norm( 

3636 invert_fault_dislocations_bem( 

3637 stress_field=tractions[patch_mask, :].ravel(), 

3638 coef_mat=self.coef_mat[indices_disl, :][:, indices_disl], 

3639 pure_shear=self.pure_shear, nthreads=self.nthreads, 

3640 epsilon=None, 

3641 **kwargs), axis=1) 

3642 

3643 disloc_tmax_max = disloc_tmax.max() 

3644 if disloc_tmax_max == 0.: 

3645 logger.warning( 

3646 'slip scaling not performed. Maximum slip is 0.') 

3647 

3648 disloc_est *= self.slip / disloc_tmax_max 

3649 

3650 return disloc_est 

3651 

3652 def get_delta_slip( 

3653 self, 

3654 store=None, 

3655 deltat=None, 

3656 delta=True, 

3657 interpolation='nearest_neighbor', 

3658 **kwargs): 

3659 ''' 

3660 Get slip change snapshots. 

3661 

3662 The time interval, within which the slip changes are computed is 

3663 determined by the sampling rate of the Green's function database or 

3664 ``deltat``. Additional ``**kwargs`` are passed to :py:meth:`get_slip`. 

3665 

3666 :param store: 

3667 Green's function database (needs to cover whole region of of the 

3668 source). Its sampling interval is used as time increment for slip 

3669 difference calculation. Either ``deltat`` or ``store`` should be 

3670 given. 

3671 :type store: 

3672 optional, :py:class:`~pyrocko.gf.store.Store` 

3673 

3674 :param deltat: 

3675 Time interval for slip difference calculation [s]. Either 

3676 ``deltat`` or ``store`` should be given. 

3677 :type deltat: 

3678 optional, float 

3679 

3680 :param delta: 

3681 If ``True``, slip differences between two time steps are given. If 

3682 ``False``, cumulative slip for all time steps. 

3683 :type delta: 

3684 optional, bool 

3685 

3686 :param interpolation: 

3687 Interpolation method to use (choose between ``'nearest_neighbor'`` 

3688 and ``'multilinear'``). 

3689 :type interpolation: 

3690 optional, str 

3691 

3692 :returns: 

3693 Displacement changes(:math:`\\Delta u_{strike}, 

3694 \\Delta u_{dip} , \\Delta u_{tensile}`) for each source patch and 

3695 time; corner times, for which delta slip is computed. The order of 

3696 displacement changes array is: 

3697 

3698 .. math:: 

3699 

3700 &[[\\\\ 

3701 &[\\Delta u_{strike, patch1, t1}, 

3702 \\Delta u_{dip, patch1, t1}, 

3703 \\Delta u_{tensile, patch1, t1}],\\\\ 

3704 &[\\Delta u_{strike, patch1, t2}, 

3705 \\Delta u_{dip, patch1, t2}, 

3706 \\Delta u_{tensile, patch1, t2}]\\\\ 

3707 &], [\\\\ 

3708 &[\\Delta u_{strike, patch2, t1}, ...],\\\\ 

3709 &[\\Delta u_{strike, patch2, t2}, ...]]]\\\\ 

3710 

3711 :rtype: :py:class:`~numpy.ndarray`: ``(n_sources, n_times, 3)``, 

3712 :py:class:`~numpy.ndarray`: ``(n_times, )`` 

3713 ''' 

3714 if store and deltat: 

3715 raise AttributeError( 

3716 'Argument collision. ' 

3717 'Please define only the store or the deltat argument.') 

3718 

3719 if store: 

3720 deltat = store.config.deltat 

3721 

3722 if not deltat: 

3723 raise AttributeError('Please give a GF store or set deltat.') 

3724 

3725 npatches = len(self.patches) 

3726 

3727 _, _, _, _, time_interpolator, _ = self.get_vr_time_interpolators( 

3728 store, interpolation=interpolation) 

3729 tmax = time_interpolator.values.max() 

3730 

3731 calc_times = num.arange(0., tmax + deltat, deltat) 

3732 calc_times[calc_times > tmax] = tmax 

3733 

3734 disloc_est = num.zeros((npatches, calc_times.size, 3)) 

3735 

3736 for itime, t in enumerate(calc_times): 

3737 disloc_est[:, itime, :] = self.get_slip( 

3738 time=t, scale_slip=False, **kwargs) 

3739 

3740 if self.slip: 

3741 disloc_tmax = num.linalg.norm( 

3742 self.get_slip(scale_slip=False, time=tmax), 

3743 axis=1) 

3744 

3745 disloc_tmax_max = disloc_tmax.max() 

3746 if disloc_tmax_max == 0.: 

3747 logger.warning( 

3748 'Slip scaling not performed. Maximum slip is 0.') 

3749 else: 

3750 disloc_est *= self.slip / disloc_tmax_max 

3751 

3752 if not delta: 

3753 return disloc_est, calc_times 

3754 

3755 # if we have only one timestep there is no gradient 

3756 if calc_times.size > 1: 

3757 disloc_init = disloc_est[:, 0, :] 

3758 disloc_est = num.diff(disloc_est, axis=1) 

3759 disloc_est = num.concatenate(( 

3760 disloc_init[:, num.newaxis, :], disloc_est), axis=1) 

3761 

3762 calc_times = calc_times 

3763 

3764 return disloc_est, calc_times 

3765 

3766 def get_slip_rate(self, *args, **kwargs): 

3767 ''' 

3768 Get slip rate inverted from patches. 

3769 

3770 The time interval, within which the slip rates are computed is 

3771 determined by the sampling rate of the Green's function database or 

3772 ``deltat``. Additional ``*args`` and ``**kwargs`` are passed to 

3773 :py:meth:`get_delta_slip`. 

3774 

3775 :returns: 

3776 Slip rates (:math:`\\Delta u_{strike}/\\Delta t`, 

3777 :math:`\\Delta u_{dip}/\\Delta t, \\Delta u_{tensile}/\\Delta t`) 

3778 for each source patch and time; corner times, for which slip rate 

3779 is computed. The order of sliprate array is: 

3780 

3781 .. math:: 

3782 

3783 &[[\\\\ 

3784 &[\\Delta u_{strike, patch1, t1}/\\Delta t, 

3785 \\Delta u_{dip, patch1, t1}/\\Delta t, 

3786 \\Delta u_{tensile, patch1, t1}/\\Delta t],\\\\ 

3787 &[\\Delta u_{strike, patch1, t2}/\\Delta t, 

3788 \\Delta u_{dip, patch1, t2}/\\Delta t, 

3789 \\Delta u_{tensile, patch1, t2}/\\Delta t]], [\\\\ 

3790 &[\\Delta u_{strike, patch2, t1}/\\Delta t, ...],\\\\ 

3791 &[\\Delta u_{strike, patch2, t2}/\\Delta t, ...]]]\\\\ 

3792 

3793 :rtype: :py:class:`~numpy.ndarray`: ``(n_sources, n_times, 3)``, 

3794 :py:class:`~numpy.ndarray`: ``(n_times, )`` 

3795 ''' 

3796 ddisloc_est, calc_times = self.get_delta_slip( 

3797 *args, delta=True, **kwargs) 

3798 

3799 dt = num.concatenate( 

3800 [(num.diff(calc_times)[0], ), num.diff(calc_times)]) 

3801 slip_rate = num.linalg.norm(ddisloc_est, axis=2) / dt 

3802 

3803 return slip_rate, calc_times 

3804 

3805 def get_moment_rate_patches(self, *args, **kwargs): 

3806 ''' 

3807 Get scalar seismic moment rate for each patch individually. 

3808 

3809 Additional ``*args`` and ``**kwargs`` are passed to 

3810 :py:meth:`get_slip_rate`. 

3811 

3812 :returns: 

3813 Seismic moment rate for each source patch and time; corner times, 

3814 for which patch moment rate is computed based on slip rate. The 

3815 order of the moment rate array is: 

3816 

3817 .. math:: 

3818 

3819 &[\\\\ 

3820 &[(\\Delta M / \\Delta t)_{patch1, t1}, 

3821 (\\Delta M / \\Delta t)_{patch1, t2}, ...],\\\\ 

3822 &[(\\Delta M / \\Delta t)_{patch2, t1}, 

3823 (\\Delta M / \\Delta t)_{patch, t2}, ...],\\\\ 

3824 &[...]]\\\\ 

3825 

3826 :rtype: :py:class:`~numpy.ndarray`: ``(n_sources, n_times)``, 

3827 :py:class:`~numpy.ndarray`: ``(n_times, )`` 

3828 ''' 

3829 slip_rate, calc_times = self.get_slip_rate(*args, **kwargs) 

3830 

3831 shear_mod = self.get_patch_attribute('shearmod') 

3832 p_length = self.get_patch_attribute('length') 

3833 p_width = self.get_patch_attribute('width') 

3834 

3835 dA = p_length * p_width 

3836 

3837 mom_rate = shear_mod[:, num.newaxis] * slip_rate * dA[:, num.newaxis] 

3838 

3839 return mom_rate, calc_times 

3840 

3841 def get_moment_rate(self, store, target=None, deltat=None): 

3842 ''' 

3843 Get seismic source moment rate for the total source (STF). 

3844 

3845 :param store: 

3846 Green's function database (needs to cover whole region of of the 

3847 source). Its ``deltat`` [s] is used as time increment for slip 

3848 difference calculation. Either ``deltat`` or ``store`` should be 

3849 given. 

3850 :type store: 

3851 :py:class:`~pyrocko.gf.store.Store` 

3852 

3853 :param target: 

3854 Target information, needed for interpolation method. 

3855 :type target: 

3856 optional, :py:class:`~pyrocko.gf.targets.Target` 

3857 

3858 :param deltat: 

3859 Time increment for slip difference calculation [s]. If not given 

3860 ``store.deltat`` is used. 

3861 :type deltat: 

3862 optional, float 

3863 

3864 :return: 

3865 Seismic moment rate [Nm/s] for each time; corner times, for which 

3866 moment rate is computed. The order of the moment rate array is: 

3867 

3868 .. math:: 

3869 

3870 &[\\\\ 

3871 &(\\Delta M / \\Delta t)_{t1},\\\\ 

3872 &(\\Delta M / \\Delta t)_{t2},\\\\ 

3873 &...]\\\\ 

3874 

3875 :rtype: 

3876 :py:class:`~numpy.ndarray`: ``(n_times, )``, 

3877 :py:class:`~numpy.ndarray`: ``(n_times, )`` 

3878 ''' 

3879 if not deltat: 

3880 deltat = store.config.deltat 

3881 return self.discretize_basesource( 

3882 store, target=target).get_moment_rate(deltat) 

3883 

3884 def get_moment(self, *args, **kwargs): 

3885 ''' 

3886 Get seismic cumulative moment. 

3887 

3888 Additional ``*args`` and ``**kwargs`` are passed to 

3889 :py:meth:`get_magnitude`. 

3890 

3891 :returns: 

3892 Cumulative seismic moment in [Nm]. 

3893 :rtype: 

3894 float 

3895 ''' 

3896 return float(pmt.magnitude_to_moment(self.get_magnitude( 

3897 *args, **kwargs))) 

3898 

3899 def rescale_slip(self, magnitude=None, moment=None, **kwargs): 

3900 ''' 

3901 Rescale source slip based on given target magnitude or seismic moment. 

3902 

3903 Rescale the maximum source slip to fit the source moment magnitude or 

3904 seismic moment to the given target values. Either ``magnitude`` or 

3905 ``moment`` need to be given. Additional ``**kwargs`` are passed to 

3906 :py:meth:`get_moment`. 

3907 

3908 :param magnitude: 

3909 Target moment magnitude :math:`M_\\mathrm{w}` as in 

3910 [Hanks and Kanamori, 1979] 

3911 :type magnitude: 

3912 optional, float 

3913 

3914 :param moment: 

3915 Target seismic moment :math:`M_0` [Nm]. 

3916 :type moment: 

3917 optional, float 

3918 ''' 

3919 if self.slip is None: 

3920 self.slip = 1. 

3921 logger.warning('No slip found for rescaling. ' 

3922 'An initial slip of 1 m is assumed.') 

3923 

3924 if magnitude is None and moment is None: 

3925 raise ValueError( 

3926 'Either target magnitude or moment need to be given.') 

3927 

3928 moment_init = self.get_moment(**kwargs) 

3929 

3930 if magnitude is not None: 

3931 moment = pmt.magnitude_to_moment(magnitude) 

3932 

3933 self.slip *= moment / moment_init 

3934 

3935 

class DoubleDCSource(SourceWithMagnitude):
    '''
    Two double-couple point sources separated in space and time.

    The moment share between the sub-sources is controlled by the parameter
    ``mix``. The position of the subsources depends on the moment
    distribution between the two sources: depth, east and north shift are
    given for the centroid between the two double-couples, and the
    subsources are positioned around this centroid according to their
    moment shares. This is done via their delta parameters, which are
    therefore relative to that centroid. Note that the depth of the
    subsources can hence be ``depth +/- delta_depth``. For shallow
    earthquakes the depth therefore has to be chosen deep enough to avoid
    sampling above the surface.
    '''

    strike1 = Float.T(
        default=0.0,
        help='strike direction in [deg], measured clockwise from north')

    dip1 = Float.T(
        default=90.0,
        help='dip angle in [deg], measured downward from horizontal')

    azimuth = Float.T(
        default=0.0,
        help='azimuth to second double-couple [deg], '
             'measured at first, clockwise from north')

    rake1 = Float.T(
        default=0.0,
        help='rake angle in [deg], '
             'measured counter-clockwise from right-horizontal '
             'in on-plane view')

    strike2 = Float.T(
        default=0.0,
        help='strike direction in [deg], measured clockwise from north')

    dip2 = Float.T(
        default=90.0,
        help='dip angle in [deg], measured downward from horizontal')

    rake2 = Float.T(
        default=0.0,
        help='rake angle in [deg], '
             'measured counter-clockwise from right-horizontal '
             'in on-plane view')

    delta_time = Float.T(
        default=0.0,
        help='separation of double-couples in time (t2-t1) [s]')

    delta_depth = Float.T(
        default=0.0,
        help='difference in depth (z2-z1) [m]')

    distance = Float.T(
        default=0.0,
        help='distance between the two double-couples [m]')

    mix = Float.T(
        default=0.5,
        help='how to distribute the moment to the two doublecouples '
             'mix=0 -> m1=1 and m2=0; mix=1 -> m1=0, m2=1')

    stf1 = STF.T(
        optional=True,
        help='Source time function of subsource 1 '
             '(if given, overrides STF from attribute :py:gattr:`Source.stf`)')

    stf2 = STF.T(
        optional=True,
        help='Source time function of subsource 2 '
             '(if given, overrides STF from attribute :py:gattr:`Source.stf`)')

    discretized_source_class = meta.DiscretizedMTSource

4015 

    def base_key(self):
        '''
        Return a tuple identifying this source for GF result caching.

        Two sources with equal base keys produce identical base
        seismograms, so results may be shared between them. The key
        includes location, both sub-double-couple orientations, their
        spatial/temporal separation, the mixing weight and the base keys
        of both effective pre-STFs.
        '''
        return (
            self.time, self.depth, self.lat, self.north_shift,
            self.lon, self.east_shift, type(self).__name__) + \
            self.effective_stf1_pre().base_key() + \
            self.effective_stf2_pre().base_key() + (
                self.strike1, self.dip1, self.rake1,
                self.strike2, self.dip2, self.rake2,
                self.delta_time, self.delta_depth,
                self.azimuth, self.distance, self.mix)

4026 

    def get_factor(self):
        '''
        Return the scaling factor applied to the unit-moment base
        seismogram (the total scalar moment).
        '''
        return self.moment

4029 

4030 def effective_stf1_pre(self): 

4031 return self.stf1 or self.stf or g_unit_pulse 

4032 

4033 def effective_stf2_pre(self): 

4034 return self.stf2 or self.stf or g_unit_pulse 

4035 

    def effective_stf_post(self):
        # No post-processing STF is applied: the sub-source STFs are
        # already folded in during discretization, so a unit pulse is
        # returned here.
        return g_unit_pulse

4038 

4039 def split(self): 

4040 a1 = 1.0 - self.mix 

4041 a2 = self.mix 

4042 delta_north = math.cos(self.azimuth * d2r) * self.distance 

4043 delta_east = math.sin(self.azimuth * d2r) * self.distance 

4044 

4045 dc1 = DCSource( 

4046 lat=self.lat, 

4047 lon=self.lon, 

4048 time=self.time - self.delta_time * a2, 

4049 north_shift=self.north_shift - delta_north * a2, 

4050 east_shift=self.east_shift - delta_east * a2, 

4051 depth=self.depth - self.delta_depth * a2, 

4052 moment=self.moment * a1, 

4053 strike=self.strike1, 

4054 dip=self.dip1, 

4055 rake=self.rake1, 

4056 stf=self.stf1 or self.stf) 

4057 

4058 dc2 = DCSource( 

4059 lat=self.lat, 

4060 lon=self.lon, 

4061 time=self.time + self.delta_time * a1, 

4062 north_shift=self.north_shift + delta_north * a1, 

4063 east_shift=self.east_shift + delta_east * a1, 

4064 depth=self.depth + self.delta_depth * a1, 

4065 moment=self.moment * a2, 

4066 strike=self.strike2, 

4067 dip=self.dip2, 

4068 rake=self.rake2, 

4069 stf=self.stf2 or self.stf) 

4070 

4071 return [dc1, dc2] 

4072 

    def discretize_basesource(self, store, target=None):
        '''
        Discretize into a :py:class:`~pyrocko.gf.meta.DiscretizedMTSource`.

        The two sub-double-couples are offset symmetrically around the
        reference point in space, depth and time, each with its own
        effective STF, and weighted by :py:gattr:`mix`. The unit moment
        tensors built here are scaled later via :py:meth:`get_factor`.
        '''
        a1 = 1.0 - self.mix
        a2 = self.mix
        # Unit-moment tensors; mix weights enter via scalar_moment.
        mot1 = pmt.MomentTensor(strike=self.strike1, dip=self.dip1,
                                rake=self.rake1, scalar_moment=a1)
        mot2 = pmt.MomentTensor(strike=self.strike2, dip=self.dip2,
                                rake=self.rake2, scalar_moment=a2)

        # Horizontal offset vector between the two sub-sources.
        delta_north = math.cos(self.azimuth * d2r) * self.distance
        delta_east = math.sin(self.azimuth * d2r) * self.distance

        # STFs are sampled at the store's sampling interval, centered on
        # each sub-source's shifted origin time.
        times1, amplitudes1 = self.effective_stf1_pre().discretize_t(
            store.config.deltat, self.time - self.delta_time * a2)

        times2, amplitudes2 = self.effective_stf2_pre().discretize_t(
            store.config.deltat, self.time + self.delta_time * a1)

        nt1 = times1.size
        nt2 = times2.size

        # One point source per STF sample; arrays are the concatenation
        # of subsource 1's samples followed by subsource 2's.
        ds = meta.DiscretizedMTSource(
            lat=self.lat,
            lon=self.lon,
            times=num.concatenate((times1, times2)),
            north_shifts=num.concatenate((
                num.repeat(self.north_shift - delta_north * a2, nt1),
                num.repeat(self.north_shift + delta_north * a1, nt2))),
            east_shifts=num.concatenate((
                num.repeat(self.east_shift - delta_east * a2, nt1),
                num.repeat(self.east_shift + delta_east * a1, nt2))),
            depths=num.concatenate((
                num.repeat(self.depth - self.delta_depth * a2, nt1),
                num.repeat(self.depth + self.delta_depth * a1, nt2))),
            m6s=num.vstack((
                mot1.m6()[num.newaxis, :] * amplitudes1[:, num.newaxis],
                mot2.m6()[num.newaxis, :] * amplitudes2[:, num.newaxis])))

        return ds

4111 

4112 def pyrocko_moment_tensor(self, store=None, target=None): 

4113 a1 = 1.0 - self.mix 

4114 a2 = self.mix 

4115 mot1 = pmt.MomentTensor(strike=self.strike1, dip=self.dip1, 

4116 rake=self.rake1, 

4117 scalar_moment=a1 * self.moment) 

4118 mot2 = pmt.MomentTensor(strike=self.strike2, dip=self.dip2, 

4119 rake=self.rake2, 

4120 scalar_moment=a2 * self.moment) 

4121 return pmt.MomentTensor(m=mot1.m() + mot2.m()) 

4122 

    def pyrocko_event(self, store=None, target=None, **kwargs):
        '''
        Convert to a :py:class:`pyrocko.model.Event`, attaching the
        combined moment tensor of both sub-double-couples.
        '''
        return SourceWithMagnitude.pyrocko_event(
            self, store, target,
            moment_tensor=self.pyrocko_moment_tensor(store, target),
            **kwargs)

4128 

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        '''
        Create a source from a :py:class:`pyrocko.model.Event`.

        If the event has a moment tensor, both sub-double-couples are
        initialized with the same (strike, dip, rake) from its first
        nodal plane and ``mix=0.0``, so all moment sits on subsource 1.
        Explicit ``kwargs`` override these derived values.
        '''
        d = {}
        mt = ev.moment_tensor
        if mt:
            (strike, dip, rake), _ = mt.both_strike_dip_rake()
            d.update(
                strike1=float(strike),
                dip1=float(dip),
                rake1=float(rake),
                strike2=float(strike),
                dip2=float(dip),
                rake2=float(rake),
                mix=0.0,
                magnitude=float(mt.moment_magnitude()))

        d.update(kwargs)
        source = super(DoubleDCSource, cls).from_pyrocko_event(ev, **d)
        # Move the common STF onto subsource 1; subsource 2 (weight zero
        # for mix=0.0) gets a zero-duration placeholder STF.
        source.stf1 = source.stf
        source.stf2 = HalfSinusoidSTF(effective_duration=0.)
        source.stf = None
        return source

4151 

4152 

class RingfaultSource(SourceWithMagnitude):
    '''
    A ring fault with vertical doublecouples.

    The ring is discretized into :py:gattr:`npointsources` point sources
    placed on a circle of :py:gattr:`diameter`, rotated according to
    :py:gattr:`strike` and :py:gattr:`dip`.
    '''

    diameter = Float.T(
        default=1.0,
        help='diameter of the ring in [m]')

    sign = Float.T(
        default=1.0,
        help='inside of the ring moves up (+1) or down (-1)')

    strike = Float.T(
        default=0.0,
        help='strike direction of the ring plane, clockwise from north,'
             ' in [deg]')

    dip = Float.T(
        default=0.0,
        help='dip angle of the ring plane from horizontal in [deg]')

    npointsources = Int.T(
        default=360,
        help='number of point sources to use')

    discretized_source_class = meta.DiscretizedMTSource

    def base_key(self):
        # Ring geometry and discretization density determine the base
        # seismogram; sign and moment enter later via get_factor().
        return Source.base_key(self) + (
            self.strike, self.dip, self.diameter, self.npointsources)

    def get_factor(self):
        return self.sign * self.moment

    def discretize_basesource(self, store=None, target=None):
        # NOTE(review): 'store' defaults to None but store.config.deltat
        # is dereferenced below — callers must always pass a store.
        n = self.npointsources
        phi = num.linspace(0, 2.0 * num.pi, n, endpoint=False)

        # Points on a horizontal circle around the origin.
        points = num.zeros((n, 3))
        points[:, 0] = num.cos(phi) * 0.5 * self.diameter
        points[:, 1] = num.sin(phi) * 0.5 * self.diameter

        # Rotate the circle into the (strike, dip) plane.
        rotmat = num.array(pmt.euler_to_matrix(
            self.dip * d2r, self.strike * d2r, 0.0))
        points = num.dot(rotmat.T, points.T).T  # !!! ?

        points[:, 0] += self.north_shift
        points[:, 1] += self.east_shift
        points[:, 2] += self.depth

        # Template moment tensor for one element; total moment is split
        # evenly over the n point sources.
        m = num.array(pmt.MomentTensor(strike=90., dip=90., rake=-90.,
                                       scalar_moment=1.0 / n).m())

        # Per-point rotation about the vertical axis by the ring angle.
        rotmats = num.transpose(
            [[num.cos(phi), num.sin(phi), num.zeros(n)],
             [-num.sin(phi), num.cos(phi), num.zeros(n)],
             [num.zeros(n), num.zeros(n), num.ones(n)]], (2, 0, 1))

        ms = num.zeros((n, 3, 3))
        for i in range(n):
            # Rotate template MT around the ring, then into the plane.
            mtemp = num.dot(rotmats[i].T, num.dot(m, rotmats[i]))
            ms[i, :, :] = num.dot(rotmat.T, num.dot(mtemp, rotmat))

        # Pack symmetric tensors as (mnn, mee, mdd, mne, mnd, med) rows.
        m6s = num.vstack((ms[:, 0, 0], ms[:, 1, 1], ms[:, 2, 2],
                          ms[:, 0, 1], ms[:, 0, 2], ms[:, 1, 2])).T

        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)

        nt = times.size

        # Each ring point is replicated for every STF sample.
        return meta.DiscretizedMTSource(
            times=num.tile(times, n),
            lat=self.lat,
            lon=self.lon,
            north_shifts=num.repeat(points[:, 0], nt),
            east_shifts=num.repeat(points[:, 1], nt),
            depths=num.repeat(points[:, 2], nt),
            m6s=num.repeat(m6s, nt, axis=0) * num.tile(
                amplitudes, n)[:, num.newaxis])

4234 

4235 

class CombiSource(Source):
    '''
    Composite source model.

    Aggregates several :py:class:`Source` objects into one. All
    sub-sources are transformed to a common geographic origin (taken
    from the first sub-source) and the reference point of the combined
    source is set to the mean position, depth and time of the
    sub-sources.
    '''

    discretized_source_class = meta.DiscretizedMTSource

    subsources = List.T(Source.T())

    def __init__(self, subsources=None, **kwargs):
        '''
        :param subsources: non-empty list of :py:class:`Source` objects
        :raises BadRequest: if ``subsources`` is empty or not given

        Note: the default was changed from a mutable ``[]`` to ``None``;
        behavior is unchanged (an empty value raises either way).
        '''
        if not subsources:
            raise BadRequest(
                'Need at least one sub-source to create a CombiSource object.')

        lats = num.array(
            [subsource.lat for subsource in subsources], dtype=float)
        lons = num.array(
            [subsource.lon for subsource in subsources], dtype=float)

        lat, lon = lats[0], lons[0]

        # Bug fix: the condition previously parsed as
        # ``(not all(lats == lat)) and all(lons == lon)`` due to operator
        # precedence, so sources with differing longitudes were never
        # moved to the common origin. Intended: unify origins whenever
        # they are not all identical.
        if not (num.all(lats == lat) and num.all(lons == lon)):
            subsources = [s.clone() for s in subsources]
            # First sub-source already has (lat, lon) as its origin.
            for subsource in subsources[1:]:
                subsource.set_origin(lat, lon)

        # Reference point: mean of the sub-source positions and times.
        depth = float(num.mean([p.depth for p in subsources]))
        time = float(num.mean([p.time for p in subsources]))
        north_shift = float(num.mean([p.north_shift for p in subsources]))
        east_shift = float(num.mean([p.east_shift for p in subsources]))
        kwargs.update(
            time=time,
            lat=float(lat),
            lon=float(lon),
            north_shift=north_shift,
            east_shift=east_shift,
            depth=depth)

        Source.__init__(self, subsources=subsources, **kwargs)

    def get_factor(self):
        # Sub-source factors are applied individually in
        # discretize_basesource(), so the combined factor is unity.
        return 1.0

    def discretize_basesource(self, store, target=None):
        '''
        Discretize all sub-sources and combine them into a single
        :py:class:`~pyrocko.gf.meta.DiscretizedMTSource`.
        '''
        dsources = []
        for sf in self.subsources:
            ds = sf.discretize_basesource(store, target)
            # Bake each sub-source's scaling factor into its tensors.
            ds.m6s *= sf.get_factor()
            dsources.append(ds)

        return meta.DiscretizedMTSource.combine(dsources)

4286 

4287 

class SFSource(Source):
    '''
    A single force point source.

    Supported GF schemes: `'elastic5'`.
    '''

    discretized_source_class = meta.DiscretizedSFSource

    fn = Float.T(
        default=0.,
        help='northward component of single force [N]')

    fe = Float.T(
        default=0.,
        help='eastward component of single force [N]')

    fd = Float.T(
        default=0.,
        help='downward component of single force [N]')

    def __init__(self, **kwargs):
        Source.__init__(self, **kwargs)

    def base_key(self):
        # The three force components participate in the GF lookup key.
        return Source.base_key(self) + (self.fn, self.fe, self.fd)

    def get_factor(self):
        # Force amplitudes are already baked into the discretized source.
        return 1.0

    def discretize_basesource(self, store, target=None):
        '''
        Discretize into a :py:class:`~pyrocko.gf.meta.DiscretizedSFSource`,
        one force vector per STF sample.
        '''
        times, amplitudes = self.effective_stf_pre().discretize_t(
            store.config.deltat, self.time)

        force_ned = num.array(
            [[self.fn, self.fe, self.fd]], dtype=float)

        return meta.DiscretizedSFSource(
            forces=amplitudes[:, num.newaxis] * force_ned,
            **self._dparams_base_repeated(times))

    def pyrocko_event(self, store=None, target=None, **kwargs):
        return Source.pyrocko_event(self, store, target, **kwargs)

    @classmethod
    def from_pyrocko_event(cls, ev, **kwargs):
        params = dict(kwargs)
        return super(SFSource, cls).from_pyrocko_event(ev, **params)

4337 

4338 

class PorePressurePointSource(Source):
    '''
    Excess pore pressure point source.

    For poro-elastic initial value problem where an excess pore pressure is
    brought into a small source volume.
    '''

    discretized_source_class = meta.DiscretizedPorePressureSource

    pp = Float.T(
        default=1.0,
        help='initial excess pore pressure in [Pa]')

    def base_key(self):
        # Only location/time matter for the base seismogram; the pressure
        # amplitude is applied via get_factor().
        return Source.base_key(self)

    def get_factor(self):
        return self.pp

    def discretize_basesource(self, store, target=None):
        # Unit pressure; 'arr' is presumably a module-level helper
        # converting scalars to a float array — confirm at definition.
        return meta.DiscretizedPorePressureSource(pp=arr(1.0),
                                                  **self._dparams_base())

4362 

4363 

class PorePressureLineSource(Source):
    '''
    Excess pore pressure line source.

    The line source is centered at (north_shift, east_shift, depth).
    '''

    discretized_source_class = meta.DiscretizedPorePressureSource

    pp = Float.T(
        default=1.0,
        help='initial excess pore pressure in [Pa]')

    length = Float.T(
        default=0.0,
        help='length of the line source [m]')

    azimuth = Float.T(
        default=0.0,
        help='azimuth direction, clockwise from north [deg]')

    dip = Float.T(
        default=90.,
        help='dip direction, downward from horizontal [deg]')

    def base_key(self):
        # Geometry enters the cache key; pressure amplitude does not.
        return Source.base_key(self) + (self.azimuth, self.dip, self.length)

    def get_factor(self):
        return self.pp

    def discretize_basesource(self, store, target=None):
        '''
        Discretize the line into equally spaced unit-pressure points.
        '''
        # Point count: at least two points per smallest GF grid spacing,
        # always odd so one point sits at the center.
        npoints = 2 * int(math.ceil(
            self.length / num.min(store.config.deltas))) + 1

        # Signed offsets along the line, centered on the reference point.
        offsets = num.linspace(
            -0.5 * self.length, 0.5 * self.length, npoints)

        sin_azi = math.sin(self.azimuth * d2r)
        cos_azi = math.cos(self.azimuth * d2r)
        sin_dip = math.sin(self.dip * d2r)
        cos_dip = math.cos(self.dip * d2r)

        points = num.zeros((npoints, 3))
        points[:, 0] = self.north_shift + offsets * cos_azi * cos_dip
        points[:, 1] = self.east_shift + offsets * sin_azi * cos_dip
        points[:, 2] = self.depth + offsets * sin_dip

        # Unit pressure distributed evenly over all points.
        return meta.DiscretizedPorePressureSource(
            times=util.num_full(npoints, self.time),
            lat=self.lat,
            lon=self.lon,
            north_shifts=points[:, 0],
            east_shifts=points[:, 1],
            depths=points[:, 2],
            pp=num.ones(npoints) / npoints)

4419 

4420 

class Request(Object):
    '''
    Synthetic seismogram computation request.

    ::

        Request(**kwargs)
        Request(sources, targets, **kwargs)
    '''

    sources = List.T(
        Source.T(),
        help='list of sources for which to produce synthetics.')

    targets = List.T(
        Target.T(),
        help='list of targets for which to produce synthetics.')

    @classmethod
    def args2kwargs(cls, args):
        # Map positional call forms onto keyword arguments.
        nargs = len(args)
        if nargs not in (0, 2, 3):
            raise BadRequest('Invalid arguments.')

        if nargs == 2:
            return dict(sources=args[0], targets=args[1])

        return {}

    def __init__(self, *args, **kwargs):
        kwargs.update(self.args2kwargs(args))
        sources = kwargs.pop('sources', [])
        targets = kwargs.pop('targets', [])

        # Single instances may be given in place of lists.
        if isinstance(sources, Source):
            sources = [sources]

        if isinstance(targets, (Target, StaticTarget)):
            targets = [targets]

        Object.__init__(self, sources=sources, targets=targets, **kwargs)

    @property
    def targets_dynamic(self):
        '''Subset of targets which are dynamic (waveform) targets.'''
        return [t for t in self.targets if isinstance(t, Target)]

    @property
    def targets_static(self):
        '''Subset of targets which are static (deformation) targets.'''
        return [t for t in self.targets if isinstance(t, StaticTarget)]

    @property
    def has_dynamic(self):
        '''``True`` if any dynamic targets are present.'''
        return len(self.targets_dynamic) > 0

    @property
    def has_statics(self):
        '''``True`` if any static targets are present.'''
        return len(self.targets_static) > 0

    def subsources_map(self):
        '''Group sources by base key (shareable base seismograms).'''
        grouped = defaultdict(list)
        for source in self.sources:
            grouped[source.base_key()].append(source)

        return grouped

    def subtargets_map(self):
        '''Group targets by base key.'''
        grouped = defaultdict(list)
        for target in self.targets:
            grouped[target.base_key()].append(target)

        return grouped

    def subrequest_map(self):
        '''Cartesian product of source groups and target groups.'''
        source_groups = self.subsources_map()
        target_groups = self.subtargets_map()

        combined = {}
        for ks, ls in source_groups.items():
            for kt, lt in target_groups.items():
                combined[ks, kt] = (ls, lt)

        return combined

4501 

4502 

class ProcessingStats(Object):
    '''
    Timing and counting statistics gathered while processing a request.

    The ``t_perc_*`` attributes presumably hold per-stage shares of the
    total processing time (named as percentages — confirm at the call
    site filling them); ``t_wallclock`` and ``t_cpu`` are totals.
    '''
    t_perc_get_store_and_receiver = Float.T(default=0.)
    t_perc_discretize_source = Float.T(default=0.)
    t_perc_make_base_seismogram = Float.T(default=0.)
    t_perc_make_same_span = Float.T(default=0.)
    t_perc_post_process = Float.T(default=0.)
    t_perc_optimize = Float.T(default=0.)
    t_perc_stack = Float.T(default=0.)
    t_perc_static_get_store = Float.T(default=0.)
    t_perc_static_discretize_basesource = Float.T(default=0.)
    t_perc_static_sum_statics = Float.T(default=0.)
    t_perc_static_post_process = Float.T(default=0.)
    t_wallclock = Float.T(default=0.)
    t_cpu = Float.T(default=0.)
    n_read_blocks = Int.T(default=0)
    n_results = Int.T(default=0)
    n_subrequests = Int.T(default=0)
    n_stores = Int.T(default=0)
    n_records_stacked = Int.T(default=0)

4522 

4523 

class Response(Object):
    '''
    Response object to a synthetic seismogram computation request.
    '''

    request = Request.T()
    results_list = List.T(List.T(meta.SeismosizerResult.T()))
    stats = ProcessingStats.T()

    def pyrocko_traces(self):
        '''
        Return a list of requested
        :class:`~pyrocko.trace.Trace` instances.
        '''

        traces = []
        for results in self.results_list:
            for result in results:
                # Skip non-waveform results (e.g. static or error results).
                if not isinstance(result, meta.Result):
                    continue
                traces.append(result.trace.pyrocko_trace())

        return traces

    def kite_scenes(self):
        '''
        Return a list of requested
        :class:`~kite.scenes` instances.
        '''
        kite_scenes = []
        for results in self.results_list:
            for result in results:
                if isinstance(result, meta.KiteSceneResult):
                    sc = result.get_scene()
                    kite_scenes.append(sc)

        return kite_scenes

    def static_results(self):
        '''
        Return a list of requested
        :class:`~pyrocko.gf.meta.StaticResult` instances.
        '''
        statics = []
        for results in self.results_list:
            for result in results:
                if not isinstance(result, meta.StaticResult):
                    continue
                statics.append(result)

        return statics

    def iter_results(self, get='pyrocko_traces'):
        '''
        Generator function to iterate over results of request.

        :param get: ``'pyrocko_traces'`` to yield converted traces,
            ``'results'`` to yield the raw result objects.

        Yields associated :py:class:`Source`,
        :class:`~pyrocko.gf.targets.Target`,
        :class:`~pyrocko.trace.Trace` instances in each iteration.
        '''

        for isource, source in enumerate(self.request.sources):
            for itarget, target in enumerate(self.request.targets):
                result = self.results_list[isource][itarget]
                if get == 'pyrocko_traces':
                    yield source, target, result.trace.pyrocko_trace()
                elif get == 'results':
                    yield source, target, result

    def snuffle(self, **kwargs):
        '''
        Open *snuffler* with requested traces.
        '''

        trace.snuffle(self.pyrocko_traces(), **kwargs)

4599 

4600 

class Engine(Object):
    '''
    Base class for synthetic seismogram calculators.
    '''

    def get_store_ids(self):
        '''
        Get list of available GF store IDs.

        Base implementation returns an empty list; subclasses override
        this with their actual store discovery.
        '''

        return []

4612 

4613 

class Rule(object):
    '''
    Base class for channel composition rules.

    Subclasses define which GF components a target needs
    (``required_components``) and how to combine them into the requested
    quantity (``apply_``).
    '''
    pass

4616 

4617 

class VectorRule(Rule):
    '''
    Channel composition rule for 3-component vector quantities.

    Projects the north, east and down components of the named quantity
    onto the target's measurement direction.

    :param quantity: base quantity name, e.g. ``'displacement'``
    :param differentiate: number of times to differentiate the result
    :param integrate: number of times to integrate (not implemented)
    '''

    def __init__(self, quantity, differentiate=0, integrate=0):
        self.components = [quantity + '.' + c for c in 'ned']
        self.differentiate = differentiate
        self.integrate = integrate

    def required_components(self, target):
        '''
        Return the GF component names needed for this target.

        Components with a zero projection factor are omitted.
        '''
        n, e, d = self.components
        sa, ca, sd, cd = target.get_sin_cos_factors()

        comps = []
        if nonzero(ca * cd):
            comps.append(n)

        if nonzero(sa * cd):
            comps.append(e)

        if nonzero(sd):
            comps.append(d)

        return tuple(comps)

    def apply_(self, target, base_seismogram):
        '''
        Project the base seismogram components onto the target direction
        and optionally differentiate.
        '''
        n, e, d = self.components
        sa, ca, sd, cd = target.get_sin_cos_factors()

        # Bug fix: 'deltat' could previously be referenced while unbound
        # (NameError) when no component had a nonzero factor but
        # differentiation was requested. Track it explicitly instead.
        data = 0.0
        deltat = None

        if nonzero(ca * cd):
            data = base_seismogram[n].data * (ca * cd)
            deltat = base_seismogram[n].deltat

        if nonzero(sa * cd):
            data = data + base_seismogram[e].data * (sa * cd)
            deltat = base_seismogram[e].deltat

        if nonzero(sd):
            data = data + base_seismogram[d].data * sd
            deltat = base_seismogram[d].deltat

        if self.differentiate and deltat is not None:
            # If no component contributed, data is identically zero and
            # differentiation is a no-op, so it is safely skipped above.
            data = util.diff_fd(self.differentiate, 4, deltat, data)

        if self.integrate:
            raise NotImplementedError('Integration is not implemented yet.')

        return data

4666 

4667 

class HorizontalVectorRule(Rule):
    '''
    Channel composition rule for horizontal (north/east) vector
    quantities, e.g. vertical tilt.

    :param quantity: base quantity name
    :param differentiate: number of times to differentiate the result
    :param integrate: number of times to integrate (not implemented)
    '''

    def __init__(self, quantity, differentiate=0, integrate=0):
        self.components = [quantity + '.' + c for c in 'ne']
        self.differentiate = differentiate
        self.integrate = integrate

    def required_components(self, target):
        '''
        Return the GF component names needed for this target; components
        with a zero projection factor are omitted.
        '''
        n, e = self.components
        sa, ca, _, _ = target.get_sin_cos_factors()

        comps = []
        if nonzero(ca):
            comps.append(n)

        if nonzero(sa):
            comps.append(e)

        return tuple(comps)

    def apply_(self, target, base_seismogram):
        '''
        Project the horizontal components onto the target azimuth and
        optionally differentiate.
        '''
        n, e = self.components
        sa, ca, _, _ = target.get_sin_cos_factors()

        # Bug fix: the differentiate branch previously always read
        # base_seismogram[e].deltat, which fails (KeyError) when the
        # east component was not required and hence not computed. Track
        # deltat from whichever component actually contributed.
        data = 0.0
        deltat = None

        if nonzero(ca):
            data = base_seismogram[n].data * ca
            deltat = base_seismogram[n].deltat

        if nonzero(sa):
            data = data + base_seismogram[e].data * sa
            deltat = base_seismogram[e].deltat

        if self.differentiate and deltat is not None:
            data = util.diff_fd(self.differentiate, 4, deltat, data)

        if self.integrate:
            raise NotImplementedError('Integration is not implemented yet.')

        return data

4708 

4709 

class ScalarRule(Rule):
    '''
    Channel composition rule for scalar quantities (e.g. pore pressure).

    :param quantity: GF component name to pass through
    :param differentiate: number of times to differentiate the result
    '''

    def __init__(self, quantity, differentiate=0):
        self.c = quantity
        # Bug fix: 'differentiate' was accepted but never stored, so
        # apply_() raised AttributeError on self.differentiate.
        self.differentiate = differentiate

    def required_components(self, target):
        return (self.c, )

    def apply_(self, target, base_seismogram):
        '''
        Return a copy of the scalar component's samples, optionally
        differentiated.
        '''
        data = base_seismogram[self.c].data.copy()
        deltat = base_seismogram[self.c].deltat
        if self.differentiate:
            data = util.diff_fd(self.differentiate, 4, deltat, data)

        return data

4725 

4726 

class StaticDisplacement(Rule):
    '''
    Composition rule for static displacement fields.
    '''

    def required_components(self, target):
        # Static displacement always needs all three NED components.
        return tuple(['displacement.%s' % c for c in list('ned')])

    def apply_(self, target, base_statics):
        # For satellite targets, additionally project the NED
        # displacements into line-of-sight. NOTE(review): the down
        # component enters with a negative sign, so los_fac[:, 0]
        # presumably refers to the "up" direction — confirm against
        # SatelliteTarget.get_los_factors().
        if isinstance(target, SatelliteTarget):
            los_fac = target.get_los_factors()
            base_statics['displacement.los'] =\
                (los_fac[:, 0] * -base_statics['displacement.d'] +
                 los_fac[:, 1] * base_statics['displacement.e'] +
                 los_fac[:, 2] * base_statics['displacement.n'])
        return base_statics

4740 

4741 

# Mapping of dynamic measurement quantity to the list of rules able to
# produce it. Rules are alternatives: e.g. 'velocity' can come directly
# from a velocity GF or by differentiating a displacement GF once.
channel_rules = {
    'displacement': [VectorRule('displacement')],
    'rotation': [VectorRule('rotation')],
    'velocity': [
        VectorRule('velocity'),
        VectorRule('displacement', differentiate=1)],
    'acceleration': [
        VectorRule('acceleration'),
        VectorRule('velocity', differentiate=1),
        VectorRule('displacement', differentiate=2)],
    'pore_pressure': [ScalarRule('pore_pressure')],
    'vertical_tilt': [HorizontalVectorRule('vertical_tilt')],
    'darcy_velocity': [VectorRule('darcy_velocity')],
}

# Rules for static (non-time-dependent) quantities.
static_rules = {
    'displacement': [StaticDisplacement()]
}

4760 

4761 

class OutOfBoundsContext(Object):
    '''
    Diagnostic context attached to out-of-bounds/processing errors,
    identifying the offending source/target pair.
    '''
    source = Source.T()
    target = Target.T()
    distance = Float.T()
    components = List.T(String.T())

4767 

4768 

def process_dynamic_timeseries(work, psources, ptargets, engine, nthreads=0):
    '''
    Worker generator for dynamic (timeseries) requests, batched per
    store and per (sample_rate, interpolation) group.

    :param work: iterable of work items ``(_, _, isources, itargets)``
        with indices into ``psources`` / ``ptargets``
    :param engine: engine providing ``get_rule``, ``base_seismograms``
        and ``_post_process_dynamic``

    Yields ``(isource, itarget, result), tcounters`` tuples; ``result``
    is either a processed result or a :py:class:`SeismosizerError`.
    '''
    dsource_cache = {}
    # Placeholder timing counters (not measured in this code path).
    tcounters = list(range(6))

    store_ids = set()
    sources = set()
    targets = set()

    # Remember each target's original index for the yielded tuples.
    for itarget, target in enumerate(ptargets):
        target._id = itarget

    for w in work:
        _, _, isources, itargets = w

        sources.update([psources[isource] for isource in isources])
        targets.update([ptargets[itarget] for itarget in itargets])

    store_ids = set([t.store_id for t in targets])

    for isource, source in enumerate(psources):

        # Union of GF components needed by any target for this source.
        components = set()
        for itarget, target in enumerate(targets):
            rule = engine.get_rule(source, target)
            components.update(rule.required_components(target))

        for store_id in store_ids:
            store_targets = [t for t in targets if t.store_id == store_id]

            sample_rates = set([t.sample_rate for t in store_targets])
            interpolations = set([t.interpolation for t in store_targets])

            base_seismograms = []
            store_targets_out = []

            # Batch targets sharing sample rate and interpolation so the
            # engine can compute their base seismograms together.
            for samp_rate in sample_rates:
                for interp in interpolations:
                    engine_targets = [
                        t for t in store_targets if t.sample_rate == samp_rate
                        and t.interpolation == interp]

                    if not engine_targets:
                        continue

                    store_targets_out += engine_targets

                    base_seismograms += engine.base_seismograms(
                        source,
                        engine_targets,
                        components,
                        dsource_cache,
                        nthreads)

            for iseis, seismogram in enumerate(base_seismograms):
                for tr in seismogram.values():
                    if tr.err != store.SeismosizerErrorEnum.SUCCESS:
                        # NOTE(review): iseis indexes base_seismograms,
                        # which is ordered like store_targets_out, not
                        # store_targets — the target reported in this
                        # error context may be the wrong one; verify.
                        e = SeismosizerError(
                            'Seismosizer failed with return code %i\n%s' % (
                                tr.err, str(
                                    OutOfBoundsContext(
                                        source=source,
                                        target=store_targets[iseis],
                                        distance=source.distance_to(
                                            store_targets[iseis]),
                                        components=components))))
                        raise e

            for seismogram, target in zip(base_seismograms, store_targets_out):

                try:
                    result = engine._post_process_dynamic(
                        seismogram, source, target)
                except SeismosizerError as e:
                    # Report the failure as the result for this pair
                    # instead of aborting the whole batch.
                    result = e

                yield (isource, target._id, result), tcounters

4845 

4846 

def process_dynamic(work, psources, ptargets, engine, nthreads=0):
    '''
    Worker generator for dynamic requests, one source/target pair at a
    time.

    :param work: iterable of work items ``(_, _, isources, itargets)``
        with indices into ``psources`` / ``ptargets``

    Yields ``(isource, itarget, result), tcounters``; ``result`` is a
    processed result or a :py:class:`SeismosizerError`.
    '''
    dsource_cache = {}

    for w in work:
        _, _, isources, itargets = w

        sources = [psources[isource] for isource in isources]
        targets = [ptargets[itarget] for itarget in itargets]

        # Union of GF components needed by the targets in this work
        # item. NOTE(review): rules are looked up with sources[0] only —
        # assumes all sources in one work item share the same rule.
        components = set()
        for target in targets:
            rule = engine.get_rule(sources[0], target)
            components.update(rule.required_components(target))

        for isource, source in zip(isources, sources):
            for itarget, target in zip(itargets, targets):

                try:
                    base_seismogram, tcounters = engine.base_seismogram(
                        source, target, components, dsource_cache, nthreads)
                except meta.OutOfBounds as e:
                    # NOTE(review): context reports sources[0]/targets[0]
                    # rather than the current source/target — verify
                    # whether this is intentional.
                    e.context = OutOfBoundsContext(
                        source=sources[0],
                        target=targets[0],
                        distance=sources[0].distance_to(targets[0]),
                        components=components)
                    raise

                # Aggregate stacking statistics over all components.
                n_records_stacked = 0
                t_optimize = 0.0
                t_stack = 0.0

                for _, tr in base_seismogram.items():
                    n_records_stacked += tr.n_records_stacked
                    t_optimize += tr.t_optimize
                    t_stack += tr.t_stack

                try:
                    result = engine._post_process_dynamic(
                        base_seismogram, source, target)
                    result.n_records_stacked = n_records_stacked
                    result.n_shared_stacking = len(sources) *\
                        len(targets)
                    result.t_optimize = t_optimize
                    result.t_stack = t_stack
                except SeismosizerError as e:
                    # Report the failure as the result for this pair.
                    result = e

                tcounters.append(xtime())
                yield (isource, itarget, result), tcounters

4897 

4898 

def process_static(work, psources, ptargets, engine, nthreads=0):
    '''
    Worker generator for static (deformation) requests.

    Yields ``(isource, itarget, result), tcounters`` for each
    source/target pair in each work item.
    '''
    for _, _, isources, itargets in work:
        sources = [psources[i] for i in isources]
        targets = [ptargets[i] for i in itargets]

        for isource, source in zip(isources, sources):
            for itarget, target in zip(itargets, targets):
                rule = engine.get_rule(source, target)
                components = rule.required_components(target)

                try:
                    base_statics, tcounters = engine.base_statics(
                        source, target, components, nthreads)
                except meta.OutOfBounds as e:
                    # Attach diagnostic context before re-raising.
                    e.context = OutOfBoundsContext(
                        source=sources[0],
                        target=targets[0],
                        distance=float('nan'),
                        components=components)
                    raise

                result = engine._post_process_statics(
                    base_statics, source, target)
                tcounters.append(xtime())

                yield (isource, itarget, result), tcounters

4926 

4927 

4928class LocalEngine(Engine): 

4929 ''' 

4930 Offline synthetic seismogram calculator. 

4931 

4932 :param use_env: if ``True``, fill :py:attr:`store_superdirs` and 

4933 :py:attr:`store_dirs` with paths set in environment variables 

4934 GF_STORE_SUPERDIRS and GF_STORE_DIRS. 

4935 :param use_config: if ``True``, fill :py:attr:`store_superdirs` and 

4936 :py:attr:`store_dirs` with paths set in the user's config file. 

4937 

4938 The config file can be found at :file:`~/.pyrocko/config.pf` 

4939 

4940 .. code-block :: python 

4941 

4942 gf_store_dirs: ['/home/pyrocko/gf_stores/ak135/'] 

4943 gf_store_superdirs: ['/home/pyrocko/gf_stores/'] 

4944 ''' 

4945 

4946 store_superdirs = List.T( 

4947 String.T(), 

4948 help='directories which are searched for Green\'s function stores') 

4949 

4950 store_dirs = List.T( 

4951 String.T(), 

4952 help='additional individual Green\'s function store directories') 

4953 

4954 default_store_id = String.T( 

4955 optional=True, 

4956 help='default store ID to be used when a request does not provide ' 

4957 'one') 

4958 

4959 def __init__(self, **kwargs): 

4960 use_env = kwargs.pop('use_env', False) 

4961 use_config = kwargs.pop('use_config', False) 

4962 Engine.__init__(self, **kwargs) 

4963 if use_env: 

4964 env_store_superdirs = os.environ.get('GF_STORE_SUPERDIRS', '') 

4965 env_store_dirs = os.environ.get('GF_STORE_DIRS', '') 

4966 if env_store_superdirs: 

4967 self.store_superdirs.extend(env_store_superdirs.split(':')) 

4968 

4969 if env_store_dirs: 

4970 self.store_dirs.extend(env_store_dirs.split(':')) 

4971 

4972 if use_config: 

4973 c = config.config() 

4974 self.store_superdirs.extend(c.gf_store_superdirs) 

4975 self.store_dirs.extend(c.gf_store_dirs) 

4976 

4977 self._check_store_dirs_type() 

4978 self._id_to_store_dir = {} 

4979 self._open_stores = {} 

4980 self._effective_default_store_id = None 

4981 

4982 def _check_store_dirs_type(self): 

4983 for sdir in ['store_dirs', 'store_superdirs']: 

4984 if not isinstance(self.__getattribute__(sdir), list): 

4985 raise TypeError("{} of {} is not of type list".format( 

4986 sdir, self.__class__.__name__)) 

4987 

4988 def _get_store_id(self, store_dir): 

4989 store_ = store.Store(store_dir) 

4990 store_id = store_.config.id 

4991 store_.close() 

4992 return store_id 

4993 

4994 def _looks_like_store_dir(self, store_dir): 

4995 return os.path.isdir(store_dir) and \ 

4996 all(os.path.isfile(pjoin(store_dir, x)) for x in 

4997 ('index', 'traces', 'config')) 

4998 

4999 def iter_store_dirs(self): 

5000 store_dirs = set() 

5001 for d in self.store_superdirs: 

5002 if not os.path.exists(d): 

5003 logger.warning('store_superdir not available: %s' % d) 

5004 continue 

5005 

5006 for entry in os.listdir(d): 

5007 store_dir = os.path.realpath(pjoin(d, entry)) 

5008 if self._looks_like_store_dir(store_dir): 

5009 store_dirs.add(store_dir) 

5010 

5011 for store_dir in self.store_dirs: 

5012 store_dirs.add(os.path.realpath(store_dir)) 

5013 

5014 return store_dirs 

5015 

5016 def _scan_stores(self): 

5017 for store_dir in self.iter_store_dirs(): 

5018 store_id = self._get_store_id(store_dir) 

5019 if store_id not in self._id_to_store_dir: 

5020 self._id_to_store_dir[store_id] = store_dir 

5021 else: 

5022 if store_dir != self._id_to_store_dir[store_id]: 

5023 raise DuplicateStoreId( 

5024 'GF store ID %s is used in (at least) two ' 

5025 'different stores. Locations are: %s and %s' % 

5026 (store_id, self._id_to_store_dir[store_id], store_dir)) 

5027 

5028 def get_store_dir(self, store_id): 

5029 ''' 

5030 Lookup directory given a GF store ID. 

5031 ''' 

5032 

5033 if store_id not in self._id_to_store_dir: 

5034 self._scan_stores() 

5035 

5036 if store_id not in self._id_to_store_dir: 

5037 raise NoSuchStore(store_id, self.iter_store_dirs()) 

5038 

5039 return self._id_to_store_dir[store_id] 

5040 

5041 def get_store_ids(self): 

5042 ''' 

5043 Get list of available store IDs. 

5044 ''' 

5045 

5046 self._scan_stores() 

5047 return sorted(self._id_to_store_dir.keys()) 

5048 

5049 def effective_default_store_id(self): 

5050 if self._effective_default_store_id is None: 

5051 if self.default_store_id is None: 

5052 store_ids = self.get_store_ids() 

5053 if len(store_ids) == 1: 

5054 self._effective_default_store_id = self.get_store_ids()[0] 

5055 else: 

5056 raise NoDefaultStoreSet() 

5057 else: 

5058 self._effective_default_store_id = self.default_store_id 

5059 

5060 return self._effective_default_store_id 

5061 

5062 def get_store(self, store_id=None): 

5063 ''' 

5064 Get a store from the engine. 

5065 

5066 :param store_id: identifier of the store (optional) 

5067 :returns: :py:class:`~pyrocko.gf.store.Store` object 

5068 

5069 If no ``store_id`` is provided the store 

5070 associated with the :py:gattr:`default_store_id` is returned. 

5071 Raises :py:exc:`NoDefaultStoreSet` if :py:gattr:`default_store_id` is 

5072 undefined. 

5073 ''' 

5074 

5075 if store_id is None: 

5076 store_id = self.effective_default_store_id() 

5077 

5078 if store_id not in self._open_stores: 

5079 store_dir = self.get_store_dir(store_id) 

5080 self._open_stores[store_id] = store.Store(store_dir) 

5081 

5082 return self._open_stores[store_id] 

5083 

5084 def get_store_config(self, store_id): 

5085 store = self.get_store(store_id) 

5086 return store.config 

5087 

5088 def get_store_extra(self, store_id, key): 

5089 store = self.get_store(store_id) 

5090 return store.get_extra(key) 

5091 

5092 def close_cashed_stores(self): 

5093 ''' 

5094 Close and remove ids from cashed stores. 

5095 ''' 

5096 store_ids = [] 

5097 for store_id, store_ in self._open_stores.items(): 

5098 store_.close() 

5099 store_ids.append(store_id) 

5100 

5101 for store_id in store_ids: 

5102 self._open_stores.pop(store_id) 

5103 

    def get_rule(self, source, target):
        '''
        Select the first conversion rule applicable to ``target``.

        A rule is applicable when all GF components it requires are
        provided by the target's store.

        :param source: source object, used only in the error message
        :param target: :py:class:`StaticTarget` or :py:class:`Target`
        :returns: the first matching rule
        :raises BadRequest: if no applicable rule is found
        '''
        cprovided = self.get_store(target.store_id).get_provided_components()

        # NOTE(review): if ``target`` is neither a StaticTarget nor a
        # Target, ``quantity`` and ``available_rules`` stay unbound and a
        # NameError would occur below -- confirm all callers pass one of
        # these two types.
        if isinstance(target, StaticTarget):
            quantity = target.quantity
            available_rules = static_rules
        elif isinstance(target, Target):
            quantity = target.effective_quantity()
            available_rules = channel_rules

        try:
            # Rules are tried in registration order; first satisfiable
            # rule wins.
            for rule in available_rules[quantity]:
                cneeded = rule.required_components(target)
                if all(c in cprovided for c in cneeded):
                    return rule

        except KeyError:
            # Unknown quantity: fall through to BadRequest below.
            pass

        raise BadRequest(
            'No rule to calculate "%s" with GFs from store "%s" '
            'for source model "%s".' % (
                target.effective_quantity(),
                target.store_id,
                source.__class__.__name__))

5129 

5130 def _cached_discretize_basesource(self, source, store, cache, target): 

5131 if (source, store) not in cache: 

5132 cache[source, store] = source.discretize_basesource(store, target) 

5133 

5134 return cache[source, store] 

5135 

    def base_seismograms(self, source, targets, components, dsource_cache,
                         nthreads=0):
        '''
        Compute raw base seismograms for a group of targets at once.

        All targets must share one GF store, one interpolation scheme and
        one sample rate; the first target defines these for the whole
        group.

        :param source: source object to be discretized
        :param targets: list of targets
        :param components: GF components to compute
        :param dsource_cache: dict-like cache for discretized base sources
        :param nthreads: number of threads passed to the store
        :returns: list of base seismograms, trimmed to a common span
        :raises BadRequest: if targets disagree on interpolation scheme or
            sample rate
        '''

        target = targets[0]

        interp = set([t.interpolation for t in targets])
        if len(interp) > 1:
            raise BadRequest('Targets have different interpolation schemes.')

        rates = set([t.sample_rate for t in targets])
        if len(rates) > 1:
            raise BadRequest('Targets have different sample rates.')

        store_ = self.get_store(target.store_id)
        receivers = [t.receiver(store_) for t in targets]

        if target.sample_rate is not None:
            deltat = 1. / target.sample_rate
            rate = target.sample_rate
        else:
            deltat = None
            rate = store_.config.sample_rate

        # Requested time windows per target, converted to sample indices.
        tmin = num.fromiter(
            (t.tmin for t in targets), dtype=float, count=len(targets))
        tmax = num.fromiter(
            (t.tmax for t in targets), dtype=float, count=len(targets))

        itmin = num.floor(tmin * rate).astype(num.int64)
        itmax = num.ceil(tmax * rate).astype(num.int64)
        nsamples = itmax - itmin + 1

        # Targets without a time window (tmin is NaN) get itmin=0 and
        # nsamples=-1 -- presumably a "whole trace" sentinel understood by
        # Store.calc_seismograms; TODO confirm.
        mask = num.isnan(tmin)
        itmin[mask] = 0
        nsamples[mask] = -1

        base_source = self._cached_discretize_basesource(
            source, store_, dsource_cache, target)

        base_seismograms = store_.calc_seismograms(
            base_source, receivers, components,
            deltat=deltat,
            itmin=itmin, nsamples=nsamples,
            interpolation=target.interpolation,
            optimization=target.optimization,
            nthreads=nthreads)

        # Trim all seismograms to a common sample span.
        for i, base_seismogram in enumerate(base_seismograms):
            base_seismograms[i] = store.make_same_span(base_seismogram)

        return base_seismograms

5187 

5188 def base_seismogram(self, source, target, components, dsource_cache, 

5189 nthreads): 

5190 

5191 tcounters = [xtime()] 

5192 

5193 store_ = self.get_store(target.store_id) 

5194 receiver = target.receiver(store_) 

5195 

5196 if target.tmin and target.tmax is not None: 

5197 rate = store_.config.sample_rate 

5198 itmin = int(num.floor(target.tmin * rate)) 

5199 itmax = int(num.ceil(target.tmax * rate)) 

5200 nsamples = itmax - itmin + 1 

5201 else: 

5202 itmin = None 

5203 nsamples = None 

5204 

5205 tcounters.append(xtime()) 

5206 base_source = self._cached_discretize_basesource( 

5207 source, store_, dsource_cache, target) 

5208 

5209 tcounters.append(xtime()) 

5210 

5211 if target.sample_rate is not None: 

5212 deltat = 1. / target.sample_rate 

5213 else: 

5214 deltat = None 

5215 

5216 base_seismogram = store_.seismogram( 

5217 base_source, receiver, components, 

5218 deltat=deltat, 

5219 itmin=itmin, nsamples=nsamples, 

5220 interpolation=target.interpolation, 

5221 optimization=target.optimization, 

5222 nthreads=nthreads) 

5223 

5224 tcounters.append(xtime()) 

5225 

5226 base_seismogram = store.make_same_span(base_seismogram) 

5227 

5228 tcounters.append(xtime()) 

5229 

5230 return base_seismogram, tcounters 

5231 

5232 def base_statics(self, source, target, components, nthreads): 

5233 tcounters = [xtime()] 

5234 store_ = self.get_store(target.store_id) 

5235 

5236 if target.tsnapshot is not None: 

5237 rate = store_.config.sample_rate 

5238 itsnapshot = int(num.floor(target.tsnapshot * rate)) 

5239 else: 

5240 itsnapshot = None 

5241 tcounters.append(xtime()) 

5242 

5243 base_source = source.discretize_basesource(store_, target=target) 

5244 

5245 tcounters.append(xtime()) 

5246 

5247 base_statics = store_.statics( 

5248 base_source, 

5249 target, 

5250 itsnapshot, 

5251 components, 

5252 target.interpolation, 

5253 nthreads) 

5254 

5255 tcounters.append(xtime()) 

5256 

5257 return base_statics, tcounters 

5258 

    def _post_process_dynamic(self, base_seismogram, source, target):
        '''
        Turn a raw base seismogram into a target-specific trace result.

        Applies the component conversion rule, the combined source/target
        amplitude factor and the source's post-STF convolution, then hands
        the resulting trace to the target for final post-processing.
        '''
        # Any component carries the common deltat/itmin metadata.
        base_any = next(iter(base_seismogram.values()))
        deltat = base_any.deltat
        itmin = base_any.itmin

        rule = self.get_rule(source, target)
        data = rule.apply_(target, base_seismogram)

        factor = source.get_factor() * target.get_factor()
        if factor != 1.0:
            data = data * factor

        stf = source.effective_stf_post()

        times, amplitudes = stf.discretize_t(
            deltat, 0.0)

        # repeat end point to prevent boundary effects
        padded_data = num.empty(data.size + amplitudes.size, dtype=float)
        padded_data[:data.size] = data
        padded_data[data.size:] = data[-1]
        data = num.convolve(amplitudes, padded_data)

        # Onset shifted by the STF's discretization offset.
        tmin = itmin * deltat + times[0]

        tr = meta.SeismosizerTrace(
            codes=target.codes,
            # Drop the convolution tail introduced by the padding above.
            data=data[:-amplitudes.size],
            deltat=deltat,
            tmin=tmin)

        return target.post_process(self, source, tr)

5291 

    def _post_process_statics(self, base_statics, source, starget):
        '''
        Turn raw static GF sums into a target-specific result.
        '''
        rule = self.get_rule(source, starget)
        data = rule.apply_(starget, base_statics)

        factor = source.get_factor()
        if factor != 1.0:
            # In-place scaling: the arrays in ``data`` are presumably
            # views into ``base_statics``, so this also scales
            # ``base_statics`` -- TODO confirm.
            for v in data.values():
                v *= factor

        # NOTE(review): ``base_statics`` (not ``data``) is passed on here;
        # this only yields scaled results because of the in-place
        # multiplication above -- verify this is intended.
        return starget.post_process(self, source, base_statics)

5302 

    def process(self, *args, **kwargs):
        '''
        Process a request.

        ::

            process(**kwargs)
            process(request, **kwargs)
            process(sources, targets, **kwargs)

        The request can be given as a :py:class:`Request` object, or such
        an object is created using ``Request(**kwargs)`` for convenience.

        :returns: :py:class:`Response` object
        '''

        # Normalize the three accepted calling conventions into a single
        # Request object.
        if len(args) not in (0, 1, 2):
            raise BadRequest('Invalid arguments.')

        if len(args) == 1:
            kwargs['request'] = args[0]

        elif len(args) == 2:
            kwargs.update(Request.args2kwargs(args))

        request = kwargs.pop('request', None)
        status_callback = kwargs.pop('status_callback', None)
        calc_timeseries = kwargs.pop('calc_timeseries', True)

        # ``nprocs`` is accepted as a legacy alias for ``nthreads``.
        nprocs = kwargs.pop('nprocs', None)
        nthreads = kwargs.pop('nthreads', 1)
        if nprocs is not None:
            nthreads = nprocs

        if request is None:
            request = Request(**kwargs)

        # Record resource usage baselines for the statistics below
        # (``resource`` may be None on platforms without the module).
        if resource:
            rs0 = resource.getrusage(resource.RUSAGE_SELF)
            rc0 = resource.getrusage(resource.RUSAGE_CHILDREN)
        tt0 = xtime()

        # make sure stores are open before fork()
        store_ids = set(target.store_id for target in request.targets)
        for store_id in store_ids:
            self.get_store(store_id)

        source_index = dict((x, i) for (i, x) in
                            enumerate(request.sources))
        target_index = dict((x, i) for (i, x) in
                            enumerate(request.targets))

        m = request.subrequest_map()

        skeys = sorted(m.keys(), key=cmp_to_key(cmp_none_aware))
        results_list = []

        # One result slot per (source, target) pair.
        for i in range(len(request.sources)):
            results_list.append([None] * len(request.targets))

        tcounters_dyn_list = []
        tcounters_static_list = []
        nsub = len(skeys)
        isub = 0

        # Processing dynamic targets through
        # parimap(process_subrequest_dynamic)

        if calc_timeseries:
            _process_dynamic = process_dynamic_timeseries
        else:
            _process_dynamic = process_dynamic

        if request.has_dynamic:
            # Work items: (index, total, source indices, dynamic target
            # indices) per subrequest.
            work_dynamic = [
                (i, nsub,
                 [source_index[source] for source in m[k][0]],
                 [target_index[target] for target in m[k][1]
                  if not isinstance(target, StaticTarget)])
                for (i, k) in enumerate(skeys)]

            for ii_results, tcounters_dyn in _process_dynamic(
                    work_dynamic, request.sources, request.targets, self,
                    nthreads):

                # Store per-stage durations for the statistics below.
                tcounters_dyn_list.append(num.diff(tcounters_dyn))
                isource, itarget, result = ii_results
                results_list[isource][itarget] = result

                if status_callback:
                    status_callback(isub, nsub)

                isub += 1

        # Processing static targets through process_static
        if request.has_statics:
            work_static = [
                (i, nsub,
                 [source_index[source] for source in m[k][0]],
                 [target_index[target] for target in m[k][1]
                  if isinstance(target, StaticTarget)])
                for (i, k) in enumerate(skeys)]

            for ii_results, tcounters_static in process_static(
                    work_static, request.sources, request.targets, self,
                    nthreads=nthreads):

                tcounters_static_list.append(num.diff(tcounters_static))
                isource, itarget, result = ii_results
                results_list[isource][itarget] = result

                if status_callback:
                    status_callback(isub, nsub)

                isub += 1

        if status_callback:
            status_callback(nsub, nsub)

        # NOTE(review): ``tt0`` was taken with xtime() but ``tt1`` uses
        # time.time() -- confirm both refer to the same clock.
        tt1 = time.time()
        if resource:
            rs1 = resource.getrusage(resource.RUSAGE_SELF)
            rc1 = resource.getrusage(resource.RUSAGE_CHILDREN)

        s = ProcessingStats()

        if request.has_dynamic:
            # Aggregate per-stage durations and convert to percentages.
            tcumu_dyn = num.sum(num.vstack(tcounters_dyn_list), axis=0)
            t_dyn = float(num.sum(tcumu_dyn))
            perc_dyn = map(float, tcumu_dyn / t_dyn * 100.)
            (s.t_perc_get_store_and_receiver,
             s.t_perc_discretize_source,
             s.t_perc_make_base_seismogram,
             s.t_perc_make_same_span,
             s.t_perc_post_process) = perc_dyn
        else:
            t_dyn = 0.

        if request.has_statics:
            tcumu_static = num.sum(num.vstack(tcounters_static_list), axis=0)
            t_static = num.sum(tcumu_static)
            perc_static = map(float, tcumu_static / t_static * 100.)
            (s.t_perc_static_get_store,
             s.t_perc_static_discretize_basesource,
             s.t_perc_static_sum_statics,
             s.t_perc_static_post_process) = perc_static

        s.t_wallclock = tt1 - tt0
        if resource:
            # CPU time and read-block deltas including child processes.
            s.t_cpu = (
                (rs1.ru_utime + rs1.ru_stime + rc1.ru_utime + rc1.ru_stime) -
                (rs0.ru_utime + rs0.ru_stime + rc0.ru_utime + rc0.ru_stime))
            s.n_read_blocks = (
                (rs1.ru_inblock + rc1.ru_inblock) -
                (rs0.ru_inblock + rc0.ru_inblock))

        # Accumulate stacking statistics; results shared between targets
        # are weighted down by their sharing count.
        n_records_stacked = 0.
        for results in results_list:
            for result in results:
                if not isinstance(result, meta.Result):
                    continue
                shr = float(result.n_shared_stacking)
                n_records_stacked += result.n_records_stacked / shr
                s.t_perc_optimize += result.t_optimize / shr
                s.t_perc_stack += result.t_stack / shr
        s.n_records_stacked = int(n_records_stacked)
        if t_dyn != 0.:
            # NOTE(review): this divides by (t_dyn * 100); a percentage
            # would normally be ``/ t_dyn * 100.`` -- verify intended.
            s.t_perc_optimize /= t_dyn * 100
            s.t_perc_stack /= t_dyn * 100

        return Response(
            request=request,
            results_list=results_list,
            stats=s)

5477 

5478 

class RemoteEngine(Engine):
    '''
    Client for remote synthetic seismogram calculator.
    '''

    # Remote service selection; defaults come from the ws module.
    site = String.T(default=ws.g_default_site, optional=True)
    url = String.T(default=ws.g_url, optional=True)

    def process(self, request=None, status_callback=None, **kwargs):
        '''
        Forward a processing request to the remote seismosizer service.

        ``status_callback`` is accepted for interface compatibility with
        :py:class:`LocalEngine` but is not used here.
        '''
        if request is None:
            request = Request(**kwargs)

        return ws.seismosizer(url=self.url, site=self.site, request=request)

5493 

5494 

# Module-level singleton engine, created lazily by get_engine().
g_engine = None


def get_engine(store_superdirs=None):
    '''
    Get the shared global :py:class:`LocalEngine` instance.

    The engine is created lazily on first call (configured from the
    environment and the user's config file).  Additional superdirectories
    can be registered on every call.

    :param store_superdirs: optional list of extra store superdirectories
        to add to the engine
    '''
    global g_engine
    if g_engine is None:
        g_engine = LocalEngine(use_env=True, use_config=True)

    # Fixed: replaced the mutable default argument ``store_superdirs=[]``
    # with None; the behavior for callers is unchanged.
    for d in store_superdirs or []:
        if d not in g_engine.store_superdirs:
            g_engine.store_superdirs.append(d)

    return g_engine

5508 

5509 

class SourceGroup(Object):
    '''
    Abstract base class for collections of sources.

    Attribute access is vectorized: reading an attribute from the group
    gathers that attribute from every member source into a float array.
    Subclasses must implement ``__iter__`` and ``__len__``.
    '''

    def __getattr__(self, k):
        # Collect attribute ``k`` of all member sources into a NumPy
        # float array.
        return num.fromiter((getattr(s, k) for s in self),
                            dtype=float)

    def __iter__(self):
        raise NotImplementedError(
            'This method should be implemented in subclass.')

    def __len__(self):
        raise NotImplementedError(
            'This method should be implemented in subclass.')

5523 

5524 

class SourceList(SourceGroup):
    '''
    Plain list of sources.
    '''

    sources = List.T(Source.T())

    def append(self, s):
        '''Add source ``s`` at the end of the list.'''
        self.sources.append(s)

    def __iter__(self):
        return iter(self.sources)

    def __len__(self):
        return len(self.sources)

5536 

5537 

class SourceGrid(SourceGroup):
    '''
    Regular grid of sources: a base source with ranges of varied
    parameters.
    '''

    base = Source.T()
    variables = Dict.T(String.T(), Range.T())
    order = List.T(String.T())

    def __len__(self):
        # Total grid size is the product of the per-parameter value
        # counts.
        total = 1
        for _, values in self.make_coords(self.base):
            total *= len(list(values))

        return total

    def __iter__(self):
        # Yield one clone of the base source per parameter combination.
        for combination in permudef(self.make_coords(self.base)):
            source = self.base.clone(**dict(combination))
            source.regularize()
            yield source

    def ordered_params(self):
        '''
        Yield the varied parameter names in their configured order.
        '''
        remaining = list(self.variables.keys())
        for name in self.order + list(self.base.keys()):
            if name in remaining:
                yield name
                remaining.remove(name)
        if remaining:
            # Some variable does not correspond to any base parameter.
            raise Exception('Invalid parameter "%s" for source type "%s".' %
                            (remaining[0], self.base.__class__.__name__))

    def make_coords(self, base):
        '''
        Build the ``(parameter, values)`` pairs defining the grid axes.
        '''
        return [
            (param, self.variables[param].make(base=base[param]))
            for param in self.ordered_params()]

5570 

5571 

# All source model classes exposed by this module; used to build
# ``__all__`` below.
source_classes = [
    Source,
    SourceWithMagnitude,
    SourceWithDerivedMagnitude,
    ExplosionSource,
    ExplosionLineSource,
    RectangularExplosionSource,
    DCSource,
    CLVDSource,
    VLVDSource,
    MTSource,
    RectangularSource,
    PseudoDynamicRupture,
    DoubleDCSource,
    RingfaultSource,
    CombiSource,
    SFSource,
    PorePressurePointSource,
    PorePressureLineSource,
]

5592 

# All source-time-function classes exposed by this module; used to build
# ``__all__`` below.
stf_classes = [
    STF,
    BoxcarSTF,
    TriangularSTF,
    HalfSinusoidSTF,
    ResonatorSTF,
]

5600 

# Public API of the module: explicit names plus the names of all source
# and STF classes defined above.
__all__ = '''
SeismosizerError
BadRequest
NoSuchStore
DerivedMagnitudeError
STFMode
'''.split() + [S.__name__ for S in source_classes + stf_classes] + '''
Request
ProcessingStats
Response
Engine
LocalEngine
RemoteEngine
source_classes
get_engine
Range
SourceGroup
SourceList
SourceGrid
map_anchor
'''.split()