# https://pyrocko.org - GPLv3
#
# The Pyrocko Developers, 21st Century
# ---|P------/S----------~Lg----------

'''
This module provides basic signal processing for seismic traces.
'''

import time
import math
import copy
import logging
import hashlib
from collections import defaultdict

import numpy as num
from scipy import signal

from pyrocko import util, orthodrome, pchain, model, signal_ext
from pyrocko.util import reuse
from pyrocko.guts import Object, Float, Int, String, List, \
    StringChoice, Timestamp
from pyrocko.guts_array import Array
from pyrocko.model import squirrel_content

# backward compatibility
from pyrocko.util import UnavailableDecimation  # noqa
from pyrocko.response import (  # noqa
    FrequencyResponse, Evalresp, InverseEvalresp, PoleZeroResponse,
    ButterworthResponse, SampledResponse, IntegrationResponse,
    DifferentiationResponse, MultiplyResponse)


guts_prefix = 'pf'

logger = logging.getLogger('pyrocko.trace')


g_tapered_coeffs_cache = {}
g_one_response = FrequencyResponse()


@squirrel_content
class Trace(Object):

    '''
    Create new trace object.

    A ``Trace`` object represents a single continuous strip of evenly sampled
    time series data. It is built from a 1D NumPy array containing the data
    samples and some attributes describing its beginning and ending time, its
    sampling rate and four string identifiers (its network, station, location
    and channel code).

    :param network: network code
    :param station: station code
    :param location: location code
    :param channel: channel code
    :param extra: extra code
    :param tmin: system time of first sample in [s]
    :param tmax: system time of last sample in [s] (if set to ``None`` it is
        computed from length of ``ydata``)
    :param deltat: sampling interval in [s]
    :param ydata: 1D numpy array with data samples (can be ``None`` when
        ``tmax`` is not ``None``)
    :param mtime: optional modification time
    :param meta: additional meta information (not used, but maintained by the
        library)

    The length of the network, station, location and channel codes is not
    restricted by this software, but data formats like SAC, Mini-SEED or GSE
    have different limits on the lengths of these codes. The codes set here
    are silently truncated when the trace is stored.
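
    A minimal construction sketch (station, channel and data values here
    are just example placeholders)::

        import numpy as num
        from pyrocko import trace

        tr = trace.Trace(
            station='STA', channel='Z', deltat=0.01, tmin=0.0,
            ydata=num.random.normal(size=1000))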

    '''

    network = String.T(default='', help='Deployment/network code (1-8)')
    station = String.T(default='STA', help='Station code (1-5)')
    location = String.T(default='', help='Location code (0-2)')
    channel = String.T(default='', help='Channel code (3)')
    extra = String.T(default='', help='Extra/custom code')

    tmin = Timestamp.T(default=Timestamp.D('1970-01-01 00:00:00'))
    tmax = Timestamp.T()

    deltat = Float.T(default=1.0)
    ydata = Array.T(optional=True, shape=(None,), serialize_as='base64+meta')

    mtime = Timestamp.T(optional=True)

    cached_frequencies = {}

    def __init__(self, network='', station='STA', location='', channel='',
                 tmin=0., tmax=None, deltat=1., ydata=None, mtime=None,
                 meta=None, extra=''):

        Object.__init__(self, init_props=False)

        time_float = util.get_time_float()

        if not isinstance(tmin, time_float):
            tmin = Trace.tmin.regularize_extra(tmin)

        if tmax is not None and not isinstance(tmax, time_float):
            tmax = Trace.tmax.regularize_extra(tmax)

        if mtime is not None and not isinstance(mtime, time_float):
            mtime = Trace.mtime.regularize_extra(mtime)

        if ydata is not None and not isinstance(ydata, num.ndarray):
            ydata = Trace.ydata.regularize_extra(ydata)

        self._growbuffer = None

        tmin = time_float(tmin)
        if tmax is not None:
            tmax = time_float(tmax)

        if mtime is None:
            mtime = time_float(time.time())

        self.network, self.station, self.location, self.channel, \
            self.extra = [
                reuse(x) for x in (
                    network, station, location, channel, extra)]

        self.tmin = tmin
        self.deltat = deltat

        if tmax is None:
            if ydata is not None:
                self.tmax = self.tmin + (ydata.size-1)*self.deltat
            else:
                raise Exception(
                    'fixme: trace must be created with tmax or ydata')
        else:
            n = int(round((tmax - self.tmin) / self.deltat)) + 1
            self.tmax = self.tmin + (n - 1) * self.deltat

        self.meta = meta
        self.ydata = ydata
        self.mtime = mtime
        self._update_ids()
        self.file = None
        self._pchain = None

    def __str__(self):
        fmt = min(9, max(0, -int(math.floor(math.log10(self.deltat)))))
        s = 'Trace (%s, %s, %s, %s)\n' % self.nslc_id
        s += '  timerange: %s - %s\n' % (
            util.time_to_str(self.tmin, format=fmt),
            util.time_to_str(self.tmax, format=fmt))

        s += '  delta t: %g\n' % self.deltat
        if self.meta:
            for k in sorted(self.meta.keys()):
                s += '  %s: %s\n' % (k, self.meta[k])
        return s

    @property
    def codes(self):
        from pyrocko.squirrel import model
        return model.CodesNSLCE(
            self.network, self.station, self.location, self.channel,
            self.extra)

    @property
    def time_span(self):
        return self.tmin, self.tmax

    @property
    def summary_entries(self):
        return (
            self.__class__.__name__,
            str(self.codes),
            self.str_time_span,
            '%g' % (1.0/self.deltat))

    @property
    def summary_stats_entries(self):
        return tuple('%13.7g' % v for v in (
            self.ydata.min(),
            self.ydata.max(),
            self.ydata.mean(),
            self.ydata.std()))

    @property
    def summary(self):
        return util.fmt_summary(self.summary_entries, (10, 20, 55, 0))

    @property
    def summary_stats(self):
        return self.summary + ' | ' + util.fmt_summary(
            self.summary_stats_entries, (12, 12, 12, 12))

    def __getstate__(self):
        return (self.network, self.station, self.location, self.channel,
                self.tmin, self.tmax, self.deltat, self.mtime,
                self.ydata, self.meta, self.extra)

    def __setstate__(self, state):
        if len(state) == 11:
            self.network, self.station, self.location, self.channel, \
                self.tmin, self.tmax, self.deltat, self.mtime, \
                self.ydata, self.meta, self.extra = state

        elif len(state) == 12:
            # backward compatibility 0
            self.network, self.station, self.location, self.channel, \
                self.tmin, self.tmax, self.deltat, self.mtime, \
                self.ydata, self.meta, _, self.extra = state

        elif len(state) == 10:
            # backward compatibility 1
            self.network, self.station, self.location, self.channel, \
                self.tmin, self.tmax, self.deltat, self.mtime, \
                self.ydata, self.meta = state

            self.extra = ''

        else:
            # backward compatibility 2
            self.network, self.station, self.location, self.channel, \
                self.tmin, self.tmax, self.deltat, self.mtime = state
            self.ydata = None
            self.meta = None
            self.extra = ''

        self._growbuffer = None
        self._update_ids()

    def hash(self, unsafe=False):
        sha1 = hashlib.sha1()
        for code in self.nslc_id:
            sha1.update(code.encode())
        sha1.update(self.tmin.hex().encode())
        sha1.update(self.tmax.hex().encode())
        sha1.update(self.deltat.hex().encode())

        if self.ydata is not None:
            if not unsafe:
                sha1.update(memoryview(self.ydata))
            else:
                sha1.update(memoryview(self.ydata[:32]))
                sha1.update(memoryview(self.ydata[-32:]))

        return sha1.hexdigest()

    def name(self):
        '''
        Get a short string description.
        '''

        s = '%s.%s.%s.%s, %s, %s' % (self.nslc_id + (
            util.time_to_str(self.tmin),
            util.time_to_str(self.tmax)))

        return s

    def __eq__(self, other):
        return (
            isinstance(other, Trace)
            and self.network == other.network
            and self.station == other.station
            and self.location == other.location
            and self.channel == other.channel
            and (abs(self.deltat - other.deltat)
                 < (self.deltat + other.deltat)*1e-6)
            and abs(self.tmin-other.tmin) < self.deltat*0.01
            and abs(self.tmax-other.tmax) < self.deltat*0.01
            and num.all(self.ydata == other.ydata))

    def almost_equal(self, other, rtol=1e-5, atol=0.0):
        return (
            self.network == other.network
            and self.station == other.station
            and self.location == other.location
            and self.channel == other.channel
            and (abs(self.deltat - other.deltat)
                 < (self.deltat + other.deltat)*1e-6)
            and abs(self.tmin-other.tmin) < self.deltat*0.01
            and abs(self.tmax-other.tmax) < self.deltat*0.01
            and num.allclose(self.ydata, other.ydata, rtol=rtol, atol=atol))

    def assert_almost_equal(self, other, rtol=1e-5, atol=0.0):

        assert self.network == other.network, \
            'network codes differ: %s, %s' % (self.network, other.network)
        assert self.station == other.station, \
            'station codes differ: %s, %s' % (self.station, other.station)
        assert self.location == other.location, \
            'location codes differ: %s, %s' % (self.location, other.location)
        assert self.channel == other.channel, 'channel codes differ'
        assert (abs(self.deltat - other.deltat)
                < (self.deltat + other.deltat)*1e-6), \
            'sampling intervals differ %g, %g' % (self.deltat, other.deltat)
        assert abs(self.tmin-other.tmin) < self.deltat*0.01, \
            'start times differ: %s, %s' % (
                util.time_to_str(self.tmin), util.time_to_str(other.tmin))
        assert abs(self.tmax-other.tmax) < self.deltat*0.01, \
            'end times differ: %s, %s' % (
                util.time_to_str(self.tmax), util.time_to_str(other.tmax))

        assert num.allclose(self.ydata, other.ydata, rtol=rtol, atol=atol), \
            'trace values differ'

    def __hash__(self):
        return id(self)

    def __call__(self, t, clip=False, snap=round):
        it = int(snap((t-self.tmin)/self.deltat))
        if clip:
            it = max(0, min(it, self.ydata.size-1))
        else:
            if it < 0 or self.ydata.size <= it:
                raise IndexError()

        return self.tmin+it*self.deltat, self.ydata[it]

    def interpolate(self, t, clip=False):
        '''
        Value of trace between supporting points through linear interpolation.

        :param t: time instant
        :param clip: whether to clip indices to trace ends
        '''

        t0, y0 = self(t, clip=clip, snap=math.floor)
        t1, y1 = self(t, clip=clip, snap=math.ceil)
        if t0 == t1:
            return y0
        else:
            return y0+(t-t0)/(t1-t0)*(y1-y0)

    def index_clip(self, i):
        '''
        Clip index to valid range.
        '''

        return min(max(0, i), self.ydata.size)

    def add(self, other, interpolate=True, left=0., right=0.):
        '''
        Add values of other trace (self += other).

        Add values of ``other`` trace to the values of ``self``, where it
        intersects with ``other``. This method does not change the extent of
        ``self``. If ``interpolate`` is ``True`` (the default), the values of
        ``other`` to be added are interpolated at sampling instants of
        ``self``. Linear interpolation is performed. In this case the
        sampling rate of ``other`` must be equal to or lower than that of
        ``self``. If ``interpolate`` is ``False``, the sampling rates of the
        two traces must match.
        '''

        if interpolate:
            assert self.deltat <= other.deltat \
                or same_sampling_rate(self, other)

            other_xdata = other.get_xdata()
            xdata = self.get_xdata()
            self.ydata += num.interp(
                xdata, other_xdata, other.ydata, left=left, right=right)
        else:
            assert self.deltat == other.deltat
            ioff = int(round((other.tmin-self.tmin)/self.deltat))
            ibeg = max(0, ioff)
            iend = min(self.data_len(), ioff+other.data_len())
            self.ydata[ibeg:iend] += other.ydata[ibeg-ioff:iend-ioff]

    def mult(self, other, interpolate=True):
        '''
        Multiply with values of other trace ``(self *= other)``.

        Multiply the values of ``self`` with the values of ``other``, where
        it intersects with ``other``. This method does not change the extent
        of ``self``. If ``interpolate`` is ``True`` (the default), the
        values of ``other`` to be multiplied are interpolated at sampling
        instants of ``self``. Linear interpolation is performed. In this
        case the sampling rate of ``other`` must be equal to or lower than
        that of ``self``. If ``interpolate`` is ``False``, the sampling
        rates of the two traces must match.
        '''

        if interpolate:
            assert self.deltat <= other.deltat or \
                same_sampling_rate(self, other)

            other_xdata = other.get_xdata()
            xdata = self.get_xdata()
            self.ydata *= num.interp(
                xdata, other_xdata, other.ydata, left=0., right=0.)
        else:
            assert self.deltat == other.deltat
            ibeg1 = int(round((other.tmin-self.tmin)/self.deltat))
            ibeg2 = int(round((self.tmin-other.tmin)/self.deltat))
            iend1 = int(round((other.tmax-self.tmin)/self.deltat))+1
            iend2 = int(round((self.tmax-other.tmin)/self.deltat))+1

            ibeg1 = self.index_clip(ibeg1)
            iend1 = self.index_clip(iend1)
            ibeg2 = self.index_clip(ibeg2)
            iend2 = self.index_clip(iend2)

            self.ydata[ibeg1:iend1] *= other.ydata[ibeg2:iend2]

    def max(self):
        '''
        Get time and value of data maximum.
        '''

        i = num.argmax(self.ydata)
        return self.tmin + i*self.deltat, self.ydata[i]

    def min(self):
        '''
        Get time and value of data minimum.
        '''

        i = num.argmin(self.ydata)
        return self.tmin + i*self.deltat, self.ydata[i]

    def absmax(self):
        '''
        Get time and value of maximum of the absolute of data.
        '''

        tmi, mi = self.min()
        tma, ma = self.max()
        if abs(mi) > abs(ma):
            return tmi, abs(mi)
        else:
            return tma, abs(ma)

    def set_codes(
            self, network=None, station=None, location=None, channel=None,
            extra=None):

        '''
        Set network, station, location, and channel codes.
        '''

        if network is not None:
            self.network = network
        if station is not None:
            self.station = station
        if location is not None:
            self.location = location
        if channel is not None:
            self.channel = channel
        if extra is not None:
            self.extra = extra

        self._update_ids()

    def set_network(self, network):
        self.network = network
        self._update_ids()

    def set_station(self, station):
        self.station = station
        self._update_ids()

    def set_location(self, location):
        self.location = location
        self._update_ids()

    def set_channel(self, channel):
        self.channel = channel
        self._update_ids()

    def overlaps(self, tmin, tmax):
        '''
        Check if trace has overlap with a given time span.
        '''
        return not (tmax < self.tmin or self.tmax < tmin)

    def is_relevant(self, tmin, tmax, selector=None):
        '''
        Check if trace has overlap with a given time span and matches a
        condition callback. (internal use)
        '''

        return not (tmax <= self.tmin or self.tmax < tmin) \
            and (selector is None or selector(self))

    def _update_ids(self):
        '''
        Update dependent ids.
        '''

        self.full_id = (
            self.network, self.station, self.location, self.channel,
            self.tmin)
        self.nslc_id = reuse(
            (self.network, self.station, self.location, self.channel))

    def prune_from_reuse_cache(self):
        util.deuse(self.nslc_id)
        util.deuse(self.network)
        util.deuse(self.station)
        util.deuse(self.location)
        util.deuse(self.channel)

    def set_mtime(self, mtime):
        '''
        Set modification time of the trace.
        '''

        self.mtime = mtime

    def get_xdata(self):
        '''
        Create array for time axis.
        '''

        if self.ydata is None:
            raise NoData()

        return self.tmin \
            + num.arange(len(self.ydata), dtype=num.float64) * self.deltat

    def get_ydata(self):
        '''
        Get data array.
        '''

        if self.ydata is None:
            raise NoData()

        return self.ydata

    def set_ydata(self, new_ydata):
        '''
        Replace data array.
        '''

        self.drop_growbuffer()
        self.ydata = new_ydata
        self.tmax = self.tmin+(len(self.ydata)-1)*self.deltat

    def data_len(self):
        if self.ydata is not None:
            return self.ydata.size
        else:
            return int(round((self.tmax-self.tmin)/self.deltat)) + 1

    def drop_data(self):
        '''
        Forget data, make dataless trace.
        '''

        self.drop_growbuffer()
        self.ydata = None

    def drop_growbuffer(self):
        '''
        Detach the trace's grow buffer.
        '''

        self._growbuffer = None
        self._pchain = None

    def copy(self, data=True):
        '''
        Make a deep copy of the trace.
        '''

        tracecopy = copy.copy(self)
        tracecopy.drop_growbuffer()
        if data:
            tracecopy.ydata = self.ydata.copy()
        tracecopy.meta = copy.deepcopy(self.meta)
        return tracecopy

    def crop_zeros(self):
        '''
        Remove any zeros at beginning and end.
        '''

        indices = num.where(self.ydata != 0.0)[0]
        if indices.size == 0:
            raise NoData()

        ibeg = indices[0]
        iend = indices[-1]+1
        if ibeg == 0 and iend == self.ydata.size:
            return

        self.drop_growbuffer()
        self.ydata = self.ydata[ibeg:iend].copy()
        self.tmin = self.tmin+ibeg*self.deltat
        self.tmax = self.tmin+(len(self.ydata)-1)*self.deltat
        self._update_ids()

    def append(self, data):
        '''
        Append data to the end of the trace.

        To make this method efficient when successively very few or even
        single samples are appended, a larger grow buffer is allocated upon
        first invocation. The trace's data is then changed to be a view into
        the currently filled portion of the grow buffer array.
        '''

        assert self.ydata.dtype == data.dtype
        newlen = data.size + self.ydata.size
        if self._growbuffer is None or self._growbuffer.size < newlen:
            self._growbuffer = num.empty(newlen*2, dtype=self.ydata.dtype)
            self._growbuffer[:self.ydata.size] = self.ydata
        self._growbuffer[self.ydata.size:newlen] = data
        self.ydata = self._growbuffer[:newlen]
        self.tmax = self.tmin + (newlen-1)*self.deltat

    def chop(
            self, tmin, tmax, inplace=True, include_last=False,
            snap=(round, round), want_incomplete=True):

        '''
        Cut the trace to given time span.

        If the ``inplace`` argument is True (the default) the trace is cut in
        place, otherwise a new trace with the cut part is returned. By
        default, the indices where to start and end the trace data array are
        determined by rounding of ``tmin`` and ``tmax`` to sampling instants
        using Python's :py:func:`round` function. This behaviour can be
        changed with the ``snap`` argument, which takes a tuple of two
        functions (one for the lower and one for the upper end) to be used
        instead of :py:func:`round`. The last sample is by default not
        included unless ``include_last`` is set to True. If the given time
        span exceeds the available time span of the trace, the available
        part is returned, unless ``want_incomplete`` is set to False - in
        that case, a :py:exc:`NoData` exception is raised. This exception is
        also raised when the requested time span does not overlap with the
        trace's time span.
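
        For example, to cut out a 10 s window without touching the original
        trace ``tr``::

            snippet = tr.chop(tr.tmin + 5., tr.tmin + 15., inplace=False)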

        '''

        if want_incomplete:
            if tmax <= self.tmin-self.deltat or self.tmax+self.deltat < tmin:
                raise NoData()
        else:
            if tmin < self.tmin or self.tmax < tmax:
                raise NoData()

        ibeg = max(0, t2ind(tmin-self.tmin, self.deltat, snap[0]))
        iplus = 0
        if include_last:
            iplus = 1

        iend = min(
            self.data_len(),
            t2ind(tmax-self.tmin, self.deltat, snap[1])+iplus)

        if ibeg >= iend:
            raise NoData()

        obj = self
        if not inplace:
            obj = self.copy(data=False)

        self.drop_growbuffer()
        if self.ydata is not None:
            obj.ydata = self.ydata[ibeg:iend].copy()
        else:
            obj.ydata = None

        obj.tmin = obj.tmin+ibeg*obj.deltat
        obj.tmax = obj.tmin+((iend-ibeg)-1)*obj.deltat

        obj._update_ids()

        return obj

    def downsample(
            self, ndecimate, snap=False, demean=False, ftype='fir-remez',
            cut=False):

        '''
        Downsample (decimate) trace by a given integer factor.

        Antialiasing filter details:

        * ``iir``: A Chebyshev type I digital filter of order 8 with maximum
          ripple of 0.05 dB in the passband.
        * ``fir``: A FIR filter using a Hamming window and 31 taps and a
          well-behaved phase response.
        * ``fir-remez``: A FIR filter based on the Remez exchange algorithm
          with ``45*ndecimate`` taps and a cutoff at 75% Nyquist frequency.

        Comparison of the digital filters:

        .. figure :: ../../static/downsampling-filter-comparison.png
            :width: 60%
            :alt: Comparison of the downsampling filters.

        See also :py:meth:`Trace.downsample_to`.

        :param ndecimate:
            Decimation factor, avoid values larger than 8.
        :type ndecimate:
            int

        :param snap:
            Whether to put the new sampling instants closest to multiples of
            the sampling rate (according to absolute time).
        :type snap:
            bool

        :param demean:
            Whether to demean the signal before filtering.
        :type demean:
            bool

        :param ftype:
            Which filter to use, choose from ``'iir'``, ``'fir'``,
            ``'fir-remez'``. Default is ``'fir-remez'``.

        :param cut:
            Whether to cut off samples in the beginning of the trace which
            are polluted by artifacts of the anti-aliasing filter.
        :type cut:
            bool
        '''
        newdeltat = self.deltat*ndecimate
        b, a, n = util.decimate_coeffs(ndecimate, None, ftype)
        if snap:
            ilag = int(round((math.ceil(
                (self.tmin+(n//2 if cut else 0)*self.deltat) /
                newdeltat) * newdeltat - self.tmin) / self.deltat))
        else:
            ilag = (n//2 if cut else 0)

        data = self.ydata.astype(num.float64)
        if data.size != 0:
            if demean:
                data -= num.mean(data)
            y = signal.lfilter(b, a, data)
            self.ydata = y[n//2+ilag::ndecimate].copy()
        else:
            self.ydata = data

        self.tmin += ilag * self.deltat
        self.deltat = reuse(self.deltat*ndecimate)
        self.tmax = self.tmin+(len(self.ydata)-1)*self.deltat
        self._update_ids()

    def downsample_to(
            self, deltat, snap=False, allow_upsample_max=1, demean=False,
            ftype='fir-remez', cut=False):

        '''
        Downsample to given sampling rate.

        Tries to downsample the trace to a target sampling interval of
        ``deltat``. This runs :py:meth:`Trace.downsample` one or several
        times. If ``allow_upsample_max`` is set to a value larger than 1,
        intermediate upsampling steps are allowed, in order to increase the
        number of possible downsampling ratios.

        If the requested ratio is not supported, an exception of type
        :py:exc:`pyrocko.util.UnavailableDecimation` is raised.

        The downsampled trace will be shorter than the input trace because
        the anti-aliasing filter introduces edge effects. If ``cut`` is
        selected, additionally, polluted samples in the beginning of the
        trace are removed. The approximate amount of cutoff which will be
        produced by a given downsampling configuration can be estimated with
        :py:func:`downsample_tpad`.

        See also: :py:meth:`Trace.downsample`.

        :param deltat:
            Desired sampling interval in [s].
        :type deltat:
            float

        :param allow_upsample_max:
            Maximum allowed upsampling factor of the signal to achieve the
            desired sampling interval. Default is ``1``.
        :type allow_upsample_max:
            int

        :param snap:
            Whether to put the new sampling instants closest to multiples of
            the sampling rate (according to absolute time).
        :type snap:
            bool

        :param demean:
            Whether to demean the signal before filtering.
        :type demean:
            bool

        :param ftype:
            Which filter to use, choose from ``'iir'``, ``'fir'``,
            ``'fir-remez'``. Default is ``'fir-remez'``.

        :param cut:
            Whether to cut off samples in the beginning of the trace which
            are polluted by artifacts of the anti-aliasing filter.
        :type cut:
            bool
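
        For example, to bring a 100 Hz trace ``tr`` (``deltat=0.01``) down
        to 2 Hz (the decimation chain is chosen automatically)::

            tr.downsample_to(0.5, demean=True)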

        '''

        upsratio, deci_seq = _configure_downsampling(
            self.deltat, deltat, allow_upsample_max)

        if demean:
            self.drop_growbuffer()
            self.ydata = self.ydata.astype(num.float64)
            self.ydata -= num.mean(self.ydata)

        if upsratio > 1:
            self.drop_growbuffer()
            ydata = self.ydata
            self.ydata = num.zeros(
                ydata.size*upsratio-(upsratio-1), ydata.dtype)
            self.ydata[::upsratio] = ydata
            for i in range(1, upsratio):
                self.ydata[i::upsratio] = \
                    float(i)/upsratio * ydata[:-1] \
                    + float(upsratio-i)/upsratio * ydata[1:]
            self.deltat = self.deltat/upsratio

        for i, ndecimate in enumerate(deci_seq):
            self.downsample(
                ndecimate, snap=snap, demean=False, ftype=ftype, cut=cut)

    def resample(self, deltat):
        '''
        Resample to given sampling rate ``deltat``.

        Resampling is performed in the frequency domain.
        '''

        ndata = self.ydata.size
        ntrans = nextpow2(ndata)
        fntrans2 = ntrans * self.deltat/deltat
        ntrans2 = int(round(fntrans2))
        deltat2 = self.deltat * float(ntrans)/float(ntrans2)
        ndata2 = int(round(ndata*self.deltat/deltat2))
        if abs(fntrans2 - ntrans2) > 1e-7:
            logger.warning(
                'resample: requested deltat %g could not be matched exactly: '
                '%g' % (deltat, deltat2))

        data = self.ydata
        data_pad = num.zeros(ntrans, dtype=float)
        data_pad[:ndata] = data
        fdata = num.fft.rfft(data_pad)
        fdata2 = num.zeros((ntrans2+1)//2, dtype=fdata.dtype)
        n = min(fdata.size, fdata2.size)
        fdata2[:n] = fdata[:n]
        data2 = num.fft.irfft(fdata2)
        data2 = data2[:ndata2]
        data2 *= float(ntrans2) / float(ntrans)
        self.deltat = deltat2
        self.set_ydata(data2)

    def resample_simple(self, deltat):
        tyear = 3600*24*365.

        if deltat == self.deltat:
            return

        if abs(self.deltat - deltat) * tyear/deltat < deltat:
            logger.warning(
                'resample_simple: less than one sample would have to be '
                'inserted/deleted per year. Doing nothing.')
            return

        ninterval = int(round(deltat / (self.deltat - deltat)))
        if abs(ninterval) < 20:
            logger.error(
                'resample_simple: sample insertion/deletion interval less '
                'than 20. results would be erroneous.')
            raise ResamplingFailed()

        delete = False
        if ninterval < 0:
            ninterval = - ninterval
            delete = True

        tyearbegin = util.year_start(self.tmin)

        nmin = int(round((self.tmin - tyearbegin)/deltat))

        ibegin = (((nmin-1)//ninterval)+1) * ninterval - nmin
        nindices = (len(self.ydata) - ibegin - 1) // ninterval + 1
        if nindices > 0:
            indices = ibegin + num.arange(nindices) * ninterval
            data_split = num.split(self.ydata, indices)
            data = []
            for ln, h in zip(data_split[:-1], data_split[1:]):
                if delete:
                    ln = ln[:-1]

                data.append(ln)
                if not delete:
                    if ln.size == 0:
                        v = h[0]
                    else:
                        v = 0.5*(ln[-1] + h[0])
                    data.append(num.array([v], dtype=ln.dtype))

            data.append(data_split[-1])

            ydata_new = num.concatenate(data)

            self.tmin = tyearbegin + nmin * deltat
            self.deltat = deltat
            self.set_ydata(ydata_new)

    def stretch(self, tmin_new, tmax_new):
        '''
        Stretch signal while preserving sample rate using sinc interpolation.

        :param tmin_new: new time of first sample
        :param tmax_new: new time of last sample

        This method can be used to correct for a small linear time drift or
        to introduce sub-sample time shifts. The amount of stretching is
        limited to 10% by the implementation and should be kept much smaller
        than that because of the approximations used.
        '''

        i_control = num.array([0, self.ydata.size-1], dtype=num.int64)
        t_control = num.array([tmin_new, tmax_new], dtype=float)

        r = (tmax_new - tmin_new) / self.deltat + 1.0
        n_new = int(round(r))
        if abs(n_new - r) > 0.001:
            n_new = int(math.floor(r))

        assert n_new >= 2

        tmax_new = tmin_new + (n_new-1) * self.deltat

        ydata_new = num.empty(n_new, dtype=float)
        signal_ext.antidrift(i_control, t_control,
                             self.ydata.astype(float),
                             tmin_new, self.deltat, ydata_new)

        self.tmin = tmin_new
        self.set_ydata(ydata_new)
        self._update_ids()

    def nyquist_check(self, frequency, intro='Corner frequency', warn=True,
                      raise_exception=False):

        '''
        Check if a given frequency is above the Nyquist frequency of the
        trace.

        :param intro: string used to introduce the warning/error message
        :param warn: whether to emit a warning
        :param raise_exception: whether to raise an :py:exc:`AboveNyquist`
            exception.
        '''

        if frequency >= 0.5/self.deltat:
            message = '%s (%g Hz) is equal to or higher than Nyquist ' \
                'frequency (%g Hz). (Trace %s)' \
                % (intro, frequency, 0.5/self.deltat, self.name())
            if warn:
                logger.warning(message)
            if raise_exception:
                raise AboveNyquist(message)

    def lowpass(self, order, corner, nyquist_warn=True,
                nyquist_exception=False, demean=True):

        '''
        Apply Butterworth lowpass to the trace.

        :param order: order of the filter
        :param corner: corner frequency of the filter

        Mean is removed before filtering.
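
        For example, a 4th-order lowpass with a corner frequency of 1 Hz,
        applied in place to a trace ``tr``::

            tr.lowpass(4, 1.0)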

        '''

        self.nyquist_check(
            corner, 'Corner frequency of lowpass', nyquist_warn,
            nyquist_exception)

        (b, a) = _get_cached_filter_coeffs(
            order, [corner*2.0*self.deltat], btype='low')

        if len(a) != order+1 or len(b) != order+1:
            logger.warning(
                'Erroneous filter coefficients returned by '
                'scipy.signal.butter(). You may need to downsample the '
                'signal before filtering.')

        data = self.ydata.astype(num.float64)
        if demean:
            data -= num.mean(data)
        self.drop_growbuffer()
        self.ydata = signal.lfilter(b, a, data)

    def highpass(self, order, corner, nyquist_warn=True,
                 nyquist_exception=False, demean=True):

        '''
        Apply Butterworth highpass to the trace.

        :param order: order of the filter
        :param corner: corner frequency of the filter

        Mean is removed before filtering.
        '''

        self.nyquist_check(
            corner, 'Corner frequency of highpass', nyquist_warn,
            nyquist_exception)

        (b, a) = _get_cached_filter_coeffs(
            order, [corner*2.0*self.deltat], btype='high')

        data = self.ydata.astype(num.float64)
        if len(a) != order+1 or len(b) != order+1:
            logger.warning(
                'Erroneous filter coefficients returned by '
                'scipy.signal.butter(). You may need to downsample the '
                'signal before filtering.')
        if demean:
            data -= num.mean(data)
        self.drop_growbuffer()
        self.ydata = signal.lfilter(b, a, data)

    def bandpass(self, order, corner_hp, corner_lp, demean=True):
        '''
        Apply Butterworth bandpass to the trace.

        :param order: order of the filter
        :param corner_hp: lower corner frequency of the filter
        :param corner_lp: upper corner frequency of the filter

        Mean is removed before filtering.
        '''

        self.nyquist_check(corner_hp, 'Lower corner frequency of bandpass')
        self.nyquist_check(corner_lp, 'Higher corner frequency of bandpass')
        (b, a) = _get_cached_filter_coeffs(
            order,
            [corner*2.0*self.deltat for corner in (corner_hp, corner_lp)],
            btype='band')
        data = self.ydata.astype(num.float64)
        if demean:
            data -= num.mean(data)
        self.drop_growbuffer()
        self.ydata = signal.lfilter(b, a, data)

    def bandstop(self, order, corner_hp, corner_lp, demean=True):
        '''
        Apply bandstop (attenuates frequencies in band) to the trace.

        :param order: order of the filter
        :param corner_hp: lower corner frequency of the filter
        :param corner_lp: upper corner frequency of the filter

        Mean is removed before filtering.
        '''

        self.nyquist_check(corner_hp, 'Lower corner frequency of bandstop')
        self.nyquist_check(corner_lp, 'Higher corner frequency of bandstop')
        (b, a) = _get_cached_filter_coeffs(
            order,
            [corner*2.0*self.deltat for corner in (corner_hp, corner_lp)],
            btype='bandstop')
        data = self.ydata.astype(num.float64)
        if demean:
            data -= num.mean(data)
        self.drop_growbuffer()
        self.ydata = signal.lfilter(b, a, data)

    def envelope(self, inplace=True):
        '''
        Calculate the envelope of the trace.

        :param inplace: calculate envelope in place

        The calculation follows:

        .. math::

            Y' = \\sqrt{Y^2+H(Y)^2}

        where H is the Hilbert transform of the signal Y.
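
        For example, to get the envelope as a new trace, leaving ``tr``
        itself unchanged::

            env = tr.envelope(inplace=False)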

        '''

        y = self.ydata.astype(float)
        env = num.abs(hilbert(y))
        if inplace:
            self.drop_growbuffer()
            self.ydata = env
        else:
            tr = self.copy(data=False)
            tr.ydata = env
            return tr

    def taper(self, taperer, inplace=True, chop=False):
        '''
        Apply a :py:class:`Taper` to the trace.

        :param taperer: instance of :py:class:`Taper` subclass
        :param inplace: apply taper inplace
        :param chop: if ``True``: exclude tapered parts from the resulting
            trace
        '''

        if not inplace:
            tr = self.copy()
        else:
            tr = self

        if chop:
            i, n = taperer.span(tr.ydata, tr.tmin, tr.deltat)
            tr.shift(i*tr.deltat)
            tr.set_ydata(tr.ydata[i:i+n])

        taperer(tr.ydata, tr.tmin, tr.deltat)

        if not inplace:
            return tr

    def whiten(self, order=6):
        '''
        Whiten signal in time domain using autoregression and recursive
        filter.

        :param order: order of the autoregression process
        '''

        b, a = self.whitening_coefficients(order)
        self.drop_growbuffer()
        self.ydata = signal.lfilter(b, a, self.ydata)

    def whitening_coefficients(self, order=6):
        ar = yulewalker(self.ydata, order)
        b, a = [1.] + ar.tolist(), [1.]
        return b, a

    def ampspec_whiten(
            self,
            width,
            td_taper='auto',
            fd_taper='auto',
            pad_to_pow2=True,
            demean=True):

        '''
        Whiten signal via frequency domain using moving average on amplitude
        spectra.

        :param width: width of smoothing kernel [Hz]
        :param td_taper: time domain taper, object of type :py:class:`Taper`
            or ``None`` or ``'auto'``.
        :param fd_taper: frequency domain taper, object of type
            :py:class:`Taper` or ``None`` or ``'auto'``.
        :param pad_to_pow2: whether to pad the signal with zeros up to a
            length of 2^n
        :param demean: whether to demean the signal before tapering

        The signal is first demeaned and then tapered using ``td_taper``.
        Then, the spectrum is calculated and inversely weighted with a
        smoothed version of its amplitude spectrum. A moving average is used
        for the smoothing. The smoothed spectrum is then tapered using
        ``fd_taper``. Finally, the smoothed and tapered spectrum is
        back-transformed into the time domain.

        If ``td_taper`` is set to ``'auto'``, ``CosFader(1.0/width)`` is
        used. If ``fd_taper`` is set to ``'auto'``, ``CosFader(width)`` is
        used.
        '''

        ndata = self.data_len()

        if pad_to_pow2:
            ntrans = nextpow2(ndata)
        else:
            ntrans = ndata

        df = 1./(ntrans*self.deltat)
        nw = int(round(width/df))
        if ndata//2+1 <= nw:
            raise TraceTooShort(
                'Samples in trace: %s, samples needed: %s' % (ndata, nw))

        if td_taper == 'auto':
            td_taper = CosFader(1./width)

        if fd_taper == 'auto':
            fd_taper = CosFader(width)

        if td_taper:
            self.taper(td_taper)

        ydata = self.get_ydata().astype(float)
        if demean:
            ydata -= ydata.mean()

        spec = num.fft.rfft(ydata, ntrans)

        amp = num.abs(spec)
        nspec = amp.size
        csamp = num.cumsum(amp)
        amp_smoothed = num.empty(nspec, dtype=csamp.dtype)
        n1, n2 = nw//2, nw//2 + nspec - nw
        amp_smoothed[n1:n2] = (csamp[nw:] - csamp[:-nw]) / nw
        amp_smoothed[:n1] = amp_smoothed[n1]
        amp_smoothed[n2:] = amp_smoothed[n2-1]

        denom = amp_smoothed * amp
        numer = amp
        eps = num.mean(denom) * 1e-9
        if eps == 0.0:
            eps = 1e-9

        numer += eps
        denom += eps
        spec *= numer/denom

        if fd_taper:
            fd_taper(spec, 0., df)

        ydata = num.fft.irfft(spec)
        self.set_ydata(ydata[:ndata])

    def _get_cached_freqs(self, nf, deltaf):
        ck = (nf, deltaf)
        if ck not in Trace.cached_frequencies:
            Trace.cached_frequencies[ck] = deltaf * num.arange(
                nf, dtype=float)

        return Trace.cached_frequencies[ck]

    def bandpass_fft(self, corner_hp, corner_lp):
        '''
        Apply boxcar bandpass to trace (in spectral domain).
        '''

        n = len(self.ydata)
        n2 = nextpow2(n)
        data = num.zeros(n2, dtype=num.float64)
        data[:n] = self.ydata
        fdata = num.fft.rfft(data)
        freqs = self._get_cached_freqs(len(fdata), 1./(self.deltat*n2))
        fdata[0] = 0.0
        fdata *= num.logical_and(corner_hp < freqs, freqs < corner_lp)
        data = num.fft.irfft(fdata)
        self.drop_growbuffer()
        self.ydata = data[:n]

    def shift(self, tshift):
        '''
        Time shift the trace.
        '''

        self.tmin += tshift
        self.tmax += tshift
        self._update_ids()

    def snap(self, inplace=True, interpolate=False):
        '''
        Shift trace samples to nearest even multiples of the sampling rate.

        :param inplace: (boolean) snap traces inplace

        If ``inplace`` is ``False`` and the snapped ``tmin`` and ``tmax``
        both differ by less than 0.01 x ``deltat`` from those of the
        original trace, :py:func:`snap` returns the unsnapped instance of
        the original trace.
        '''

        tmin = round(self.tmin/self.deltat)*self.deltat
        tmax = tmin + (self.ydata.size-1)*self.deltat

        if inplace:
            xself = self
        else:
            if abs(self.tmin - tmin) < 1e-2*self.deltat and \
                    abs(self.tmax - tmax) < 1e-2*self.deltat:
                return self

            xself = self.copy()

        if interpolate:
            n = xself.data_len()
            ydata_new = num.empty(n, dtype=float)
            i_control = num.array([0, n-1], dtype=num.int64)
            tref = tmin
            t_control = num.array(
                [float(xself.tmin-tref), float(xself.tmax-tref)],
                dtype=float)

            signal_ext.antidrift(i_control, t_control,
                                 xself.ydata.astype(float),
                                 float(tmin-tref), xself.deltat, ydata_new)

            xself.ydata = ydata_new

        xself.tmin = tmin
        xself.tmax = tmax
        xself._update_ids()

        return xself

    def fix_deltat_rounding_errors(self):
        '''
        Try to undo sampling rate rounding errors.

        See :py:func:`fix_deltat_rounding_errors`.
        '''

        self.deltat = fix_deltat_rounding_errors(self.deltat)
        self.tmax = self.tmin + (self.data_len() - 1) * self.deltat

    def sta_lta_centered(self, tshort, tlong, quad=True, scalingmethod=1):
        '''
        Run special STA/LTA filter where the short time window is centered
        on the long time window.

        :param tshort: length of short time window in [s]
        :param tlong: length of long time window in [s]
        :param quad: whether to square the data prior to applying the
            STA/LTA filter
        :param scalingmethod: integer key to select how output values are
            scaled / normalized (``1``, ``2``, or ``3``)

        ============= ====================================== ===========
        Scalingmethod Implementation                         Range
        ============= ====================================== ===========
        ``1``         As/Al* Ts/Tl                           [0,1]
        ``2``         (As/Al - 1) / (Tl/Ts - 1)              [-Ts/Tl,1]
        ``3``         Like ``2`` but clipping range at zero  [0,1]
        ============= ====================================== ===========
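
        For example, a 2 s short window against a 10 s long window, with
        the output clipped to [0, 1]::

            tr.sta_lta_centered(2.0, 10.0, scalingmethod=3)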

        '''

        nshort = int(round(tshort/self.deltat))
        nlong = int(round(tlong/self.deltat))

        assert nshort < nlong
        if nlong > len(self.ydata):
            raise TraceTooShort(
                'Samples in trace: %s, samples needed: %s'
                % (len(self.ydata), nlong))

        if quad:
            sqrdata = self.ydata**2
        else:
            sqrdata = self.ydata

        mavg_short = moving_avg(sqrdata, nshort)
        mavg_long = moving_avg(sqrdata, nlong)

        self.drop_growbuffer()

        if scalingmethod not in (1, 2, 3):
            raise Exception('Invalid value for scalingmethod argument.')

        if scalingmethod == 1:
            self.ydata = mavg_short/mavg_long * float(nshort)/float(nlong)
        elif scalingmethod in (2, 3):
            self.ydata = (mavg_short/mavg_long - 1.) \
                / ((float(nlong)/float(nshort)) - 1)

            if scalingmethod == 3:
                self.ydata = num.maximum(self.ydata, 0.)

    def sta_lta_right(self, tshort, tlong, quad=True, scalingmethod=1):
        '''
        Run special STA/LTA filter where the short time window is
        overlapping with the last part of the long time window.

        :param tshort: length of short time window in [s]
        :param tlong: length of long time window in [s]
        :param quad: whether to square the data prior to applying the
            STA/LTA filter
        :param scalingmethod: integer key to select how output values are
            scaled / normalized (``1``, ``2``, or ``3``)

        ============= ====================================== ===========
        Scalingmethod Implementation                         Range
        ============= ====================================== ===========
        ``1``         As/Al* Ts/Tl                           [0,1]
        ``2``         (As/Al - 1) / (Tl/Ts - 1)              [-Ts/Tl,1]
        ``3``         Like ``2`` but clipping range at zero  [0,1]
        ============= ====================================== ===========

        With ``scalingmethod=1``, the values produced by this variant of the
        STA/LTA are equivalent to

        .. math::

            s_i = \\frac{s}{l} \\frac{\\frac{1}{s}\\sum_{j=i}^{i+s-1} f_j}
                  {\\frac{1}{l}\\sum_{j=i+s-l}^{i+s-1} f_j}

        where :math:`f_j` are the input samples, :math:`s` is the number of
        samples in the short time window and :math:`l` is the number of
        samples in the long time window.
        '''

        n = self.data_len()
        tmin = self.tmin

        nshort = max(1, int(round(tshort/self.deltat)))
        nlong = max(1, int(round(tlong/self.deltat)))

        assert nshort < nlong

        if nlong > len(self.ydata):
            raise TraceTooShort(
                'Samples in trace: %s, samples needed: %s'
                % (len(self.ydata), nlong))

        if quad:
            sqrdata = self.ydata**2
        else:
            sqrdata = self.ydata

        nshift = int(math.floor(0.5 * (nlong - nshort)))
        if nlong % 2 != 0 and nshort % 2 == 0:
            nshift += 1

        mavg_short = moving_avg(sqrdata, nshort)[nshift:]
        mavg_long = moving_avg(sqrdata, nlong)[:sqrdata.size-nshift]

        self.drop_growbuffer()

        if scalingmethod not in (1, 2, 3):
            raise Exception('Invalid value for scalingmethod argument.')

        if scalingmethod == 1:
            ydata = mavg_short/mavg_long * nshort/nlong
        elif scalingmethod in (2, 3):
            ydata = (mavg_short/mavg_long - 1.) \
                / ((float(nlong)/float(nshort)) - 1)

            if scalingmethod == 3:
                ydata = num.maximum(ydata, 0.)

        self.set_ydata(ydata)

        self.shift((math.ceil(0.5*nlong) - nshort + 1) * self.deltat)

        self.chop(
            tmin + (nlong - nshort) * self.deltat,
            tmin + (n - nshort) * self.deltat)

    def peaks(self, threshold, tsearch,
              deadtime=False,
              nblock_duration_detection=100):

        '''
        Detect peaks above a given threshold (method 1).

        From every instant, where the signal rises above ``threshold``, a
        time length of ``tsearch`` seconds is searched for a maximum. A list
        with tuples (time, value) for each detected peak is returned. The
        ``deadtime`` argument turns on a special deadtime duration detection
        algorithm useful in combination with recursive STA/LTA filters.
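
        For example, to pick maxima above an amplitude of 100 on a trace
        ``tr``, scanning 1 s past each threshold crossing::

            tpeaks, apeaks = tr.peaks(100., 1.0)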

        '''

        y = self.ydata
        above = num.where(y > threshold, 1, 0)
        deriv = num.zeros(y.size, dtype=num.int8)
        deriv[1:] = above[1:]-above[:-1]
        itrig_positions = num.nonzero(deriv > 0)[0]
        tpeaks = []
        apeaks = []
        tzeros = []
        tzero = self.tmin

        for itrig_pos in itrig_positions:
            ibeg = itrig_pos
            iend = min(
                len(self.ydata),
                itrig_pos + int(math.ceil(tsearch/self.deltat)))
            ipeak = num.argmax(y[ibeg:iend])
            tpeak = self.tmin + (ipeak+ibeg)*self.deltat
            apeak = y[ibeg+ipeak]

            if tpeak < tzero:
                continue

            if deadtime:
                ibeg = itrig_pos
                iblock = 0
                nblock = nblock_duration_detection
                totalsum = 0.
                while True:
                    if ibeg+iblock*nblock >= len(y):
                        tzero = self.tmin + (len(y)-1) * self.deltat
                        break

                    logy = num.log(
                        y[ibeg+iblock*nblock:ibeg+(iblock+1)*nblock])
                    logy[0] += totalsum
                    ysum = num.cumsum(logy)
                    totalsum = ysum[-1]
                    below = num.where(ysum <= 0., 1, 0)
                    deriv = num.zeros(ysum.size, dtype=num.int8)
                    deriv[1:] = below[1:]-below[:-1]
                    izero_positions = \
                        num.nonzero(deriv > 0)[0] + iblock*nblock
                    if len(izero_positions) > 0:
                        tzero = self.tmin + self.deltat * (
                            ibeg + izero_positions[0])
                        break
                    iblock += 1
            else:
                tzero = ibeg*self.deltat + self.tmin + tsearch

            tpeaks.append(tpeak)
            apeaks.append(apeak)
            tzeros.append(tzero)

        if deadtime:
            return tpeaks, apeaks, tzeros
        else:
            return tpeaks, apeaks

    def peaks2(self, threshold, tsearch):

        '''
        Detect peaks above a given threshold (method 2).

        This variant of peak detection is a bit more robust (and slower)
        than the one implemented in :py:meth:`Trace.peaks`. First all
        samples with ``a[i-1] < a[i] > a[i+1]`` are masked as potential
        peaks. From these, iteratively the one with the maximum amplitude
        ``a[j]`` and time
        ``t[j]`` is chosen and potential peaks within
        ``t[j] - tsearch, t[j] + tsearch`` are discarded. The algorithm
        stops when ``a[j] < threshold`` or when no more potential peaks are
        left.
        '''

        a = num.copy(self.ydata)

        amin = num.min(a)

        a[0] = amin
        a[1: -1][num.diff(a, 2) <= 0.] = amin
        a[-1] = amin

        data = []
        while True:
            imax = num.argmax(a)
            amax = a[imax]

            if amax < threshold or amax == amin:
                break

            data.append((self.tmin + imax * self.deltat, amax))

            ntsearch = int(round(tsearch / self.deltat))
            a[max(imax-ntsearch//2, 0):min(imax+ntsearch//2, a.size)] = amin

        if data:
            data.sort()
            tpeaks, apeaks = list(zip(*data))
        else:
            tpeaks, apeaks = [], []

        return tpeaks, apeaks

    def extend(self, tmin=None, tmax=None, fillmethod='zeros'):
        '''
        Extend trace to given span.

        :param tmin: begin time of new span
        :param tmax: end time of new span
        :param fillmethod: ``'zeros'``, ``'repeat'``, ``'mean'``, or
            ``'median'``
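
        For example, to pad a trace ``tr`` with 10 s of zeros on both
        sides::

            tr.extend(tr.tmin - 10., tr.tmax + 10.)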

        '''

        nold = self.ydata.size

        if tmin is not None:
            nl = min(0, int(round((tmin-self.tmin)/self.deltat)))
        else:
            nl = 0

        if tmax is not None:
            nh = max(nold - 1, int(round((tmax-self.tmin)/self.deltat)))
        else:
            nh = nold - 1

        n = nh - nl + 1
        data = num.zeros(n, dtype=self.ydata.dtype)
        data[-nl:-nl + nold] = self.ydata
        if self.ydata.size >= 1:
            if fillmethod == 'repeat':
                data[:-nl] = self.ydata[0]
                data[-nl + nold:] = self.ydata[-1]
            elif fillmethod == 'median':
                v = num.median(self.ydata)
                data[:-nl] = v
                data[-nl + nold:] = v
            elif fillmethod == 'mean':
                v = num.mean(self.ydata)
                data[:-nl] = v
                data[-nl + nold:] = v

        self.drop_growbuffer()
        self.ydata = data

        self.tmin += nl * self.deltat
        self.tmax = self.tmin + (self.ydata.size - 1) * self.deltat

        self._update_ids()

    def transfer(self,
                 tfade=0.,
                 freqlimits=None,
                 transfer_function=None,
                 cut_off_fading=True,
                 demean=True,
                 invert=False):

        '''
        Return new trace with transfer function applied (convolution).

        :param tfade: rise/fall time in seconds of taper applied in time
            domain at both ends of trace.
        :param freqlimits: 4-tuple with corner frequencies in Hz.
        :param transfer_function: FrequencyResponse object; must provide a
            method ``evaluate(freqs)``, which returns the transfer function
            coefficients at the frequencies ``freqs``.
        :param cut_off_fading: whether to cut off rise/fall interval in
            output trace.
        :param demean: remove mean before applying transfer function
        :param invert: set to ``True`` to do a deconvolution
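
        For example, to deconvolve an instrument response ``resp`` (a
        placeholder here for any object with an ``evaluate(freqs)``
        method), restituting between 0.01 Hz and 25 Hz with 5 s end
        tapers::

            restituted = tr.transfer(
                tfade=5., freqlimits=(0.01, 0.02, 20., 25.),
                transfer_function=resp, invert=True)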

        '''

        if transfer_function is None:
            transfer_function = g_one_response

        if self.tmax - self.tmin <= tfade*2.:
            raise TraceTooShort(
                'Trace %s.%s.%s.%s too short for fading length setting. '
                'trace length = %g, fading length = %g'
                % (self.nslc_id + (self.tmax-self.tmin, tfade)))

        if freqlimits is None and (
                transfer_function is None or transfer_function.is_scalar()):

            # special case for flat responses

            output = self.copy()
            data = self.ydata
            ndata = data.size

            if transfer_function is not None:
                c = num.abs(transfer_function.evaluate(num.ones(1))[0])

                if invert:
                    c = 1.0/c

                data *= c

            if tfade != 0.0:
                data *= costaper(
                    0., tfade, self.deltat*(ndata-1)-tfade, self.deltat*ndata,
                    ndata, self.deltat)

            output.ydata = data

        else:
            ndata = self.ydata.size
            ntrans = nextpow2(ndata*1.2)
            coeffs = self._get_tapered_coeffs(
                ntrans, freqlimits, transfer_function, invert=invert)

            data = self.ydata

            data_pad = num.zeros(ntrans, dtype=float)
            data_pad[:ndata] = data
            if demean:
                data_pad[:ndata] -= data.mean()

            if tfade != 0.0:
                data_pad[:ndata] *= costaper(
                    0., tfade, self.deltat*(ndata-1)-tfade, self.deltat*ndata,
                    ndata, self.deltat)

            fdata = num.fft.rfft(data_pad)
            fdata *= coeffs
            ddata = num.fft.irfft(fdata)
            output = self.copy()
            output.ydata = ddata[:ndata]

        if cut_off_fading and tfade != 0.0:
            try:
                output.chop(output.tmin+tfade, output.tmax-tfade, inplace=True)
            except NoData:
                raise TraceTooShort(
                    'Trace %s.%s.%s.%s too short for fading length setting. '
                    'trace length = %g, fading length = %g'
                    % (self.nslc_id + (self.tmax-self.tmin, tfade)))
        else:
            output.ydata = output.ydata.copy()

        return output

    def differentiate(self, n=1, order=4, inplace=True):
        '''
        Approximate first or second derivative of the trace.

        :param n: 1 for first derivative, 2 for second
        :param order: order of the approximation (2 and 4 are supported)
        :param inplace: if ``True`` the trace is differentiated in place,
            otherwise a new trace object with the derivative is returned.

        Raises :py:exc:`ValueError` for unsupported ``n`` or ``order``.

        See :py:func:`~pyrocko.util.diff_fd` for implementation details.
        '''

        ddata = util.diff_fd(n, order, self.deltat, self.ydata)

        if inplace:
            self.ydata = ddata
        else:
            output = self.copy(data=False)
            output.set_ydata(ddata)
            return output

    def drop_chain_cache(self):
        if self._pchain:
            self._pchain.clear()

    def init_chain(self):
        self._pchain = pchain.Chain(
            do_downsample,
            do_extend,
            do_pre_taper,
            do_fft,
            do_filter,
            do_ifft)

    def run_chain(self, tmin, tmax, deltat, setup, nocache):
        if setup.domain == 'frequency_domain':
            _, _, data = self._pchain(
                (self, deltat),
                (tmin, tmax),
                (setup.taper,),
                (setup.filter,),
                (setup.filter,),
                nocache=nocache)

            return num.abs(data), num.abs(data)

        else:
            processed = self._pchain(
                (self, deltat),
                (tmin, tmax),
                (setup.taper,),
                (setup.filter,),
                (setup.filter,),
                (),
                nocache=nocache)

            if setup.domain == 'time_domain':
                data = processed.get_ydata()

            elif setup.domain == 'envelope':
                processed = processed.envelope(inplace=False)

            elif setup.domain == 'absolute':
                processed.set_ydata(num.abs(processed.get_ydata()))

            return processed.get_ydata(), processed

    def misfit(self, candidate, setup, nocache=False, debug=False):
        '''
        Calculate misfit and normalization factor against candidate trace.

        :param candidate: :py:class:`Trace` object
        :param setup: :py:class:`MisfitSetup` object
        :returns: tuple ``(m, n)``, where m is the misfit value and n is the
            normalization divisor

        If the sampling rates of ``self`` and ``candidate`` differ, the trace
        with the higher sampling rate will be downsampled.
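
        A typical invocation, using the :py:class:`MisfitSetup` and
        :py:class:`CosFader` objects defined further below in this module
        (trace names and parameter values here are placeholders)::

            setup = MisfitSetup(
                norm=2,
                taper=CosFader(xfade=2.0),
                domain='time_domain')

            m, n = reference.misfit(candidate, setup)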

        '''

        a = self
        b = candidate

        for tr in (a, b):
            if not tr._pchain:
                tr.init_chain()

        deltat = max(a.deltat, b.deltat)
        tmin = min(a.tmin, b.tmin) - deltat
        tmax = max(a.tmax, b.tmax) + deltat

        adata, aproc = a.run_chain(tmin, tmax, deltat, setup, nocache)
        bdata, bproc = b.run_chain(tmin, tmax, deltat, setup, nocache)

        if setup.domain != 'cc_max_norm':
            m, n = Lx_norm(bdata, adata, norm=setup.norm)
        else:
            ctr = correlate(aproc, bproc, mode='full', normalization='normal')
            ccmax = ctr.max()[1]
            m = 0.5 - 0.5 * ccmax
            n = 0.5

        if debug:
            return m, n, aproc, bproc
        else:
            return m, n

    def spectrum(self, pad_to_pow2=False, tfade=None):
        '''
        Get FFT spectrum of trace.

        :param pad_to_pow2: whether to zero-pad the data to next larger
            power-of-two length
1817         :param tfade: ``None`` or a time length in seconds, to apply

1818             cosine-shaped tapers to both ends of the trace

1819 

1820 :returns: a tuple with (frequencies, values) 
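
        Example (a sketch; plots the amplitude spectrum of a trace ``tr``)::

            import numpy as num
            import matplotlib.pyplot as plt

            freqs, spec = tr.spectrum(pad_to_pow2=True)
            plt.loglog(freqs[1:], num.abs(spec[1:]))
            plt.xlabel('Frequency [Hz]')
            plt.ylabel('Amplitude')
            plt.show()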

1821 ''' 

1822 

1823 ndata = self.ydata.size 

1824 

1825 if pad_to_pow2: 

1826 ntrans = nextpow2(ndata) 

1827 else: 

1828 ntrans = ndata 

1829 

1830 if tfade is None: 

1831 ydata = self.ydata 

1832 else: 

1833 ydata = self.ydata * costaper( 

1834 0., tfade, self.deltat*(ndata-1)-tfade, self.deltat*ndata, 

1835 ndata, self.deltat) 

1836 

1837 fydata = num.fft.rfft(ydata, ntrans) 

1838 df = 1./(ntrans*self.deltat) 

1839 fxdata = num.arange(len(fydata))*df 

1840 return fxdata, fydata 

1841 

1842 def multi_filter(self, filter_freqs, bandwidth): 
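        '''
        Apply a bank of Gaussian frequency-domain filters to the trace
        (multiple-filter time-frequency analysis).

        :param filter_freqs: center frequencies [Hz] of the Gaussian filters
        :param bandwidth: relative bandwidth of the Gaussian filters

        :returns: tuple ``(centroid_freqs, signal_tf)``, where
            ``centroid_freqs`` contains the energy-weighted centroid
            frequency of each filter band and ``signal_tf`` is a 2D array
            holding the envelope of the analytic signal for each filter band.
        '''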

1843 

1844 class Gauss(FrequencyResponse): 

1845 f0 = Float.T() 

1846 a = Float.T(default=1.0) 

1847 

1848 def __init__(self, f0, a=1.0, **kwargs): 

1849 FrequencyResponse.__init__(self, f0=f0, a=a, **kwargs) 

1850 

1851 def evaluate(self, freqs): 

1852 omega0 = 2.*math.pi*self.f0 

1853 omega = 2.*math.pi*freqs 

1854 return num.exp(-((omega-omega0) 

1855 / (self.a*omega0))**2) 

1856 

1857 freqs, coeffs = self.spectrum() 

1858 n = self.data_len() 

1859 nfilt = len(filter_freqs) 

1860 signal_tf = num.zeros((nfilt, n)) 

1861 centroid_freqs = num.zeros(nfilt) 

1862 for ifilt, f0 in enumerate(filter_freqs): 

1863 taper = Gauss(f0, a=bandwidth) 

1864 weights = taper.evaluate(freqs) 

1865 nhalf = freqs.size 

1866 analytic_spec = num.zeros(n, dtype=complex) 

1867 analytic_spec[:nhalf] = coeffs*weights 

1868 


1872 if n % 2 == 0: 

1873 analytic_spec[1:nhalf-1] *= 2. 

1874 else: 

1875 analytic_spec[1:nhalf] *= 2. 

1876 

1877 analytic = num.fft.ifft(analytic_spec) 

1878 signal_tf[ifilt, :] = num.abs(analytic) 

1879 

1880 enorm = num.abs(analytic_spec[:nhalf])**2 

1881 enorm /= num.sum(enorm) 

1882 centroid_freqs[ifilt] = num.sum(freqs*enorm) 

1883 

1884 return centroid_freqs, signal_tf 

1885 

1886 def _get_tapered_coeffs( 

1887 self, ntrans, freqlimits, transfer_function, invert=False): 

1888 

1889 cache_key = ( 

1890 ntrans, self.deltat, freqlimits, transfer_function.uuid, invert) 

1891 

1892 if cache_key in g_tapered_coeffs_cache: 

1893 return g_tapered_coeffs_cache[cache_key] 

1894 

1895 deltaf = 1./(self.deltat*ntrans) 

1896 nfreqs = ntrans//2 + 1 

1897 transfer = num.ones(nfreqs, dtype=complex) 

1898 hi = snapper(nfreqs, deltaf) 

1899 if freqlimits is not None: 

1900 kmin, kmax = hi(freqlimits[0]), hi(freqlimits[3]) 

1901 freqs = num.arange(kmin, kmax)*deltaf 

1902 coeffs = transfer_function.evaluate(freqs) 

1903 if invert: 

1904 if num.any(coeffs == 0.0): 

1905 raise InfiniteResponse('%s.%s.%s.%s' % self.nslc_id) 

1906 

1907 transfer[kmin:kmax] = 1.0 / coeffs 

1908 else: 

1909 transfer[kmin:kmax] = coeffs 

1910 

1911 tapered_transfer = costaper(*freqlimits, nfreqs, deltaf) * transfer 

1912 else: 

1913 if invert: 

1914 raise Exception( 

1915 'transfer: `freqlimits` must be given when `invert` is ' 

1916 'set to `True`') 

1917 

1918 freqs = num.arange(nfreqs) * deltaf 

1919 tapered_transfer = transfer_function.evaluate(freqs) 

1920 

1921 tapered_transfer[0] = 0.0 # don't introduce static offsets 

1922 

1923 g_tapered_coeffs_cache[cache_key] = tapered_transfer 

1924 return tapered_transfer 

1925 

1926 def fill_template(self, template, **additional): 

1927 ''' 

1928 Fill string template with trace metadata. 

1929 

1930 Uses normal python '%(placeholder)s' string templates. The following 

1931 placeholders are considered: ``network``, ``station``, ``location``, 

1932 ``channel``, ``tmin`` (time of first sample), ``tmax`` (time of last 

1933 sample), ``tmin_ms``, ``tmax_ms``, ``tmin_us``, ``tmax_us``, 

1934 ``tmin_year``, ``tmax_year``, ``tmin_month``, ``tmax_month``, 

1935 ``tmin_day``, ``tmax_day``, ``julianday``. The variants ending with 

1936 ``'_ms'`` include milliseconds, those with ``'_us'`` include 

1937         microseconds, and those ending with ``'_year'``, ``'_month'`` or ``'_day'`` contain the respective date component only.
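
        Example (a sketch; builds a file name from trace metadata)::

            fn = tr.fill_template(
                '%(network)s.%(station)s.%(channel)s_%(tmin)s.mseed')

        The shortcuts ``%n``, ``%s``, ``%l``, ``%c``, ``%b``, ``%e`` and
        ``%j`` are expanded to the corresponding long forms before
        substitution.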

1938 ''' 

1939 

1940 template = template.replace('%n', '%(network)s')\ 

1941 .replace('%s', '%(station)s')\ 

1942 .replace('%l', '%(location)s')\ 

1943 .replace('%c', '%(channel)s')\ 

1944 .replace('%b', '%(tmin)s')\ 

1945 .replace('%e', '%(tmax)s')\ 

1946 .replace('%j', '%(julianday)s') 

1947 

1948 params = dict( 

1949 zip(('network', 'station', 'location', 'channel'), self.nslc_id)) 

1950 params['tmin'] = util.time_to_str( 

1951 self.tmin, format='%Y-%m-%d_%H-%M-%S') 

1952 params['tmax'] = util.time_to_str( 

1953 self.tmax, format='%Y-%m-%d_%H-%M-%S') 

1954 params['tmin_ms'] = util.time_to_str( 

1955 self.tmin, format='%Y-%m-%d_%H-%M-%S.3FRAC') 

1956 params['tmax_ms'] = util.time_to_str( 

1957 self.tmax, format='%Y-%m-%d_%H-%M-%S.3FRAC') 

1958 params['tmin_us'] = util.time_to_str( 

1959 self.tmin, format='%Y-%m-%d_%H-%M-%S.6FRAC') 

1960 params['tmax_us'] = util.time_to_str( 

1961 self.tmax, format='%Y-%m-%d_%H-%M-%S.6FRAC') 

1962 params['tmin_year'], params['tmin_month'], params['tmin_day'] \ 

1963 = util.time_to_str(self.tmin, format='%Y-%m-%d').split('-') 

1964 params['tmax_year'], params['tmax_month'], params['tmax_day'] \ 

1965 = util.time_to_str(self.tmax, format='%Y-%m-%d').split('-') 

1966 params['julianday'] = util.julian_day_of_year(self.tmin) 

1967 params.update(additional) 

1968 return template % params 

1969 

1970 def plot(self): 

1971 ''' 

1972 Show trace with matplotlib. 

1973 

1974 See also: :py:meth:`Trace.snuffle`. 

1975 ''' 

1976 

1977         from matplotlib import pyplot as plt

1978         plt.plot(self.get_xdata(), self.get_ydata())

1979         name = '%s %s %s - %s' % (

1980             self.channel,

1981             self.station,

1982             time.strftime('%d-%m-%y %H:%M:%S', time.gmtime(self.tmin)),

1983             time.strftime('%d-%m-%y %H:%M:%S', time.gmtime(self.tmax)))

1984

1985         plt.title(name)

1986         plt.show()

1987 

1988 def snuffle(self, **kwargs): 

1989 ''' 

1990 Show trace in a snuffler window. 

1991 

1992 :param stations: list of :py:class:`pyrocko.model.Station` objects or 

1993 ``None`` 

1994 :param events: list of :py:class:`pyrocko.model.Event` objects or 

1995 ``None`` 

1996 :param markers: list of :py:class:`pyrocko.gui.snuffler.marker.Marker` 

1997 objects or ``None`` 

1998         :param ntracks: int, number of tracks to be shown initially (default:

1999 12) 

2000 :param follow: time interval (in seconds) for real time follow mode or 

2001 ``None`` 

2002 :param controls: bool, whether to show the main controls (default: 

2003 ``True``) 

2004 :param opengl: bool, whether to use opengl (default: ``False``) 

2005 ''' 

2006 

2007 return snuffle([self], **kwargs) 

2008 

2009 

2010def snuffle(traces, **kwargs): 

2011 ''' 

2012 Show traces in a snuffler window. 

2013 

2014 :param stations: list of :py:class:`pyrocko.model.Station` objects or 

2015 ``None`` 

2016 :param events: list of :py:class:`pyrocko.model.Event` objects or ``None`` 

2017 :param markers: list of :py:class:`pyrocko.gui.snuffler.marker.Marker` 

2018 objects or ``None`` 

2019     :param ntracks: int, number of tracks to be shown initially (default: 12)

2020 :param follow: time interval (in seconds) for real time follow mode or 

2021 ``None`` 

2022 :param controls: bool, whether to show the main controls (default: 

2023 ``True``) 

2024 :param opengl: bool, whether to use opengl (default: ``False``) 

2025 ''' 

2026 

2027 from pyrocko import pile 

2028 from pyrocko.gui.snuffler import snuffler 

2029 p = pile.Pile() 

2030 if traces: 

2031 trf = pile.MemTracesFile(None, traces) 

2032 p.add_file(trf) 

2033 return snuffler.snuffle(p, **kwargs) 

2034 

2035 

2036def downsample_tpad( 

2037 deltat_in, deltat_out, allow_upsample_max=1, ftype='fir-remez'): 

2038 ''' 

2039 Get approximate amount of cutoff which will be produced by downsampling. 

2040 

2041 The :py:meth:`Trace.downsample_to` method removes some samples at the 

2042 beginning and end of the trace which is downsampled. This function 

2043 estimates the approximate length [s] which will be cut off for a given set 

2044 of parameters supplied to :py:meth:`Trace.downsample_to`. 

2045 

2046 :param deltat_in: 

2047 Input sampling interval [s]. 

2048 :type deltat_in: 

2049 float 

2050 

2051 :param deltat_out: 

2052         Output sampling interval [s].

2053 :type deltat_out: 

2054 float 

2055 

2056 :returns: 

2057 Approximate length [s] which will be cut off. 

2058 

2059 See :py:meth:`Trace.downsample_to` for details. 
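
    Example (a sketch; downsampling from 100 Hz to 1 Hz sampling rate)::

        tpad = downsample_tpad(deltat_in=0.01, deltat_out=1.0)

    To avoid data loss at the ends of the interval of interest, extract
    waveforms padded by ``tpad`` on both sides before downsampling.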

2060 ''' 

2061 

2062 upsratio, deci_seq = _configure_downsampling( 

2063 deltat_in, deltat_out, allow_upsample_max) 

2064 

2065 tpad = 0.0 

2066 deltat = deltat_in / upsratio 

2067 for deci in deci_seq: 

2068 b, a, n = util.decimate_coeffs(deci, None, ftype) 

2069 # n//2 for the antialiasing 

2070 # +deci for possible snap to multiples 

2071 # +1 for rounding errors 

2072 tpad += (n//2 + deci + 1) * deltat 

2073 deltat = deltat * deci 

2074 

2075 return tpad 

2076 

2077 

2078def _configure_downsampling(deltat_in, deltat_out, allow_upsample_max): 

2079 for upsratio in range(1, allow_upsample_max+1): 

2080 dratio = (upsratio/deltat_in) / (1./deltat_out) 

2081 deci_seq = util.decitab(int(round(dratio))) 

2082 if abs(dratio - round(dratio)) / dratio < 0.0001 and deci_seq: 

2083 return upsratio, [deci for deci in deci_seq if deci != 1] 

2084 

2085 raise util.UnavailableDecimation('ratio = %g' % (deltat_out / deltat_in)) 

2086 

2087 

2088class InfiniteResponse(Exception): 

2089 ''' 

2090 This exception is raised by :py:class:`Trace` operations when deconvolution 

2091 of a frequency response (instrument response transfer function) would 

2092 result in a division by zero. 

2093 ''' 

2094 

2095 

2096class MisalignedTraces(Exception): 

2097 ''' 

2098 This exception is raised by some :py:class:`Trace` operations when tmin, 

2099 tmax or number of samples do not match. 

2100 ''' 

2101 

2102 pass 

2103 

2104 

2105class NoData(Exception): 

2106 ''' 

2107 This exception is raised by some :py:class:`Trace` operations when no or 

2108 not enough data is available. 

2109 ''' 

2110 

2111 pass 

2112 

2113 

2114class AboveNyquist(Exception): 

2115 ''' 

2116 This exception is raised by some :py:class:`Trace` operations when given 

2117 frequencies are above the Nyquist frequency. 

2118 ''' 

2119 

2120 pass 

2121 

2122 

2123class TraceTooShort(Exception): 

2124 ''' 

2125 This exception is raised by some :py:class:`Trace` operations when the 

2126 trace is too short. 

2127 ''' 

2128 

2129 pass 

2130 

2131 

2132class ResamplingFailed(Exception): 

2133 pass 

2134 

2135 

2136def minmax(traces, key=None, mode='minmax', outer_mode='minmax'): 

2137 

2138 ''' 

2139 Get data range given traces grouped by selected pattern. 

2140 

2141 :param key: a callable which takes as single argument a trace and returns a 

2142 key for the grouping of the results. If this is ``None``, the default, 

2143 ``lambda tr: (tr.network, tr.station, tr.location, tr.channel)`` is 

2144 used. 

2145     :param mode: ``'minmax'`` or floating point number. If this is

2146         ``'minmax'``, minimum and maximum of the traces are used; if it is a

2147         number, mean +- standard deviation times ``mode`` is used.

2148 

2149     :param outer_mode: ``'minmax'`` to use minimum and maximum of the

2150         single-trace ranges, or ``'robust'`` to use the interval remaining

2151         after discarding the 10% most extreme values at either end.

2152 

2153 :returns: a dict with the combined data ranges. 

2154 

2155 Examples:: 

2156 

2157 ranges = minmax(traces, lambda tr: tr.channel) 

2158         print(ranges['N'])  # print min & max of all traces with channel == 'N'

2159         print(ranges['E'])  # print min & max of all traces with channel == 'E'

2160

2161         ranges = minmax(traces, lambda tr: (tr.network, tr.station))

2162         print(ranges['GR', 'HAM3'])  # print min & max of all traces with

2163                                      # network == 'GR' and station == 'HAM3'

2164

2165         ranges = minmax(traces, lambda tr: None)

2166         print(ranges[None])  # prints min & max of all traces

2167 ''' 

2168 

2169 if key is None: 

2170 key = _default_key 

2171 

2172 ranges = defaultdict(list) 

2173 for trace in traces: 

2174 if isinstance(mode, str) and mode == 'minmax': 

2175 mi, ma = num.nanmin(trace.ydata), num.nanmax(trace.ydata) 

2176 else: 

2177 mean = trace.ydata.mean() 

2178 std = trace.ydata.std() 

2179 mi, ma = mean-std*mode, mean+std*mode 

2180 

2181 k = key(trace) 

2182 ranges[k].append((mi, ma)) 

2183 

2184 for k in ranges: 

2185 mins, maxs = num.array(ranges[k]).T 

2186 if outer_mode == 'minmax': 

2187 ranges[k] = num.nanmin(mins), num.nanmax(maxs) 

2188 elif outer_mode == 'robust': 

2189 ranges[k] = num.percentile(mins, 10.), num.percentile(maxs, 90.) 

2190 

2191 return ranges 

2192 

2193 

2194def minmaxtime(traces, key=None): 

2195 

2196 ''' 

2197 Get time range given traces grouped by selected pattern. 

2198 

2199 :param key: a callable which takes as single argument a trace and returns a 

2200 key for the grouping of the results. If this is ``None``, the default, 

2201 ``lambda tr: (tr.network, tr.station, tr.location, tr.channel)`` is 

2202 used. 

2203 

2204     :returns: a dict with the combined time ranges.

2205 ''' 

2206 

2207 if key is None: 

2208 key = _default_key 

2209 

2210 ranges = {} 

2211 for trace in traces: 

2212 mi, ma = trace.tmin, trace.tmax 

2213 k = key(trace) 

2214 if k not in ranges: 

2215 ranges[k] = mi, ma 

2216 else: 

2217 tmi, tma = ranges[k] 

2218 ranges[k] = min(tmi, mi), max(tma, ma) 

2219 

2220 return ranges 

2221 

2222 

2223def degapper( 

2224 traces, 

2225 maxgap=5, 

2226 fillmethod='interpolate', 

2227 deoverlap='use_second', 

2228 maxlap=None): 

2229 

2230 ''' 

2231 Try to connect traces and remove gaps. 

2232 

2233 This method will combine adjacent traces, which match in their network, 

2234 station, location and channel attributes. Overlapping parts are handled 

2235 according to the ``deoverlap`` argument. 

2236 

2237 :param traces: input traces, must be sorted by their full_id attribute. 

2238 :param maxgap: maximum number of samples to interpolate. 

2239 :param fillmethod: what to put into the gaps: 'interpolate' or 'zeros'. 

2240 :param deoverlap: how to handle overlaps: 'use_second' to use data from 

2241 second trace (default), 'use_first' to use data from first trace, 

2242 'crossfade_cos' to crossfade with cosine taper, 'add' to add amplitude 

2243 values. 

2244 :param maxlap: maximum number of samples of overlap which are removed 

2245 

2246 :returns: list of traces 
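
    Example (a sketch; assumes ``traces`` is a list of :py:class:`Trace`
    objects, sorted by their ``full_id`` attribute as required)::

        traces.sort(key=lambda tr: tr.full_id)
        continuous = degapper(traces, maxgap=10, fillmethod='interpolate')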

2247 ''' 

2248 

2249 in_traces = traces 

2250 out_traces = [] 

2251 if not in_traces: 

2252 return out_traces 

2253 out_traces.append(in_traces.pop(0)) 

2254 while in_traces: 

2255 

2256 a = out_traces[-1] 

2257 b = in_traces.pop(0) 

2258 

2259 avirt, bvirt = a.ydata is None, b.ydata is None 

2260 assert avirt == bvirt, \ 

2261 'traces given to degapper() must either all have data or have ' \ 

2262 'no data.' 

2263 

2264 virtual = avirt and bvirt 

2265 

2266 if (a.nslc_id == b.nslc_id and a.deltat == b.deltat 

2267 and a.data_len() >= 1 and b.data_len() >= 1 

2268 and (virtual or a.ydata.dtype == b.ydata.dtype)): 

2269 

2270 dist = (b.tmin-(a.tmin+(a.data_len()-1)*a.deltat))/a.deltat 

2271 idist = int(round(dist)) 

2272 if abs(dist - idist) > 0.05 and idist <= maxgap: 

2273 # logger.warning('Cannot degap traces with displaced sampling ' 

2274 # '(%s, %s, %s, %s)' % a.nslc_id) 

2275 pass 

2276 else: 

2277 if 1 < idist <= maxgap: 

2278 if not virtual: 

2279 if fillmethod == 'interpolate': 

2280 filler = a.ydata[-1] + ( 

2281 ((1.0 + num.arange(idist-1, dtype=float)) 

2282 / idist) * (b.ydata[0]-a.ydata[-1]) 

2283 ).astype(a.ydata.dtype) 

2284 elif fillmethod == 'zeros': 

2285 filler = num.zeros(idist-1, dtype=a.ydata.dtype) 

2286 a.ydata = num.concatenate((a.ydata, filler, b.ydata)) 

2287 a.tmax = b.tmax 

2288 if a.mtime and b.mtime: 

2289 a.mtime = max(a.mtime, b.mtime) 

2290 continue 

2291 

2292 elif idist == 1: 

2293 if not virtual: 

2294 a.ydata = num.concatenate((a.ydata, b.ydata)) 

2295 a.tmax = b.tmax 

2296 if a.mtime and b.mtime: 

2297 a.mtime = max(a.mtime, b.mtime) 

2298 continue 

2299 

2300 elif idist <= 0 and (maxlap is None or -maxlap < idist): 

2301 if b.tmax > a.tmax: 

2302 if not virtual: 

2303 na = a.ydata.size 

2304 n = -idist+1 

2305 if deoverlap == 'use_second': 

2306 a.ydata = num.concatenate( 

2307 (a.ydata[:-n], b.ydata)) 

2308 elif deoverlap in ('use_first', 'crossfade_cos'): 

2309 a.ydata = num.concatenate( 

2310 (a.ydata, b.ydata[n:])) 

2311 elif deoverlap == 'add': 

2312 a.ydata[-n:] += b.ydata[:n] 

2313 a.ydata = num.concatenate( 

2314 (a.ydata, b.ydata[n:])) 

2315 else: 

2316 assert False, 'unknown deoverlap method' 

2317 

2318 if deoverlap == 'crossfade_cos': 

2319 n = -idist+1 

2320 taper = 0.5-0.5*num.cos( 

2321 (1.+num.arange(n))/(1.+n)*num.pi) 

2322 a.ydata[na-n:na] *= 1.-taper 

2323 a.ydata[na-n:na] += b.ydata[:n] * taper 

2324 

2325 a.tmax = b.tmax 

2326 if a.mtime and b.mtime: 

2327 a.mtime = max(a.mtime, b.mtime) 

2328 continue 

2329 else: 

2330 # make short second trace vanish 

2331 continue 

2332 

2333 if b.data_len() >= 1: 

2334 out_traces.append(b) 

2335 

2336 for tr in out_traces: 

2337 tr._update_ids() 

2338 

2339 return out_traces 

2340 

2341 

2342def rotate(traces, azimuth, in_channels, out_channels): 

2343 ''' 

2344 2D rotation of traces. 

2345 

2346 :param traces: list of input traces 

2347 :param azimuth: difference of the azimuths of the component directions 

2348 (azimuth of out_channels[0]) - (azimuth of in_channels[0]) 

2349 :param in_channels: names of the input channels (e.g. 'N', 'E') 

2350 :param out_channels: names of the output channels (e.g. 'R', 'T') 

2351 :returns: list of rotated traces 
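
    Example (a sketch; assumes ``traces`` contains 'N' and 'E' components
    and ``bazi`` is the event back-azimuth in [deg])::

        rt = rotate(traces, azimuth=bazi+180.,
                    in_channels=('N', 'E'), out_channels=('R', 'T'))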

2352 ''' 

2353 

2354 phi = azimuth/180.*math.pi 

2355 cphi = math.cos(phi) 

2356 sphi = math.sin(phi) 

2357 rotated = [] 

2358 in_channels = tuple(_channels_to_names(in_channels)) 

2359 out_channels = tuple(_channels_to_names(out_channels)) 

2360 for a in traces: 

2361 for b in traces: 

2362 if ((a.channel, b.channel) == in_channels and 

2363 a.nslc_id[:3] == b.nslc_id[:3] and 

2364 abs(a.deltat-b.deltat) < a.deltat*0.001): 

2365 tmin = max(a.tmin, b.tmin) 

2366 tmax = min(a.tmax, b.tmax) 

2367 

2368 if tmin < tmax: 

2369 ac = a.chop(tmin, tmax, inplace=False, include_last=True) 

2370 bc = b.chop(tmin, tmax, inplace=False, include_last=True) 

2371 if abs(ac.tmin - bc.tmin) > ac.deltat*0.01: 

2372 logger.warning( 

2373 'Cannot rotate traces with displaced sampling ' 

2374 '(%s, %s, %s, %s)' % a.nslc_id) 

2375 continue 

2376 

2377 acydata = ac.get_ydata()*cphi+bc.get_ydata()*sphi 

2378 bcydata = -ac.get_ydata()*sphi+bc.get_ydata()*cphi 

2379 ac.set_ydata(acydata) 

2380 bc.set_ydata(bcydata) 

2381 

2382 ac.set_codes(channel=out_channels[0]) 

2383 bc.set_codes(channel=out_channels[1]) 

2384 rotated.append(ac) 

2385 rotated.append(bc) 

2386 

2387 return rotated 

2388 

2389 

2390def rotate_to_rt(n, e, source, receiver, out_channels=('R', 'T')): 

2391 ''' 

2392 Rotate traces from NE to RT system. 

2393 

2394 :param n: 

2395 North trace. 

2396 :type n: 

2397 :py:class:`~pyrocko.trace.Trace` 

2398 

2399 :param e: 

2400 East trace. 

2401 :type e: 

2402 :py:class:`~pyrocko.trace.Trace` 

2403 

2404 :param source: 

2405 Source of the recorded signal. 

2406 :type source: 

2407 :py:class:`pyrocko.gf.seismosizer.Source` 

2408 

2409 :param receiver: 

2410 Receiver of the recorded signal. 

2411 :type receiver: 

2412 :py:class:`pyrocko.model.location.Location` 

2413 

2414     :param out_channels:

2415         Channel codes of the output channels (radial, transversal).

2416         Default is ``('R', 'T')``.

2417

2418     :type out_channels:

2419         tuple[str, str], optional

2420 

2421 :returns: 

2422 Rotated traces (radial, transversal). 

2423 :rtype: 

2424 tuple[ 

2425 :py:class:`~pyrocko.trace.Trace`, 

2426 :py:class:`~pyrocko.trace.Trace`] 

2427 ''' 

2428 azimuth = orthodrome.azimuth(receiver, source) + 180. 

2429 in_channels = n.channel, e.channel 

2430 out = rotate( 

2431 [n, e], azimuth, 

2432 in_channels=in_channels, 

2433 out_channels=out_channels) 

2434 

2435 assert len(out) == 2 

2436 for tr in out: 

2437 if tr.channel == out_channels[0]: 

2438 r = tr 

2439 elif tr.channel == out_channels[1]: 

2440 t = tr 

2441 else: 

2442 assert False 

2443 

2444 return r, t 

2445 

2446 

2447def rotate_to_lqt(traces, backazimuth, incidence, in_channels, 

2448 out_channels=('L', 'Q', 'T')): 

2449 ''' 

2450 Rotate traces from ZNE to LQT system. 

2451 

2452 :param traces: list of traces in arbitrary order 

2453 :param backazimuth: backazimuth in degrees clockwise from north 

2454 :param incidence: incidence angle in degrees from vertical 

2455 :param in_channels: input channel names 

2456 :param out_channels: output channel names (default: ('L', 'Q', 'T')) 

2457 :returns: list of transformed traces 
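
    Example (a sketch; assumes ``traces`` holds Z, N and E components)::

        lqt = rotate_to_lqt(traces, backazimuth=135., incidence=20.,
                            in_channels=('Z', 'N', 'E'))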

2458 ''' 

2459 i = incidence/180.*num.pi 

2460 b = backazimuth/180.*num.pi 

2461 

2462 ci = num.cos(i) 

2463 cb = num.cos(b) 

2464 si = num.sin(i) 

2465 sb = num.sin(b) 

2466 

2467 rotmat = num.array( 

2468 [[ci, -cb*si, -sb*si], [si, cb*ci, sb*ci], [0., sb, -cb]]) 

2469 return project(traces, rotmat, in_channels, out_channels) 

2470 

2471 

2472def _decompose(a): 

2473 ''' 

2474 Decompose matrix into independent submatrices. 

2475 ''' 

2476 

2477 def depends(iout, a): 

2478 row = a[iout, :] 

2479 return set(num.arange(row.size).compress(row != 0.0)) 

2480 

2481 def provides(iin, a): 

2482 col = a[:, iin] 

2483 return set(num.arange(col.size).compress(col != 0.0)) 

2484 

2485 a = num.asarray(a) 

2486 outs = set(range(a.shape[0])) 

2487 systems = [] 

2488 while outs: 

2489 iout = outs.pop() 

2490 

2491 gout = set() 

2492 for iin in depends(iout, a): 

2493 gout.update(provides(iin, a)) 

2494 

2495 if not gout: 

2496 continue 

2497 

2498 gin = set() 

2499 for iout2 in gout: 

2500 gin.update(depends(iout2, a)) 

2501 

2502 if not gin: 

2503 continue 

2504 

2505 for iout2 in gout: 

2506 if iout2 in outs: 

2507 outs.remove(iout2) 

2508 

2509 gin = list(gin) 

2510 gin.sort() 

2511 gout = list(gout) 

2512 gout.sort() 

2513 

2514 systems.append((gin, gout, a[gout, :][:, gin])) 

2515 

2516 return systems 

2517 

2518 

2519def _channels_to_names(channels): 

2520 from pyrocko import squirrel 

2521 names = [] 

2522 for ch in channels: 

2523 if isinstance(ch, model.Channel): 

2524 names.append(ch.name) 

2525 elif isinstance(ch, squirrel.Channel): 

2526 names.append(ch.codes.channel) 

2527 else: 

2528 names.append(ch) 

2529 

2530 return names 

2531 

2532 

2533def project(traces, matrix, in_channels, out_channels): 

2534 ''' 

2535 Affine transform of three-component traces. 

2536 

2537     Compute the matrix-vector product of three-component traces, e.g. to

2538     rotate traces into a different basis. The traces are distinguished and

2539     ordered by their channel attribute. The transform is applied to

2540     overlapping parts of any appropriate combinations of the input traces,

2541     which makes this function robust in the presence of data gaps. It also

2542     tries to apply the transformation to subsets of the channels where

2543     possible, so that, if for example a vertical component is missing,

2544     horizontal components can still be rotated.

2545 

2546 :param traces: list of traces in arbitrary order 

2547     :param matrix: transformation matrix

2548 :param in_channels: input channel names 

2549 :param out_channels: output channel names 

2550 :returns: list of transformed traces 
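
    Example (a sketch; a 2D rotation expressed as a projection, assuming
    ``traces`` holds 'N' and 'E' components)::

        import math
        import numpy as num

        phi = math.radians(30.)
        matrix = num.array(
            [[math.cos(phi), math.sin(phi)],
             [-math.sin(phi), math.cos(phi)]])

        rotated = project(traces, matrix, ('N', 'E'), ('R', 'T'))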

2551 ''' 

2552 

2553 in_channels = tuple(_channels_to_names(in_channels)) 

2554 out_channels = tuple(_channels_to_names(out_channels)) 

2555 systems = _decompose(matrix) 

2556 

2557 # fallback to full matrix if some are not quadratic 

2558 for iins, iouts, submatrix in systems: 

2559 if submatrix.shape[0] != submatrix.shape[1]: 

2560 if len(in_channels) != 3 or len(out_channels) != 3: 

2561 return [] 

2562 else: 

2563 return _project3(traces, matrix, in_channels, out_channels) 

2564 

2565 projected = [] 

2566 for iins, iouts, submatrix in systems: 

2567 in_cha = tuple([in_channels[iin] for iin in iins]) 

2568 out_cha = tuple([out_channels[iout] for iout in iouts]) 

2569 if submatrix.shape[0] == 1: 

2570 projected.extend(_project1(traces, submatrix, in_cha, out_cha)) 

2571 elif submatrix.shape[1] == 2: 

2572 projected.extend(_project2(traces, submatrix, in_cha, out_cha)) 

2573 else: 

2574 projected.extend(_project3(traces, submatrix, in_cha, out_cha)) 

2575 

2576 return projected 

2577 

2578 

2579def project_dependencies(matrix, in_channels, out_channels): 

2580 ''' 

2581 Figure out what dependencies project() would produce. 

2582 ''' 

2583 

2584 in_channels = tuple(_channels_to_names(in_channels)) 

2585 out_channels = tuple(_channels_to_names(out_channels)) 

2586 systems = _decompose(matrix) 

2587 

2588 subpro = [] 

2589 for iins, iouts, submatrix in systems: 

2590 if submatrix.shape[0] != submatrix.shape[1]: 

2591 subpro.append((matrix, in_channels, out_channels)) 

2592 

2593 if not subpro: 

2594 for iins, iouts, submatrix in systems: 

2595 in_cha = tuple([in_channels[iin] for iin in iins]) 

2596 out_cha = tuple([out_channels[iout] for iout in iouts]) 

2597 subpro.append((submatrix, in_cha, out_cha)) 

2598 

2599 deps = {} 

2600 for mat, in_cha, out_cha in subpro: 

2601 for oc in out_cha: 

2602 if oc not in deps: 

2603 deps[oc] = [] 

2604 

2605 for ic in in_cha: 

2606 deps[oc].append(ic) 

2607 

2608 return deps 

2609 

2610 

2611def _project1(traces, matrix, in_channels, out_channels): 

2612 assert len(in_channels) == 1 

2613 assert len(out_channels) == 1 

2614 assert matrix.shape == (1, 1) 

2615 

2616 projected = [] 

2617 for a in traces: 

2618 if not (a.channel,) == in_channels: 

2619 continue 

2620 

2621 ac = a.copy() 

2622 ac.set_ydata(matrix[0, 0]*a.get_ydata()) 

2623 ac.set_codes(channel=out_channels[0]) 

2624 projected.append(ac) 

2625 

2626 return projected 

2627 

2628 

2629def _project2(traces, matrix, in_channels, out_channels): 

2630 assert len(in_channels) == 2 

2631 assert len(out_channels) == 2 

2632 assert matrix.shape == (2, 2) 

2633 projected = [] 

2634 for a in traces: 

2635 for b in traces: 

2636 if not ((a.channel, b.channel) == in_channels and 

2637 a.nslc_id[:3] == b.nslc_id[:3] and 

2638 abs(a.deltat-b.deltat) < a.deltat*0.001): 

2639 continue 

2640 

2641 tmin = max(a.tmin, b.tmin) 

2642 tmax = min(a.tmax, b.tmax) 

2643 

2644 if tmin > tmax: 

2645 continue 

2646 

2647 ac = a.chop(tmin, tmax, inplace=False, include_last=True) 

2648 bc = b.chop(tmin, tmax, inplace=False, include_last=True) 

2649 if abs(ac.tmin - bc.tmin) > ac.deltat*0.01: 

2650 logger.warning( 

2651 'Cannot project traces with displaced sampling ' 

2652 '(%s, %s, %s, %s)' % a.nslc_id) 

2653 continue 

2654 

2655 acydata = num.dot(matrix[0], (ac.get_ydata(), bc.get_ydata())) 

2656 bcydata = num.dot(matrix[1], (ac.get_ydata(), bc.get_ydata())) 

2657 

2658 ac.set_ydata(acydata) 

2659 bc.set_ydata(bcydata) 

2660 

2661 ac.set_codes(channel=out_channels[0]) 

2662 bc.set_codes(channel=out_channels[1]) 

2663 

2664 projected.append(ac) 

2665 projected.append(bc) 

2666 

2667 return projected 

2668 

2669 

2670def _project3(traces, matrix, in_channels, out_channels): 

2671 assert len(in_channels) == 3 

2672 assert len(out_channels) == 3 

2673 assert matrix.shape == (3, 3) 

2674 projected = [] 

2675 for a in traces: 

2676 for b in traces: 

2677 for c in traces: 

2678 if not ((a.channel, b.channel, c.channel) == in_channels 

2679 and a.nslc_id[:3] == b.nslc_id[:3] 

2680 and b.nslc_id[:3] == c.nslc_id[:3] 

2681 and abs(a.deltat-b.deltat) < a.deltat*0.001 

2682 and abs(b.deltat-c.deltat) < b.deltat*0.001): 

2683 

2684 continue 

2685 

2686 tmin = max(a.tmin, b.tmin, c.tmin) 

2687 tmax = min(a.tmax, b.tmax, c.tmax) 

2688 

2689 if tmin >= tmax: 

2690 continue 

2691 

2692 ac = a.chop(tmin, tmax, inplace=False, include_last=True) 

2693 bc = b.chop(tmin, tmax, inplace=False, include_last=True) 

2694 cc = c.chop(tmin, tmax, inplace=False, include_last=True) 

2695 if (abs(ac.tmin - bc.tmin) > ac.deltat*0.01 

2696 or abs(bc.tmin - cc.tmin) > bc.deltat*0.01): 

2697 

2698 logger.warning( 

2699 'Cannot project traces with displaced sampling ' 

2700 '(%s, %s, %s, %s)' % a.nslc_id) 

2701 continue 

2702 

2703 acydata = num.dot( 

2704 matrix[0], 

2705 (ac.get_ydata(), bc.get_ydata(), cc.get_ydata())) 

2706 bcydata = num.dot( 

2707 matrix[1], 

2708 (ac.get_ydata(), bc.get_ydata(), cc.get_ydata())) 

2709 ccydata = num.dot( 

2710 matrix[2], 

2711 (ac.get_ydata(), bc.get_ydata(), cc.get_ydata())) 

2712 

2713 ac.set_ydata(acydata) 

2714 bc.set_ydata(bcydata) 

2715 cc.set_ydata(ccydata) 

2716 

2717 ac.set_codes(channel=out_channels[0]) 

2718 bc.set_codes(channel=out_channels[1]) 

2719 cc.set_codes(channel=out_channels[2]) 

2720 

2721 projected.append(ac) 

2722 projected.append(bc) 

2723 projected.append(cc) 

2724 

2725 return projected 

2726 

2727 

2728def correlate(a, b, mode='valid', normalization=None, use_fft=False): 

2729 ''' 

2730 Cross correlation of two traces. 

2731 

2732 :param a,b: input traces 

2733 :param mode: ``'valid'``, ``'full'``, or ``'same'`` 

2734 :param normalization: ``'normal'``, ``'gliding'``, or ``None`` 

2735 :param use_fft: bool, whether to do cross correlation in spectral domain 

2736 

2737 :returns: trace containing cross correlation coefficients 

2738 

2739 This function computes the cross correlation between two traces. It 

2740 evaluates the discrete equivalent of 

2741 

2742 .. math:: 

2743 

2744 c(t) = \\int_{-\\infty}^{\\infty} a^{\\ast}(\\tau) b(t+\\tau) d\\tau 

2745 

2746     where the star denotes complex conjugate. Note that the arguments here are

2747 swapped when compared with the :py:func:`numpy.correlate` function, 

2748 which is internally called. This function should be safe even with older 

2749 versions of NumPy, where the correlate function has some problems. 

2750 

2751 A trace containing the cross correlation coefficients is returned. The time 

2752 information of the output trace is set so that the returned cross 

2753 correlation can be viewed directly as a function of time lag. 

2754 

2755 Example:: 

2756 

2757 # align two traces a and b containing a time shifted similar signal: 

2758 c = pyrocko.trace.correlate(a,b) 

2759 t, coef = c.max() # get time and value of maximum 

2760 b.shift(-t) # align b with a 

2761 

2762 ''' 

2763 

2764 assert_same_sampling_rate(a, b) 

2765 

2766 ya, yb = a.ydata, b.ydata 

2767 

2768 # need reversed order here: 

2769 yc = numpy_correlate_fixed(yb, ya, mode=mode, use_fft=use_fft) 

2770 kmin, kmax = numpy_correlate_lag_range(yb, ya, mode=mode, use_fft=use_fft) 

2771 

2772 if normalization == 'normal': 

2773 normfac = num.sqrt(num.sum(ya**2))*num.sqrt(num.sum(yb**2)) 

2774 yc = yc/normfac 

2775 

2776 elif normalization == 'gliding': 

2777 if mode != 'valid': 

2778 assert False, 'gliding normalization currently only available ' \ 

2779 'with "valid" mode.' 

2780 

2781 if ya.size < yb.size: 

2782 yshort, ylong = ya, yb 

2783 else: 

2784 yshort, ylong = yb, ya 

2785 

2786 epsilon = 0.00001 

2787 normfac_short = num.sqrt(num.sum(yshort**2)) 

2788 normfac = normfac_short * num.sqrt( 

2789 moving_sum(ylong**2, yshort.size, mode='valid')) \ 

2790 + normfac_short*epsilon 

2791 

2792 if yb.size <= ya.size: 

2793 normfac = normfac[::-1] 

2794 

2795 yc /= normfac 

2796 

2797 c = a.copy() 

2798 c.set_ydata(yc) 

2799 c.set_codes(*merge_codes(a, b, '~')) 

2800 c.shift(-c.tmin + b.tmin-a.tmin + kmin * c.deltat) 

2801 

2802 return c 

2803 

2804 

2805def deconvolve( 

2806 a, b, waterlevel, 

2807 tshift=0., 

2808 pad=0.5, 

2809 fd_taper=None, 

2810 pad_to_pow2=True): 
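
    '''
    Deconvolve trace ``b`` from trace ``a`` using water-level stabilized
    spectral division.

    The quotient ``A(f) conj(B(f)) / D(f)`` is computed, where ``D(f)`` is
    ``|B(f)|**2``, clipped from below at ``waterlevel`` times its maximum.
    The result may be time-shifted by ``tshift`` [s], is padded by a
    fraction ``pad`` of the data length and can be shaped with a
    frequency-domain taper ``fd_taper``.

    :returns: deconvolved trace with merged codes of ``a`` and ``b``
    '''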

2811 

2812     assert_same_sampling_rate(a, b)

2813 assert abs(a.tmin - b.tmin) < a.deltat * 0.001 

2814 deltat = a.deltat 

2815 npad = int(round(a.data_len()*pad + tshift / deltat)) 

2816 

2817 ndata = max(a.data_len(), b.data_len()) 

2818 ndata_pad = ndata + npad 

2819 

2820 if pad_to_pow2: 

2821 ntrans = nextpow2(ndata_pad) 

2822 else: 

2823 ntrans = ndata 

2824 

2825 aspec = num.fft.rfft(a.ydata, ntrans) 

2826 bspec = num.fft.rfft(b.ydata, ntrans) 

2827 

2828 out = aspec * num.conj(bspec) 

2829 

2830 bautocorr = bspec*num.conj(bspec) 

2831 denom = num.maximum(bautocorr, waterlevel * bautocorr.max()) 

2832 

2833 out /= denom 

2834 df = 1/(ntrans*deltat) 

2835 

2836 if fd_taper is not None: 

2837 fd_taper(out, 0.0, df) 

2838 

2839 ydata = num.roll(num.fft.irfft(out), int(round(tshift/deltat))) 

2840 c = a.copy(data=False) 

2841 c.set_ydata(ydata[:ndata]) 

2842 c.set_codes(*merge_codes(a, b, '/')) 

2843 return c 

2844 

2845 

2846def assert_same_sampling_rate(a, b, eps=1.0e-6): 

2847 assert same_sampling_rate(a, b, eps), \ 

2848 'Sampling rates differ: %g != %g' % (a.deltat, b.deltat) 

2849 

2850 

2851def same_sampling_rate(a, b, eps=1.0e-6): 

2852 ''' 

2853 Check if two traces have the same sampling rate. 

2854 

2855 :param a,b: input traces 

2856 :param eps: relative tolerance 

2857 ''' 

2858 

2859 return abs(a.deltat - b.deltat) < (a.deltat + b.deltat)*eps 

2860 

2861 

2862def fix_deltat_rounding_errors(deltat): 

2863 ''' 

2864 Try to undo sampling rate rounding errors. 

2865 

2866 Fix rounding errors of sampling intervals when these are read from single 

2867 precision floating point values. 

2868 

2869 Assumes that the true sampling rate or sampling interval was an integer 

2870 value. No correction will be applied if this would change the sampling 

2871 rate by more than 0.001%. 

2872 ''' 

2873 

2874 if deltat <= 1.0: 

2875 deltat_new = 1.0 / round(1.0 / deltat) 

2876 else: 

2877 deltat_new = round(deltat) 

2878 

2879 if abs(deltat_new - deltat) / deltat > 1e-5: 

2880 deltat_new = deltat 

2881 

2882 return deltat_new 

2883 

2884 

2885def merge_codes(a, b, sep='-'): 

2886 ''' 

2887 Merge network-station-location-channel codes of a pair of traces. 

2888 ''' 

2889 

2890 o = [] 

2891 for xa, xb in zip(a.nslc_id, b.nslc_id): 

2892 if xa == xb: 

2893 o.append(xa) 

2894 else: 

2895 o.append(sep.join((xa, xb))) 

2896 return o 

2897 

2898 

2899class Taper(Object): 

2900 ''' 

2901 Base class for tapers. 

2902 

2903 Does nothing by default. 

2904 ''' 

2905 

2906 def __call__(self, y, x0, dx): 

2907 pass 

2908 

2909 

2910class CosTaper(Taper): 

2911 ''' 

2912 Cosine Taper. 

2913 

2914 :param a: start of fading in 

2915 :param b: end of fading in 

2916 :param c: start of fading out 

2917 :param d: end of fading out 

2918 ''' 

2919 

2920 a = Float.T() 

2921 b = Float.T() 

2922 c = Float.T() 

2923 d = Float.T() 

2924 

2925 def __init__(self, a, b, c, d): 

2926 Taper.__init__(self, a=a, b=b, c=c, d=d) 

2927 

2928 def __call__(self, y, x0, dx): 

2929 

2930 if y.dtype == num.dtype(float): 

2931 _apply_costaper = signal_ext.apply_costaper 

2932 else: 

2933 _apply_costaper = apply_costaper 

2934 

2935 _apply_costaper(self.a, self.b, self.c, self.d, y, x0, dx) 

2936 

2937 def span(self, y, x0, dx): 

2938 return span_costaper(self.a, self.b, self.c, self.d, y, x0, dx) 

2939 

2940 def time_span(self): 

2941 return self.a, self.d 

2942 

2943 

2944class CosFader(Taper): 

2945 ''' 

2946 Cosine Fader. 

2947 

2948 :param xfade: fade in and fade out time in seconds (optional) 

2949 :param xfrac: fade in and fade out as fraction between 0. and 1. (optional) 

2950 

2951     Only one argument can be set. The other has to be ``None``.

2952 ''' 

2953 

2954 xfade = Float.T(optional=True) 

2955 xfrac = Float.T(optional=True) 

2956 

2957 def __init__(self, xfade=None, xfrac=None): 

2958 Taper.__init__(self, xfade=xfade, xfrac=xfrac) 

2959 assert (xfade is None) != (xfrac is None) 

2960 self._xfade = xfade 

2961 self._xfrac = xfrac 

2962 

2963 def __call__(self, y, x0, dx): 

2964 

2965 xfade = self._xfade 

2966 

2967 xlen = (y.size - 1)*dx 

2968 if xfade is None: 

2969 xfade = xlen * self._xfrac 

2970 

2971 a = x0 

2972 b = x0 + xfade 

2973 c = x0 + xlen - xfade 

2974 d = x0 + xlen 

2975 

2976 apply_costaper(a, b, c, d, y, x0, dx) 

2977 

2978 def span(self, y, x0, dx): 

2979 return 0, y.size 

2980 

2981 def time_span(self): 

2982 return None, None 

2983 

2984 

2985def none_min(li): 

2986 if None in li: 

2987 return None 

2988 else: 

2989 return min(x for x in li if x is not None) 

2990 

2991 

2992def none_max(li): 

2993 if None in li: 

2994 return None 

2995 else: 

2996 return max(x for x in li if x is not None) 

2997 

2998 

2999class MultiplyTaper(Taper): 

3000 ''' 

3001 Multiplication of several tapers. 

3002 ''' 

3003 

3004 tapers = List.T(Taper.T()) 

3005 

3006 def __init__(self, tapers=None): 

3007 if tapers is None: 

3008 tapers = [] 

3009 

3010 Taper.__init__(self, tapers=tapers) 

3011 

3012 def __call__(self, y, x0, dx): 

3013 for taper in self.tapers: 

3014 taper(y, x0, dx) 

3015 

3016 def span(self, y, x0, dx): 

3017 spans = [] 

3018 for taper in self.tapers: 

3019 spans.append(taper.span(y, x0, dx)) 

3020 

3021 mins, maxs = list(zip(*spans)) 

3022 return min(mins), max(maxs) 

3023 

3024 def time_span(self): 

3025 spans = [] 

3026 for taper in self.tapers: 

3027 spans.append(taper.time_span()) 

3028 

3029 mins, maxs = list(zip(*spans)) 

3030 return none_min(mins), none_max(maxs) 

3031 

3032 

3033class GaussTaper(Taper): 

3034 ''' 

3035 Frequency domain Gaussian filter. 

3036 ''' 

3037 

3038 alpha = Float.T() 

3039 

3040 def __init__(self, alpha): 

3041 Taper.__init__(self, alpha=alpha) 

3042 self._alpha = alpha 

3043 

3044 def __call__(self, y, x0, dx): 

3045 f = x0 + num.arange(y.size)*dx 

3046 y *= num.exp(-num.pi**2 / (self._alpha**2) * f**2) 

3047 

3048 

3049cached_coefficients = {} 

3050 

3051 

3052def _get_cached_filter_coeffs(order, corners, btype): 

3053 ck = (order, tuple(corners), btype) 

3054 if ck not in cached_coefficients: 

3055         if len(corners) == 1:

3056 cached_coefficients[ck] = signal.butter( 

3057 order, corners[0], btype=btype) 

3058 else: 

3059 cached_coefficients[ck] = signal.butter( 

3060 order, corners, btype=btype) 

3061 

3062 return cached_coefficients[ck] 

3063 

3064 

3065class _globals(object): 

3066 _numpy_has_correlate_flip_bug = None 

3067 

3068 

3069def _default_key(tr): 

3070 return (tr.network, tr.station, tr.location, tr.channel) 

3071 

3072 

3073def numpy_has_correlate_flip_bug(): 

3074 ''' 

3075 Check if NumPy's correlate function reveals old behaviour. 

3076 ''' 

3077 

3078 if _globals._numpy_has_correlate_flip_bug is None: 

3079 a = num.array([0, 0, 1, 0, 0, 0, 0]) 

3080 b = num.array([0, 0, 0, 0, 1, 0, 0, 0]) 

3081 ab = num.correlate(a, b, mode='same') 

3082 ba = num.correlate(b, a, mode='same') 

3083 _globals._numpy_has_correlate_flip_bug = num.all(ab == ba) 

3084 

3085 return _globals._numpy_has_correlate_flip_bug 

3086 

3087 

3088def numpy_correlate_fixed(a, b, mode='valid', use_fft=False): 

3089 ''' 

3090 Call :py:func:`numpy.correlate` with fixes. 

3091 

3092 c[k] = sum_i a[i+k] * conj(b[i]) 

3093 

3094 Note that the result produced by newer numpy.correlate is always flipped 

3095 with respect to the formula given in its documentation (if ascending k 

3096 assumed for the output). 

3097 ''' 

3098 

3099 if use_fft: 

3100 if a.size < b.size: 

3101 c = signal.fftconvolve(b[::-1], a, mode=mode) 

3102 else: 

3103 c = signal.fftconvolve(a, b[::-1], mode=mode) 

3104 return c 

3105 

3106 else: 

3107 buggy = numpy_has_correlate_flip_bug() 

3108 

3109 a = num.asarray(a) 

3110 b = num.asarray(b) 

3111 

3112 if buggy: 

3113 b = num.conj(b) 

3114 

3115 c = num.correlate(a, b, mode=mode) 

3116 

3117 if buggy and a.size < b.size: 

3118 return c[::-1] 

3119 else: 

3120 return c 

3121 

3122 

3123def numpy_correlate_emulate(a, b, mode='valid'): 

3124 ''' 

3125 Slow version of :py:func:`numpy.correlate` for comparison. 

3126 ''' 

3127 

3128 a = num.asarray(a) 

3129 b = num.asarray(b) 


3132 kmin, kmax = numpy_correlate_lag_range(a, b, mode=mode) 

3133 kmin = int(kmin) 

3134 kmax = int(kmax) 

3135 klen = kmax - kmin + 1 

3136     c = num.zeros(klen, dtype=num.promote_types(b.dtype, a.dtype))

3137 for k in range(kmin, kmin+klen): 

3138 imin = max(0, -k) 

3139 ilen = min(b.size, a.size-k) - imin 

3140 c[k-kmin] = num.sum( 

3141 a[imin+k:imin+ilen+k] * num.conj(b[imin:imin+ilen])) 

3142 

3143 return c 

3144 

3145 

3146def numpy_correlate_lag_range(a, b, mode='valid', use_fft=False): 

3147 ''' 

3148 Get range of lags for which :py:func:`numpy.correlate` produces values. 

3149 ''' 

3150 

3151 a = num.asarray(a) 

3152 b = num.asarray(b) 

3153 

3154 kmin = -(b.size-1) 

3155 if mode == 'full': 

3156 klen = a.size-kmin 

3157 elif mode == 'same': 

3158 klen = max(a.size, b.size) 

3159 kmin += (a.size+b.size-1 - max(a.size, b.size)) // 2 + \ 

3160 int(not use_fft and a.size % 2 == 0 and b.size > a.size) 

3161 elif mode == 'valid': 

3162 klen = abs(a.size - b.size) + 1 

3163 kmin += min(a.size, b.size) - 1 

3164 

3165 return kmin, kmin + klen - 1 

3166 

3167 

3168def autocorr(x, nshifts): 

3169 ''' 

3170 Compute biased estimate of the first autocorrelation coefficients. 

3171 

3172 :param x: input array 

3173 :param nshifts: number of coefficients to calculate 

3174 ''' 

3175 

3176 mean = num.mean(x) 

3177 std = num.std(x) 

3178 n = x.size 

3179 xdm = x - mean 

3180 r = num.zeros(nshifts) 

3181 for k in range(nshifts): 

3182 r[k] = 1./((n-num.abs(k))*std) * num.sum(xdm[:n-k] * xdm[k:]) 

3183 

3184 return r 

3185 

3186 

3187def yulewalker(x, order): 

3188 ''' 

3189 Compute autoregression coefficients using Yule-Walker method. 

3190 

3191 :param x: input array 

3192 :param order: number of coefficients to produce 

3193 

3194 A biased estimate of the autocorrelation is used. The Yule-Walker equations 

3195 are solved by :py:func:`numpy.linalg.inv` instead of Levinson-Durbin 

3196 recursion which is normally used. 

3197 ''' 

3198 

3199 gamma = autocorr(x, order+1) 

3200 d = gamma[1:1+order] 

3201 a = num.zeros((order, order)) 

3202 gamma2 = num.concatenate((gamma[::-1], gamma[1:order])) 

3203 for i in range(order): 

3204 ioff = order-i 

3205 a[i, :] = gamma2[ioff:ioff+order] 

3206 

3207 return num.dot(num.linalg.inv(a), -d) 

3208 

3209 

3210def moving_avg(x, n): 
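    '''
    Centered moving average, computed via cumulative sums.

    Averages ``x`` over windows of ``n`` samples. At the edges, where the
    window does not fully overlap the data, the nearest valid average is
    repeated, so the output has the same length as the input.
    '''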

3211 n = int(n) 

3212 cx = x.cumsum() 

3213 nn = len(x) 

3214 y = num.zeros(nn, dtype=cx.dtype) 

3215 y[n//2:n//2+(nn-n)] = (cx[n:]-cx[:-n])/n 

3216 y[:n//2] = y[n//2] 

3217 y[n//2+(nn-n):] = y[n//2+(nn-n)-1] 

3218 return y 

3219 

3220 

3221def moving_sum(x, n, mode='valid'): 
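    '''
    Moving sum of ``x`` over windows of ``n`` samples, computed via
    cumulative sums.

    The ``mode`` argument (``'valid'``, ``'full'`` or ``'same'``) selects
    the output length analogously to :py:func:`numpy.convolve`.
    '''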

3222 n = int(n) 

3223 cx = x.cumsum() 

3224 nn = len(x) 

3225 

3226 if mode == 'valid': 

3227 if nn-n+1 <= 0: 

3228 return num.zeros(0, dtype=cx.dtype) 

3229 y = num.zeros(nn-n+1, dtype=cx.dtype) 

3230 y[0] = cx[n-1] 

3231 y[1:nn-n+1] = cx[n:nn]-cx[0:nn-n] 

3232 

3233 if mode == 'full': 

3234 y = num.zeros(nn+n-1, dtype=cx.dtype) 

3235 if n <= nn: 

3236 y[0:n] = cx[0:n] 

3237 y[n:nn] = cx[n:nn]-cx[0:nn-n] 

3238 y[nn:nn+n-1] = cx[-1]-cx[nn-n:nn-1] 

3239 else: 

3240 y[0:nn] = cx[0:nn] 

3241 y[nn:n] = cx[nn-1] 

3242 y[n:nn+n-1] = cx[nn-1] - cx[0:nn-1] 

3243 

3244 if mode == 'same': 

3245 n1 = (n-1)//2 

3246 y = num.zeros(nn, dtype=cx.dtype) 

3247 if n <= nn: 

3248 y[0:n-n1] = cx[n1:n] 

3249 y[n-n1:nn-n1] = cx[n:nn]-cx[0:nn-n] 

3250 y[nn-n1:nn] = cx[nn-1] - cx[nn-n:nn-n+n1] 

3251 else: 

3252 y[0:max(0, nn-n1)] = cx[min(n1, nn):nn] 

3253 y[max(nn-n1, 0):min(n-n1, nn)] = cx[nn-1] 

3254 y[min(n-n1, nn):nn] = cx[nn-1] - cx[0:max(0, nn-(n-n1))] 

3255 

3256 return y 

3257 

3258 

3259def nextpow2(i): 

3260 return 2**int(math.ceil(math.log(i)/math.log(2.))) 

3261 

3262 

3263def snapper_w_offset(nmax, offset, delta, snapfun=math.ceil): 

3264 def snap(x): 

3265 return max(0, min(int(snapfun((x-offset)/delta)), nmax)) 

3266 return snap 

3267 

3268 

3269def snapper(nmax, delta, snapfun=math.ceil): 

3270 def snap(x): 

3271 return max(0, min(int(snapfun(x/delta)), nmax)) 

3272 return snap 

3273 

3274 

3275def apply_costaper(a, b, c, d, y, x0, dx): 

3276 abcd = num.array((a, b, c, d), dtype=float) 

3277 ja, jb, jc, jd = num.clip(num.ceil((abcd-x0)/dx).astype(int), 0, y.size) 

3278 y[:ja] = 0. 

3279 y[ja:jb] *= 0.5 \ 

3280 - 0.5*num.cos((dx*num.arange(ja, jb)-(a-x0))/(b-a)*num.pi) 

3281 y[jc:jd] *= 0.5 \ 

3282 + 0.5*num.cos((dx*num.arange(jc, jd)-(c-x0))/(d-c)*num.pi) 

3283 y[jd:] = 0. 

3284 

3285 

3286def span_costaper(a, b, c, d, y, x0, dx): 

3287 hi = snapper_w_offset(y.size, x0, dx) 

3288 return hi(a), hi(d) - hi(a) 

3289 

3290 

3291def costaper(a, b, c, d, nfreqs, deltaf): 

3292 hi = snapper(nfreqs, deltaf) 

3293 tap = num.zeros(nfreqs) 

3294 tap[hi(a):hi(b)] = 0.5 \ 

3295 - 0.5*num.cos((deltaf*num.arange(hi(a), hi(b))-a)/(b-a)*num.pi) 

3296 tap[hi(b):hi(c)] = 1. 

3297 tap[hi(c):hi(d)] = 0.5 \ 

3298 + 0.5*num.cos((deltaf*num.arange(hi(c), hi(d))-c)/(d-c)*num.pi) 

3299 

3300 return tap 

3301 

3302 

3303def t2ind(t, tdelta, snap=round): 

3304 return int(snap(t/tdelta)) 

3305 

3306 

3307def hilbert(x, N=None): 

3308 ''' 

3309     Return the analytic signal of x of length N, computed via the Hilbert transform.

3310 

3311 (from scipy.signal, but changed to use fft and ifft from numpy.fft) 

3312 ''' 

3313 

3314 x = num.asarray(x) 

3315 if N is None: 

3316 N = len(x) 

3317 if N <= 0: 

3318 raise ValueError('N must be positive.') 

3319 if num.iscomplexobj(x): 

3320 logger.warning('imaginary part of x ignored.') 

3321 x = num.real(x) 

3322 

3323 Xf = num.fft.fft(x, N, axis=0) 

3324 h = num.zeros(N) 

3325 if N % 2 == 0: 

3326 h[0] = h[N//2] = 1 

3327 h[1:N//2] = 2 

3328 else: 

3329 h[0] = 1 

3330 h[1:(N+1)//2] = 2 

3331 

3332 if len(x.shape) > 1: 

3333 h = h[:, num.newaxis] 

3334 x = num.fft.ifft(Xf*h) 

3335 return x 

3336 

3337 

3338def near(a, b, eps): 

3339 return abs(a-b) < eps 

3340 

3341 

3342def coroutine(func): 

3343 def wrapper(*args, **kwargs): 

3344 gen = func(*args, **kwargs) 

3345 next(gen) 

3346 return gen 

3347 

3348 wrapper.__name__ = func.__name__ 

3349 wrapper.__dict__ = func.__dict__ 

3350 wrapper.__doc__ = func.__doc__ 

3351 return wrapper 

3352 

3353 

3354class States(object): 

3355 ''' 

3356 Utility to store channel-specific state in coroutines. 

3357 ''' 

3358 

3359 def __init__(self): 

3360 self._states = {} 

3361 

3362 def get(self, tr): 

3363 k = tr.nslc_id 

3364 if k in self._states: 

3365 tmin, deltat, dtype, value = self._states[k] 

3366 if (near(tmin, tr.tmin, deltat/100.) 

3367 and near(deltat, tr.deltat, deltat/10000.) 

3368 and dtype == tr.ydata.dtype): 

3369 

3370 return value 

3371 

3372 return None 

3373 

3374 def set(self, tr, value): 

3375 k = tr.nslc_id 

3376 if k in self._states and self._states[k][-1] is not value: 

3377 self.free(self._states[k][-1]) 

3378 

3379 self._states[k] = (tr.tmax+tr.deltat, tr.deltat, tr.ydata.dtype, value) 

3380 

3381 def free(self, value): 

3382 pass 

3383 

3384 

3385@coroutine 

3386def co_list_append(list): 

3387 while True: 

3388 list.append((yield)) 

3389 

3390 

3391class ScipyBug(Exception): 

3392 pass 

3393 

3394 

3395@coroutine 

3396def co_lfilter(target, b, a): 

3397 ''' 

3398 Successively filter broken continuous trace data (coroutine). 

3399 

3400 Create coroutine which takes :py:class:`Trace` objects, filters their data 

3401 through :py:func:`scipy.signal.lfilter` and sends new :py:class:`Trace` 

3402 objects containing the filtered data to target. This is useful, if one 

3403 wants to filter a long continuous time series, which is split into many 

3404 successive traces without producing filter artifacts at trace boundaries. 

3405 

3406 Filter states are kept *per channel*, specifically, for each (network, 

3407     station, location, channel) combination occurring in the input traces, a

3408 separate state is created and maintained. This makes it possible to filter 

3409 multichannel or multistation data with only one :py:func:`co_lfilter` 

3410 instance. 

3411 

3412     Filter state is reset when gaps occur.

3413 

3414 Use it like this:: 

3415 

3416 from pyrocko.trace import co_lfilter, co_list_append 

3417 

3418 filtered_traces = [] 

3419         pipe = co_lfilter(co_list_append(filtered_traces), b, a)

3420 for trace in traces: 

3421 pipe.send(trace) 

3422 

3423 pipe.close() 

3424 

3425 ''' 

3426 

3427 try: 

3428 states = States() 

3429 output = None 

3430 while True: 

3431 input = (yield) 

3432 

3433 zi = states.get(input) 

3434 if zi is None: 

3435 zi = num.zeros(max(len(a), len(b))-1, dtype=float) 

3436 

3437 output = input.copy(data=False) 

3438 try: 

3439 ydata, zf = signal.lfilter(b, a, input.get_ydata(), zi=zi) 

3440 except ValueError: 

3441 raise ScipyBug( 

3442 'signal.lfilter failed: could be related to a bug ' 

3443 'in some older scipy versions, e.g. on opensuse42.1') 

3444 

3445 output.set_ydata(ydata) 

3446 states.set(input, zf) 

3447 target.send(output) 

3448 

3449 except GeneratorExit: 

3450 target.close() 

3451 

3452 

3453def co_antialias(target, q, n=None, ftype='fir'): 

3454 b, a, n = util.decimate_coeffs(q, n, ftype) 

3455 anti = co_lfilter(target, b, a) 

3456 return anti 

3457 

3458 

3459@coroutine 

3460def co_dropsamples(target, q, nfir): 

3461 try: 

3462 states = States() 

3463 while True: 

3464 tr = (yield) 

3465 newdeltat = q * tr.deltat 

3466 ioffset = states.get(tr) 

3467 if ioffset is None: 

3468             # for fir filter, the first nfir samples are polluted by

3469 # boundary effects; cut it off. 

3470 # for iir this may be (much) more, we do not correct for that. 

3471 # put sample instances to a time which is a multiple of the 

3472 # new sampling interval. 

3473 newtmin_want = math.ceil( 

3474 (tr.tmin+(nfir+1)*tr.deltat) / newdeltat) * newdeltat \ 

3475 - (nfir/2*tr.deltat) 

3476 ioffset = int(round((newtmin_want - tr.tmin)/tr.deltat)) 

3477 if ioffset < 0: 

3478 ioffset = ioffset % q 

3479 

3480 newtmin_have = tr.tmin + ioffset * tr.deltat 

3481 newtr = tr.copy(data=False) 

3482 newtr.deltat = newdeltat 

3483 # because the fir kernel shifts data by nfir/2 samples: 

3484 newtr.tmin = newtmin_have - (nfir/2*tr.deltat) 

3485 newtr.set_ydata(tr.get_ydata()[ioffset::q].copy()) 

3486 states.set(tr, (ioffset % q - tr.data_len() % q) % q) 

3487 target.send(newtr) 

3488 

3489 except GeneratorExit: 

3490 target.close() 

3491 

3492 

3493def co_downsample(target, q, n=None, ftype='fir'): 

3494 ''' 

3495 Successively downsample broken continuous trace data (coroutine). 

3496 

3497 Create coroutine which takes :py:class:`Trace` objects, downsamples their 

3498 data and sends new :py:class:`Trace` objects containing the downsampled 

3499 data to target. This is useful, if one wants to downsample a long 

3500 continuous time series, which is split into many successive traces without 

3501 producing filter artifacts and gaps at trace boundaries. 

3502 

3503 Filter states are kept *per channel*, specifically, for each (network, 

3504     station, location, channel) combination occurring in the input traces, a

3505 separate state is created and maintained. This makes it possible to filter 

3506 multichannel or multistation data with only one :py:func:`co_lfilter` 

3507 instance. 

3508 

3509     Filter state is reset when gaps occur. The sampling instances are chosen

3510     so that they occur at (or as close as possible to) even multiples of the

3511     sampling interval of the downsampled trace (based on system time).

3512 ''' 

3513 

3514 b, a, n = util.decimate_coeffs(q, n, ftype) 

3515 return co_antialias(co_dropsamples(target, q, n), q, n, ftype) 

3516 

3517 

3518@coroutine 

3519def co_downsample_to(target, deltat): 
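    '''
    Successively downsample broken continuous trace data to a given sampling
    interval ``deltat`` (coroutine).

    For each decimation sequence required by the incoming traces, a chain of
    :py:func:`co_downsample` stages is set up and reused; see
    :py:func:`co_downsample` for details on state handling.
    '''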

3520 

3521 decimators = {} 

3522 try: 

3523 while True: 

3524 tr = (yield) 

3525 ratio = deltat / tr.deltat 

3526 rratio = round(ratio) 

3527 if abs(rratio - ratio)/ratio > 0.0001: 

3528 raise util.UnavailableDecimation('ratio = %g' % ratio) 

3529 

3530 deci_seq = tuple(x for x in util.decitab(int(rratio)) if x != 1) 

3531 if deci_seq not in decimators: 

3532 pipe = target 

3533 for q in deci_seq[::-1]: 

3534 pipe = co_downsample(pipe, q) 

3535 

3536 decimators[deci_seq] = pipe 

3537 

3538 decimators[deci_seq].send(tr) 

3539 

3540 except GeneratorExit: 

3541 for g in decimators.values(): 

3542 g.close() 

3543 

3544 

3545class DomainChoice(StringChoice): 

3546 choices = [ 

3547 'time_domain', 

3548 'frequency_domain', 

3549 'envelope', 

3550 'absolute', 

3551 'cc_max_norm'] 

3552 

3553 

3554class MisfitSetup(Object): 

3555 ''' 

3556     Contains misfit setup to be used in :py:meth:`Trace.misfit`

3557 

3558 :param description: Description of the setup 

3559 :param norm: L-norm classifier 

3560 :param taper: Object of :py:class:`Taper` 

3561 :param filter: Object of :py:class:`FrequencyResponse` 

3562 :param domain: ['time_domain', 'frequency_domain', 'envelope', 'absolute', 

3563 'cc_max_norm'] 

3564 

3565     Can be dumped to a YAML file.
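
    Example (a sketch; guts objects serialize to YAML)::

        from pyrocko.trace import MisfitSetup, CosTaper

        setup = MisfitSetup(
            norm=2,
            taper=CosTaper(0., 1., 9., 10.),
            domain='time_domain')

        print(setup.dump())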

3566 ''' 

3567 

3568 xmltagname = 'misfitsetup' 

3569 description = String.T(optional=True) 

3570 norm = Int.T(optional=False) 

3571 taper = Taper.T(optional=False) 

3572 filter = FrequencyResponse.T(optional=True) 

3573 domain = DomainChoice.T(default='time_domain') 

3574 

3575 

3576def equalize_sampling_rates(trace_1, trace_2): 

3577 ''' 

3578 Equalize sampling rates of two traces (reduce higher sampling rate to 

3579 lower). 

3580 

3581 :param trace_1: :py:class:`Trace` object 

3582 :param trace_2: :py:class:`Trace` object 

3583 

3584     Returns both traces; where resampling is needed, the affected trace is replaced by a downsampled copy.

3585 ''' 

3586 

3587 if same_sampling_rate(trace_1, trace_2): 

3588 return trace_1, trace_2 

3589 

3590 if trace_1.deltat < trace_2.deltat: 

3591 t1_out = trace_1.copy() 

3592 t1_out.downsample_to(deltat=trace_2.deltat, snap=True) 

3593 logger.debug('Trace downsampled (return copy of trace): %s' 

3594 % '.'.join(t1_out.nslc_id)) 

3595 return t1_out, trace_2 

3596 

3597 elif trace_1.deltat > trace_2.deltat: 

3598 t2_out = trace_2.copy() 

3599 t2_out.downsample_to(deltat=trace_1.deltat, snap=True) 

3600 logger.debug('Trace downsampled (return copy of trace): %s' 

3601 % '.'.join(t2_out.nslc_id)) 

3602 return trace_1, t2_out 

3603 

3604 

3605def Lx_norm(u, v, norm=2): 

3606 ''' 

3607     Calculate the misfit value *m* and the normalization divisor *n*

3608     according to the given norm.

3609 

3610 The normalization divisor *n* is calculated from ``v``. 

3611 

3612 :param u: :py:class:`numpy.array` 

3613 :param v: :py:class:`numpy.array` 

3614 :param norm: (default = 2) 

3615 

3616 ``u`` and ``v`` must be of same size. 
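
    For norm :math:`x`, the computed quantities are

    .. math::

        m = \\Big( \\sum_i |v_i - u_i|^x \\Big)^{1/x}, \\qquad
        n = \\Big( \\sum_i |v_i|^x \\Big)^{1/x}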

3617 ''' 

3618 

3619 if norm == 1: 

3620 return ( 

3621 num.sum(num.abs(v-u)), 

3622 num.sum(num.abs(v))) 

3623 

3624 elif norm == 2: 

3625 return ( 

3626 num.sqrt(num.sum((v-u)**2)), 

3627 num.sqrt(num.sum(v**2))) 

3628 

3629 else: 

3630 return ( 

3631 num.power(num.sum(num.abs(num.power(v - u, norm))), 1./norm), 

3632 num.power(num.sum(num.abs(num.power(v, norm))), 1./norm)) 

3633 

3634 

3635def do_downsample(tr, deltat): 

3636 if abs(tr.deltat - deltat) / tr.deltat > 1e-6: 

3637 tr = tr.copy() 

3638 tr.downsample_to(deltat, snap=True, demean=False) 

3639 else: 

3640 if tr.tmin/tr.deltat > 1e-6 or tr.tmax/tr.deltat > 1e-6: 

3641 tr = tr.copy() 

3642 tr.snap() 

3643 return tr 

3644 

3645 

3646def do_extend(tr, tmin, tmax): 

3647 if tmin < tr.tmin or tmax > tr.tmax: 

3648 tr = tr.copy() 

3649 tr.extend(tmin=tmin, tmax=tmax, fillmethod='repeat') 

3650 

3651 return tr 

3652 

3653 

3654def do_pre_taper(tr, taper): 

3655 return tr.taper(taper, inplace=False, chop=True) 

3656 

3657 

3658def do_fft(tr, filter): 

3659 if filter is None: 

3660 return tr 

3661 else: 

3662 ndata = tr.ydata.size 

3663 nfft = nextpow2(ndata) 

3664 padded = num.zeros(nfft, dtype=float) 

3665 padded[:ndata] = tr.ydata 

3666 spectrum = num.fft.rfft(padded) 

3667 df = 1.0 / (tr.deltat * nfft) 

3668 frequencies = num.arange(spectrum.size)*df 

3669 return [tr, frequencies, spectrum] 

3670 

3671 

3672def do_filter(inp, filter): 

3673 if filter is None: 

3674 return inp 

3675 else: 

3676 tr, frequencies, spectrum = inp 

3677 spectrum *= filter.evaluate(frequencies) 

3678 return [tr, frequencies, spectrum] 

3679 

3680 

3681def do_ifft(inp): 

3682 if isinstance(inp, Trace): 

3683 return inp 

3684 else: 

3685 tr, _, spectrum = inp 

3686 ndata = tr.ydata.size 

3687 tr = tr.copy(data=False) 

3688 tr.set_ydata(num.fft.irfft(spectrum)[:ndata]) 

3689 return tr 

3690 

3691 

3692def check_alignment(t1, t2): 

3693 if abs(t1.tmin-t2.tmin) > t1.deltat * 1e-4 or \ 

3694 abs(t1.tmax - t2.tmax) > t1.deltat * 1e-4 or \ 

3695 t1.ydata.shape != t2.ydata.shape: 

3696 raise MisalignedTraces( 

3697 'Cannot calculate misfit of %s and %s due to misaligned ' 

3698 'traces.' % ('.'.join(t1.nslc_id), '.'.join(t2.nslc_id)))