1# http://pyrocko.org - GPLv3 

2# 

3# The Pyrocko Developers, 21st Century 

4# ---|P------/S----------~Lg---------- 

5''' 

6This module provides basic signal processing for seismic traces. 

7''' 

8 

9from __future__ import division, absolute_import 

10 

11import time 

12import math 

13import copy 

14import logging 

15import hashlib 

16from collections import defaultdict 

17 

18import numpy as num 

19from scipy import signal 

20 

21from pyrocko import util, orthodrome, pchain, model 

22from pyrocko.util import reuse 

23from pyrocko.guts import Object, Float, Int, String, List, \ 

24 StringChoice, Timestamp 

25from pyrocko.guts_array import Array 

26from pyrocko.model import squirrel_content 

27 

28# backward compatibility 

29from pyrocko.util import UnavailableDecimation # noqa 

30from pyrocko.response import ( # noqa 

31 FrequencyResponse, Evalresp, InverseEvalresp, PoleZeroResponse, 

32 ButterworthResponse, SampledResponse, IntegrationResponse, 

33 DifferentiationResponse, MultiplyResponse) 

34 

35 

36guts_prefix = 'pf' 

37 

38logger = logging.getLogger('pyrocko.trace') 

39 

40 

41@squirrel_content 

42class Trace(Object): 

43 

44 ''' 

45 Create new trace object. 

46 

47 A ``Trace`` object represents a single continuous strip of evenly sampled 

48 time series data. It is built from a 1D NumPy array containing the data 

49 samples and some attributes describing its beginning and ending time, its 

50 sampling rate and four string identifiers (its network, station, location 

51 and channel code). 

52 

53 :param network: network code 

54 :param station: station code 

55 :param location: location code 

56 :param channel: channel code 

57 :param extra: extra code 

58 :param tmin: system time of first sample in [s] 

59 :param tmax: system time of last sample in [s] (if set to ``None`` it is 

60 computed from length of ``ydata``) 

61 :param deltat: sampling interval in [s] 

62 :param ydata: 1D numpy array with data samples (can be ``None`` when 

63 ``tmax`` is not ``None``) 

64 :param mtime: optional modification time 

65 

66 :param meta: additional meta information (not used, but maintained by the 

67 library) 

68 

69 The length of the network, station, location and channel codes is not 

70 resricted by this software, but data formats like SAC, Mini-SEED or GSE 

71 have different limits on the lengths of these codes. The codes set here are 

72 silently truncated when the trace is stored 

73 ''' 

74 

75 network = String.T(default='', help='Deployment/network code (1-8)') 

76 station = String.T(default='STA', help='Station code (1-5)') 

77 location = String.T(default='', help='Location code (0-2)') 

78 channel = String.T(default='', help='Channel code (3)') 

79 extra = String.T(default='', help='Extra/custom code') 

80 

81 tmin = Timestamp.T(default=Timestamp.D('1970-01-01 00:00:00')) 

82 tmax = Timestamp.T() 

83 

84 deltat = Float.T(default=1.0) 

85 ydata = Array.T(optional=True, shape=(None,), serialize_as='base64+meta') 

86 

87 mtime = Timestamp.T(optional=True) 

88 

89 cached_frequencies = {} 

90 

91 def __init__(self, network='', station='STA', location='', channel='', 

92 tmin=0., tmax=None, deltat=1., ydata=None, mtime=None, 

93 meta=None, extra=''): 

94 

95 Object.__init__(self, init_props=False) 

96 

97 time_float = util.get_time_float() 

98 

99 if not isinstance(tmin, time_float): 

100 tmin = Trace.tmin.regularize_extra(tmin) 

101 

102 if tmax is not None and not isinstance(tmax, time_float): 

103 tmax = Trace.tmax.regularize_extra(tmax) 

104 

105 if mtime is not None and not isinstance(mtime, time_float): 

106 mtime = Trace.mtime.regularize_extra(mtime) 

107 

108 self._growbuffer = None 

109 

110 tmin = time_float(tmin) 

111 if tmax is not None: 

112 tmax = time_float(tmax) 

113 

114 if mtime is None: 

115 mtime = time_float(time.time()) 

116 

117 self.network, self.station, self.location, self.channel, \ 

118 self.extra = [ 

119 reuse(x) for x in ( 

120 network, station, location, channel, extra)] 

121 

122 self.tmin = tmin 

123 self.deltat = deltat 

124 

125 if tmax is None: 

126 if ydata is not None: 

127 self.tmax = self.tmin + (ydata.size-1)*self.deltat 

128 else: 

129 raise Exception( 

130 'fixme: trace must be created with tmax or ydata') 

131 else: 

132 n = int(round((tmax - self.tmin) / self.deltat)) + 1 

133 self.tmax = self.tmin + (n - 1) * self.deltat 

134 

135 self.meta = meta 

136 self.ydata = ydata 

137 self.mtime = mtime 

138 self._update_ids() 

139 self.file = None 

140 self._pchain = None 

141 

142 def __str__(self): 

143 fmt = min(9, max(0, -int(math.floor(math.log10(self.deltat))))) 

144 s = 'Trace (%s, %s, %s, %s)\n' % self.nslc_id 

145 s += ' timerange: %s - %s\n' % ( 

146 util.time_to_str(self.tmin, format=fmt), 

147 util.time_to_str(self.tmax, format=fmt)) 

148 

149 s += ' delta t: %g\n' % self.deltat 

150 if self.meta: 

151 for k in sorted(self.meta.keys()): 

152 s += ' %s: %s\n' % (k, self.meta[k]) 

153 return s 

154 

155 @property 

156 def codes(self): 

157 from pyrocko.squirrel import model 

158 return model.CodesNSLCE( 

159 self.network, self.station, self.location, self.channel, 

160 self.extra) 

161 

162 @property 

163 def time_span(self): 

164 return self.tmin, self.tmax 

165 

166 @property 

167 def summary(self): 

168 return '%s %-16s %s %g' % ( 

169 self.__class__.__name__, self.str_codes, self.str_time_span, 

170 self.deltat) 

171 

172 def __getstate__(self): 

173 return (self.network, self.station, self.location, self.channel, 

174 self.tmin, self.tmax, self.deltat, self.mtime, 

175 self.ydata, self.meta, self.extra) 

176 

177 def __setstate__(self, state): 

178 if len(state) == 11: 

179 self.network, self.station, self.location, self.channel, \ 

180 self.tmin, self.tmax, self.deltat, self.mtime, \ 

181 self.ydata, self.meta, self.extra = state 

182 

183 elif len(state) == 12: 

184 # backward compatibility 0 

185 self.network, self.station, self.location, self.channel, \ 

186 self.tmin, self.tmax, self.deltat, self.mtime, \ 

187 self.ydata, self.meta, _, self.extra = state 

188 

189 elif len(state) == 10: 

190 # backward compatibility 1 

191 self.network, self.station, self.location, self.channel, \ 

192 self.tmin, self.tmax, self.deltat, self.mtime, \ 

193 self.ydata, self.meta = state 

194 

195 self.extra = '' 

196 

197 else: 

198 # backward compatibility 2 

199 self.network, self.station, self.location, self.channel, \ 

200 self.tmin, self.tmax, self.deltat, self.mtime = state 

201 self.ydata = None 

202 self.meta = None 

203 self.extra = '' 

204 

205 self._growbuffer = None 

206 self._update_ids() 

207 

208 def hash(self, unsafe=False): 

209 sha1 = hashlib.sha1() 

210 for code in self.nslc_id: 

211 sha1.update(code.encode()) 

212 sha1.update(self.tmin.hex().encode()) 

213 sha1.update(self.tmax.hex().encode()) 

214 sha1.update(self.deltat.hex().encode()) 

215 

216 if self.ydata is not None: 

217 if not unsafe: 

218 sha1.update(memoryview(self.ydata)) 

219 else: 

220 sha1.update(memoryview(self.ydata[:32])) 

221 sha1.update(memoryview(self.ydata[-32:])) 

222 

223 return sha1.hexdigest() 

224 

225 def name(self): 

226 ''' 

227 Get a short string description. 

228 ''' 

229 

230 s = '%s.%s.%s.%s, %s, %s' % (self.nslc_id + ( 

231 util.time_to_str(self.tmin), 

232 util.time_to_str(self.tmax))) 

233 

234 return s 

235 

236 def __eq__(self, other): 

237 return ( 

238 isinstance(other, Trace) 

239 and self.network == other.network 

240 and self.station == other.station 

241 and self.location == other.location 

242 and self.channel == other.channel 

243 and (abs(self.deltat - other.deltat) 

244 < (self.deltat + other.deltat)*1e-6) 

245 and abs(self.tmin-other.tmin) < self.deltat*0.01 

246 and abs(self.tmax-other.tmax) < self.deltat*0.01 

247 and num.all(self.ydata == other.ydata)) 

248 

249 def almost_equal(self, other, rtol=1e-5, atol=0.0): 

250 return ( 

251 self.network == other.network 

252 and self.station == other.station 

253 and self.location == other.location 

254 and self.channel == other.channel 

255 and (abs(self.deltat - other.deltat) 

256 < (self.deltat + other.deltat)*1e-6) 

257 and abs(self.tmin-other.tmin) < self.deltat*0.01 

258 and abs(self.tmax-other.tmax) < self.deltat*0.01 

259 and num.allclose(self.ydata, other.ydata, rtol=rtol, atol=atol)) 

260 

261 def assert_almost_equal(self, other, rtol=1e-5, atol=0.0): 

262 

263 assert self.network == other.network, \ 

264 'network codes differ: %s, %s' % (self.network, other.network) 

265 assert self.station == other.station, \ 

266 'station codes differ: %s, %s' % (self.station, other.station) 

267 assert self.location == other.location, \ 

268 'location codes differ: %s, %s' % (self.location, other.location) 

269 assert self.channel == other.channel, 'channel codes differ' 

270 assert (abs(self.deltat - other.deltat) 

271 < (self.deltat + other.deltat)*1e-6), \ 

272 'sampling intervals differ %g, %g' % (self.deltat, other.delta) 

273 assert abs(self.tmin-other.tmin) < self.deltat*0.01, \ 

274 'start times differ: %s, %s' % ( 

275 util.time_to_str(self.tmin), util.time_to_str(other.tmin)) 

276 assert abs(self.tmax-other.tmax) < self.deltat*0.01, \ 

277 'end times differ: %s, %s' % ( 

278 util.time_to_str(self.tmax), util.time_to_str(other.tmax)) 

279 

280 assert num.allclose(self.ydata, other.ydata, rtol=rtol, atol=atol), \ 

281 'trace values differ' 

282 

283 def __hash__(self): 

284 return id(self) 

285 

286 def __call__(self, t, clip=False, snap=round): 

287 it = int(snap((t-self.tmin)/self.deltat)) 

288 if clip: 

289 it = max(0, min(it, self.ydata.size-1)) 

290 else: 

291 if it < 0 or self.ydata.size <= it: 

292 raise IndexError() 

293 

294 return self.tmin+it*self.deltat, self.ydata[it] 

295 

296 def interpolate(self, t, clip=False): 

297 ''' 

298 Value of trace between supporting points through linear interpolation. 

299 

300 :param t: time instant 

301 :param clip: whether to clip indices to trace ends 

302 ''' 

303 

304 t0, y0 = self(t, clip=clip, snap=math.floor) 

305 t1, y1 = self(t, clip=clip, snap=math.ceil) 

306 if t0 == t1: 

307 return y0 

308 else: 

309 return y0+(t-t0)/(t1-t0)*(y1-y0) 

310 

311 def index_clip(self, i): 

312 ''' 

313 Clip index to valid range. 

314 ''' 

315 

316 return min(max(0, i), self.ydata.size) 

317 

318 def add(self, other, interpolate=True, left=0., right=0.): 

319 ''' 

320 Add values of other trace (self += other). 

321 

322 Add values of ``other`` trace to the values of ``self``, where it 

323 intersects with ``other``. This method does not change the extent of 

324 ``self``. If ``interpolate`` is ``True`` (the default), the values of 

325 ``other`` to be added are interpolated at sampling instants of 

326 ``self``. Linear interpolation is performed. In this case the sampling 

327 rate of ``other`` must be equal to or lower than that of ``self``. If 

328 ``interpolate`` is ``False``, the sampling rates of the two traces must 

329 match. 

330 ''' 

331 

332 if interpolate: 

333 assert self.deltat <= other.deltat \ 

334 or same_sampling_rate(self, other) 

335 

336 other_xdata = other.get_xdata() 

337 xdata = self.get_xdata() 

338 self.ydata += num.interp( 

339 xdata, other_xdata, other.ydata, left=left, right=left) 

340 else: 

341 assert self.deltat == other.deltat 

342 ioff = int(round((other.tmin-self.tmin)/self.deltat)) 

343 ibeg = max(0, ioff) 

344 iend = min(self.data_len(), ioff+other.data_len()) 

345 self.ydata[ibeg:iend] += other.ydata[ibeg-ioff:iend-ioff] 

346 

347 def mult(self, other, interpolate=True): 

348 ''' 

349 Muliply with values of other trace ``(self *= other)``. 

350 

351 Multiply values of ``other`` trace to the values of ``self``, where it 

352 intersects with ``other``. This method does not change the extent of 

353 ``self``. If ``interpolate`` is ``True`` (the default), the values of 

354 ``other`` to be multiplied are interpolated at sampling instants of 

355 ``self``. Linear interpolation is performed. In this case the sampling 

356 rate of ``other`` must be equal to or lower than that of ``self``. If 

357 ``interpolate`` is ``False``, the sampling rates of the two traces must 

358 match. 

359 ''' 

360 

361 if interpolate: 

362 assert self.deltat <= other.deltat or \ 

363 same_sampling_rate(self, other) 

364 

365 other_xdata = other.get_xdata() 

366 xdata = self.get_xdata() 

367 self.ydata *= num.interp( 

368 xdata, other_xdata, other.ydata, left=0., right=0.) 

369 else: 

370 assert self.deltat == other.deltat 

371 ibeg1 = int(round((other.tmin-self.tmin)/self.deltat)) 

372 ibeg2 = int(round((self.tmin-other.tmin)/self.deltat)) 

373 iend1 = int(round((other.tmax-self.tmin)/self.deltat))+1 

374 iend2 = int(round((self.tmax-other.tmin)/self.deltat))+1 

375 

376 ibeg1 = self.index_clip(ibeg1) 

377 iend1 = self.index_clip(iend1) 

378 ibeg2 = self.index_clip(ibeg2) 

379 iend2 = self.index_clip(iend2) 

380 

381 self.ydata[ibeg1:iend1] *= other.ydata[ibeg2:iend2] 

382 

383 def max(self): 

384 ''' 

385 Get time and value of data maximum. 

386 ''' 

387 

388 i = num.argmax(self.ydata) 

389 return self.tmin + i*self.deltat, self.ydata[i] 

390 

391 def min(self): 

392 ''' 

393 Get time and value of data minimum. 

394 ''' 

395 

396 i = num.argmin(self.ydata) 

397 return self.tmin + i*self.deltat, self.ydata[i] 

398 

399 def absmax(self): 

400 ''' 

401 Get time and value of maximum of the absolute of data. 

402 ''' 

403 

404 tmi, mi = self.min() 

405 tma, ma = self.max() 

406 if abs(mi) > abs(ma): 

407 return tmi, abs(mi) 

408 else: 

409 return tma, abs(ma) 

410 

411 def set_codes( 

412 self, network=None, station=None, location=None, channel=None, 

413 extra=None): 

414 

415 ''' 

416 Set network, station, location, and channel codes. 

417 ''' 

418 

419 if network is not None: 

420 self.network = network 

421 if station is not None: 

422 self.station = station 

423 if location is not None: 

424 self.location = location 

425 if channel is not None: 

426 self.channel = channel 

427 if extra is not None: 

428 self.extra = extra 

429 

430 self._update_ids() 

431 

432 def set_network(self, network): 

433 self.network = network 

434 self._update_ids() 

435 

436 def set_station(self, station): 

437 self.station = station 

438 self._update_ids() 

439 

440 def set_location(self, location): 

441 self.location = location 

442 self._update_ids() 

443 

444 def set_channel(self, channel): 

445 self.channel = channel 

446 self._update_ids() 

447 

448 def overlaps(self, tmin, tmax): 

449 ''' 

450 Check if trace has overlap with a given time span. 

451 ''' 

452 return not (tmax < self.tmin or self.tmax < tmin) 

453 

454 def is_relevant(self, tmin, tmax, selector=None): 

455 ''' 

456 Check if trace has overlap with a given time span and matches a 

457 condition callback. (internal use) 

458 ''' 

459 

460 return not (tmax <= self.tmin or self.tmax < tmin) \ 

461 and (selector is None or selector(self)) 

462 

463 def _update_ids(self): 

464 ''' 

465 Update dependent ids. 

466 ''' 

467 

468 self.full_id = ( 

469 self.network, self.station, self.location, self.channel, self.tmin) 

470 self.nslc_id = reuse( 

471 (self.network, self.station, self.location, self.channel)) 

472 

473 def prune_from_reuse_cache(self): 

474 util.deuse(self.nslc_id) 

475 util.deuse(self.network) 

476 util.deuse(self.station) 

477 util.deuse(self.location) 

478 util.deuse(self.channel) 

479 

480 def set_mtime(self, mtime): 

481 ''' 

482 Set modification time of the trace. 

483 ''' 

484 

485 self.mtime = mtime 

486 

487 def get_xdata(self): 

488 ''' 

489 Create array for time axis. 

490 ''' 

491 

492 if self.ydata is None: 

493 raise NoData() 

494 

495 return self.tmin \ 

496 + num.arange(len(self.ydata), dtype=num.float64) * self.deltat 

497 

498 def get_ydata(self): 

499 ''' 

500 Get data array. 

501 ''' 

502 

503 if self.ydata is None: 

504 raise NoData() 

505 

506 return self.ydata 

507 

508 def set_ydata(self, new_ydata): 

509 ''' 

510 Replace data array. 

511 ''' 

512 

513 self.drop_growbuffer() 

514 self.ydata = new_ydata 

515 self.tmax = self.tmin+(len(self.ydata)-1)*self.deltat 

516 

517 def data_len(self): 

518 if self.ydata is not None: 

519 return self.ydata.size 

520 else: 

521 return int(round((self.tmax-self.tmin)/self.deltat)) + 1 

522 

523 def drop_data(self): 

524 ''' 

525 Forget data, make dataless trace. 

526 ''' 

527 

528 self.drop_growbuffer() 

529 self.ydata = None 

530 

531 def drop_growbuffer(self): 

532 ''' 

533 Detach the traces grow buffer. 

534 ''' 

535 

536 self._growbuffer = None 

537 self._pchain = None 

538 

539 def copy(self, data=True): 

540 ''' 

541 Make a deep copy of the trace. 

542 ''' 

543 

544 tracecopy = copy.copy(self) 

545 tracecopy.drop_growbuffer() 

546 if data: 

547 tracecopy.ydata = self.ydata.copy() 

548 tracecopy.meta = copy.deepcopy(self.meta) 

549 return tracecopy 

550 

551 def crop_zeros(self): 

552 ''' 

553 Remove any zeros at beginning and end. 

554 ''' 

555 

556 indices = num.where(self.ydata != 0.0)[0] 

557 if indices.size == 0: 

558 raise NoData() 

559 

560 ibeg = indices[0] 

561 iend = indices[-1]+1 

562 if ibeg == 0 and iend == self.ydata.size-1: 

563 return 

564 

565 self.drop_growbuffer() 

566 self.ydata = self.ydata[ibeg:iend].copy() 

567 self.tmin = self.tmin+ibeg*self.deltat 

568 self.tmax = self.tmin+(len(self.ydata)-1)*self.deltat 

569 self._update_ids() 

570 

571 def append(self, data): 

572 ''' 

573 Append data to the end of the trace. 

574 

575 To make this method efficient when successively very few or even single 

576 samples are appended, a larger grow buffer is allocated upon first 

577 invocation. The traces data is then changed to be a view into the 

578 currently filled portion of the grow buffer array. 

579 ''' 

580 

581 assert self.ydata.dtype == data.dtype 

582 newlen = data.size + self.ydata.size 

583 if self._growbuffer is None or self._growbuffer.size < newlen: 

584 self._growbuffer = num.empty(newlen*2, dtype=self.ydata.dtype) 

585 self._growbuffer[:self.ydata.size] = self.ydata 

586 self._growbuffer[self.ydata.size:newlen] = data 

587 self.ydata = self._growbuffer[:newlen] 

588 self.tmax = self.tmin + (newlen-1)*self.deltat 

589 

590 def chop( 

591 self, tmin, tmax, inplace=True, include_last=False, 

592 snap=(round, round), want_incomplete=True): 

593 

594 ''' 

595 Cut the trace to given time span. 

596 

597 If the ``inplace`` argument is True (the default) the trace is cut in 

598 place, otherwise a new trace with the cut part is returned. By 

599 default, the indices where to start and end the trace data array are 

600 determined by rounding of ``tmin`` and ``tmax`` to sampling instances 

601 using Python's :py:func:`round` function. This behaviour can be changed 

602 with the ``snap`` argument, which takes a tuple of two functions (one 

603 for the lower and one for the upper end) to be used instead of 

604 :py:func:`round`. The last sample is by default not included unless 

605 ``include_last`` is set to True. If the given time span exceeds the 

606 available time span of the trace, the available part is returned, 

607 unless ``want_incomplete`` is set to False - in that case, a 

608 :py:exc:`NoData` exception is raised. This exception is always raised, 

609 when the requested time span does dot overlap with the trace's time 

610 span. 

611 ''' 

612 

613 if want_incomplete: 

614 if tmax <= self.tmin-self.deltat or self.tmax+self.deltat < tmin: 

615 raise NoData() 

616 else: 

617 if tmin < self.tmin or self.tmax < tmax: 

618 raise NoData() 

619 

620 ibeg = max(0, t2ind(tmin-self.tmin, self.deltat, snap[0])) 

621 iplus = 0 

622 if include_last: 

623 iplus = 1 

624 

625 iend = min( 

626 self.data_len(), 

627 t2ind(tmax-self.tmin, self.deltat, snap[1])+iplus) 

628 

629 if ibeg >= iend: 

630 raise NoData() 

631 

632 obj = self 

633 if not inplace: 

634 obj = self.copy(data=False) 

635 

636 self.drop_growbuffer() 

637 if self.ydata is not None: 

638 obj.ydata = self.ydata[ibeg:iend].copy() 

639 else: 

640 obj.ydata = None 

641 

642 obj.tmin = obj.tmin+ibeg*obj.deltat 

643 obj.tmax = obj.tmin+((iend-ibeg)-1)*obj.deltat 

644 

645 obj._update_ids() 

646 

647 return obj 

648 

649 def downsample(self, ndecimate, snap=False, initials=None, demean=False, 

650 ftype='fir-remez'): 

651 ''' 

652 Downsample trace by a given integer factor. 

653 

654 Antialiasing filter details: 

655 

656 * ``iir``: A Chebyshev type I digital filter of order 8 with maximum 

657 ripple of 0.05 dB in the passband. 

658 * ``fir``: A FIR filter using a Hamming window and 31 taps and a 

659 well-behaved phase response. 

660 * ``fir-remez``: A FIR filter based on the Remez exchange algorithm 

661 with ``45*ndecimate`` taps and a cutoff at 75% Nyquist frequency. 

662 

663 Comparison of the digital filters: 

664 

665 .. figure :: ../../static/downsampling-filter-comparison.png 

666 :width: 60% 

667 :alt: Comparison of the downsampling filters. 

668 

669 :param ndecimate: decimation factor, avoid values larger than 8 

670 :param snap: whether to put the new sampling instances closest to 

671 multiples of the sampling rate. 

672 :param initials: ``None``, ``True``, or initial conditions for the 

673 anti-aliasing filter, obtained from a previous run. In the latter 

674 two cases the final state of the filter is returned instead of 

675 ``None``. 

676 :param demean: whether to demean the signal before filtering. 

677 :param ftype: which FIR filter to use, choose from 

678 ``'iir'``, ``'fir'``, ``'fir-remez'``. Default is ``'fir-remez'``. 

679 ''' 

680 newdeltat = self.deltat*ndecimate 

681 if snap: 

682 ilag = int(round( 

683 (math.ceil(self.tmin / newdeltat) * newdeltat - self.tmin) 

684 / self.deltat)) 

685 else: 

686 ilag = 0 

687 

688 if snap and ilag > 0 and ilag < self.ydata.size: 

689 data = self.ydata.astype(num.float64) 

690 self.tmin += ilag*self.deltat 

691 else: 

692 data = self.ydata.astype(num.float64) 

693 

694 if demean: 

695 data -= num.mean(data) 

696 

697 if data.size != 0: 

698 result = util.decimate( 

699 data, ndecimate, ftype=ftype, zi=initials, ioff=ilag) 

700 else: 

701 result = data 

702 

703 if initials is None: 

704 self.ydata, finals = result, None 

705 else: 

706 self.ydata, finals = result 

707 

708 self.deltat = reuse(self.deltat*ndecimate) 

709 self.tmax = self.tmin+(len(self.ydata)-1)*self.deltat 

710 self._update_ids() 

711 

712 return finals 

713 

714 def downsample_to(self, deltat, snap=False, allow_upsample_max=1, 

715 initials=None, demean=False, ftype='fir-remez'): 

716 

717 ''' 

718 Downsample to given sampling rate. 

719 

720 Tries to downsample the trace to a target sampling interval of 

721 ``deltat``. This runs the :py:meth:`Trace.downsample` one or several 

722 times. If ``allow_upsample_max`` is set to a value larger than 1, 

723 intermediate upsampling steps are allowed, in order to increase the 

724 number of possible downsampling ratios. 

725 

726 If the requested ratio is not supported, an exception of type 

727 :py:exc:`pyrocko.util.UnavailableDecimation` is raised. 

728 

729 :param deltat: desired sampling interval in [s] 

730 :param allow_upsample_max: maximum upsampling of the signal to achieve 

731 the desired deltat. Default is ``1``. 

732 :param snap: whether to put the new sampling instances closest to 

733 multiples of the sampling rate. 

734 :param initials: ``None``, ``True``, or initial conditions for the 

735 anti-aliasing filter, obtained from a previous run. In the latter 

736 two cases the final state of the filter is returned instead of 

737 ``None``. 

738 :param demean: whether to demean the signal before filtering. 

739 :param ftype: which FIR filter to use, choose from 

740 ``'iir'``, ``'fir'``, ``'fir-remez'``. Default is ``'fir-remez'``. 

741 See also: :meth:`Trace.downample` 

742 ''' 

743 

744 ratio = deltat/self.deltat 

745 rratio = round(ratio) 

746 

747 ok = False 

748 for upsratio in range(1, allow_upsample_max+1): 

749 dratio = (upsratio/self.deltat) / (1./deltat) 

750 if abs(dratio - round(dratio)) / dratio < 0.0001 and \ 

751 util.decitab(int(round(dratio))): 

752 

753 ok = True 

754 break 

755 

756 if not ok: 

757 raise util.UnavailableDecimation('ratio = %g' % ratio) 

758 

759 if upsratio > 1: 

760 self.drop_growbuffer() 

761 ydata = self.ydata 

762 self.ydata = num.zeros( 

763 ydata.size*upsratio-(upsratio-1), ydata.dtype) 

764 self.ydata[::upsratio] = ydata 

765 for i in range(1, upsratio): 

766 self.ydata[i::upsratio] = \ 

767 float(i)/upsratio * ydata[:-1] \ 

768 + float(upsratio-i)/upsratio * ydata[1:] 

769 self.deltat = self.deltat/upsratio 

770 

771 ratio = deltat/self.deltat 

772 rratio = round(ratio) 

773 

774 deci_seq = util.decitab(int(rratio)) 

775 finals = [] 

776 for i, ndecimate in enumerate(deci_seq): 

777 if ndecimate != 1: 

778 xinitials = None 

779 if initials is not None: 

780 xinitials = initials[i] 

781 finals.append(self.downsample( 

782 ndecimate, snap=snap, initials=xinitials, demean=demean, 

783 ftype=ftype)) 

784 

785 if initials is not None: 

786 return finals 

787 

788 def resample(self, deltat): 

789 ''' 

790 Resample to given sampling rate ``deltat``. 

791 

792 Resampling is performed in the frequency domain. 

793 ''' 

794 

795 ndata = self.ydata.size 

796 ntrans = nextpow2(ndata) 

797 fntrans2 = ntrans * self.deltat/deltat 

798 ntrans2 = int(round(fntrans2)) 

799 deltat2 = self.deltat * float(ntrans)/float(ntrans2) 

800 ndata2 = int(round(ndata*self.deltat/deltat2)) 

801 if abs(fntrans2 - ntrans2) > 1e-7: 

802 logger.warning( 

803 'resample: requested deltat %g could not be matched exactly: ' 

804 '%g' % (deltat, deltat2)) 

805 

806 data = self.ydata 

807 data_pad = num.zeros(ntrans, dtype=float) 

808 data_pad[:ndata] = data 

809 fdata = num.fft.rfft(data_pad) 

810 fdata2 = num.zeros((ntrans2+1)//2, dtype=fdata.dtype) 

811 n = min(fdata.size, fdata2.size) 

812 fdata2[:n] = fdata[:n] 

813 data2 = num.fft.irfft(fdata2) 

814 data2 = data2[:ndata2] 

815 data2 *= float(ntrans2) / float(ntrans) 

816 self.deltat = deltat2 

817 self.set_ydata(data2) 

818 

819 def resample_simple(self, deltat): 

820 tyear = 3600*24*365. 

821 

822 if deltat == self.deltat: 

823 return 

824 

825 if abs(self.deltat - deltat) * tyear/deltat < deltat: 

826 logger.warning( 

827 'resample_simple: less than one sample would have to be ' 

828 'inserted/deleted per year. Doing nothing.') 

829 return 

830 

831 ninterval = int(round(deltat / (self.deltat - deltat))) 

832 if abs(ninterval) < 20: 

833 logger.error( 

834 'resample_simple: sample insertion/deletion interval less ' 

835 'than 20. results would be erroneous.') 

836 raise ResamplingFailed() 

837 

838 delete = False 

839 if ninterval < 0: 

840 ninterval = - ninterval 

841 delete = True 

842 

843 tyearbegin = util.year_start(self.tmin) 

844 

845 nmin = int(round((self.tmin - tyearbegin)/deltat)) 

846 

847 ibegin = (((nmin-1)//ninterval)+1) * ninterval - nmin 

848 nindices = (len(self.ydata) - ibegin - 1) / ninterval + 1 

849 if nindices > 0: 

850 indices = ibegin + num.arange(nindices) * ninterval 

851 data_split = num.split(self.ydata, indices) 

852 data = [] 

853 for ln, h in zip(data_split[:-1], data_split[1:]): 

854 if delete: 

855 ln = ln[:-1] 

856 

857 data.append(ln) 

858 if not delete: 

859 if ln.size == 0: 

860 v = h[0] 

861 else: 

862 v = 0.5*(ln[-1] + h[0]) 

863 data.append(num.array([v], dtype=ln.dtype)) 

864 

865 data.append(data_split[-1]) 

866 

867 ydata_new = num.concatenate(data) 

868 

869 self.tmin = tyearbegin + nmin * deltat 

870 self.deltat = deltat 

871 self.set_ydata(ydata_new) 

872 

873 def stretch(self, tmin_new, tmax_new): 

874 ''' 

875 Stretch signal while preserving sample rate using sinc interpolation. 

876 

877 :param tmin_new: new time of first sample 

878 :param tmax_new: new time of last sample 

879 

880 This method can be used to correct for a small linear time drift or to 

881 introduce sub-sample time shifts. The amount of stretching is limited 

882 to 10% by the implementation and is expected to be much smaller than 

883 that by the approximations used. 

884 ''' 

885 

886 from pyrocko import signal_ext 

887 

888 i_control = num.array([0, self.ydata.size-1], dtype=num.int64) 

889 t_control = num.array([tmin_new, tmax_new], dtype=float) 

890 

891 r = (tmax_new - tmin_new) / self.deltat + 1.0 

892 n_new = int(round(r)) 

893 if abs(n_new - r) > 0.001: 

894 n_new = int(math.floor(r)) 

895 

896 assert n_new >= 2 

897 

898 tmax_new = tmin_new + (n_new-1) * self.deltat 

899 

900 ydata_new = num.empty(n_new, dtype=float) 

901 signal_ext.antidrift(i_control, t_control, 

902 self.ydata.astype(float), 

903 tmin_new, self.deltat, ydata_new) 

904 

905 self.tmin = tmin_new 

906 self.set_ydata(ydata_new) 

907 self._update_ids() 

908 

909 def nyquist_check(self, frequency, intro='Corner frequency', warn=True, 

910 raise_exception=False): 

911 

912 ''' 

913 Check if a given frequency is above the Nyquist frequency of the trace. 

914 

915 :param intro: string used to introduce the warning/error message 

916 :param warn: whether to emit a warning 

917 :param raise_exception: whether to raise an :py:exc:`AboveNyquist` 

918 exception. 

919 ''' 

920 

921 if frequency >= 0.5/self.deltat: 

922 message = '%s (%g Hz) is equal to or higher than nyquist ' \ 

923 'frequency (%g Hz). (Trace %s)' \ 

924 % (intro, frequency, 0.5/self.deltat, self.name()) 

925 if warn: 

926 logger.warning(message) 

927 if raise_exception: 

928 raise AboveNyquist(message) 

929 

930 def lowpass(self, order, corner, nyquist_warn=True, 

931 nyquist_exception=False, demean=True): 

932 

933 ''' 

934 Apply Butterworth lowpass to the trace. 

935 

936 :param order: order of the filter 

937 :param corner: corner frequency of the filter 

938 

939 Mean is removed before filtering. 

940 ''' 

941 

942 self.nyquist_check( 

943 corner, 'Corner frequency of lowpass', nyquist_warn, 

944 nyquist_exception) 

945 

946 (b, a) = _get_cached_filter_coefs( 

947 order, [corner*2.0*self.deltat], btype='low') 

948 

949 if len(a) != order+1 or len(b) != order+1: 

950 logger.warning( 

951 'Erroneous filter coefficients returned by ' 

952 'scipy.signal.butter(). You may need to downsample the ' 

953 'signal before filtering.') 

954 

955 data = self.ydata.astype(num.float64) 

956 if demean: 

957 data -= num.mean(data) 

958 self.drop_growbuffer() 

959 self.ydata = signal.lfilter(b, a, data) 

960 

961 def highpass(self, order, corner, nyquist_warn=True, 

962 nyquist_exception=False, demean=True): 

963 

964 ''' 

965 Apply butterworth highpass to the trace. 

966 

967 :param order: order of the filter 

968 :param corner: corner frequency of the filter 

969 

970 Mean is removed before filtering. 

971 ''' 

972 

973 self.nyquist_check( 

974 corner, 'Corner frequency of highpass', nyquist_warn, 

975 nyquist_exception) 

976 

977 (b, a) = _get_cached_filter_coefs( 

978 order, [corner*2.0*self.deltat], btype='high') 

979 

980 data = self.ydata.astype(num.float64) 

981 if len(a) != order+1 or len(b) != order+1: 

982 logger.warning( 

983 'Erroneous filter coefficients returned by ' 

984 'scipy.signal.butter(). You may need to downsample the ' 

985 'signal before filtering.') 

986 if demean: 

987 data -= num.mean(data) 

988 self.drop_growbuffer() 

989 self.ydata = signal.lfilter(b, a, data) 

990 

991 def bandpass(self, order, corner_hp, corner_lp, demean=True): 

992 ''' 

993 Apply butterworth bandpass to the trace. 

994 

995 :param order: order of the filter 

996 :param corner_hp: lower corner frequency of the filter 

997 :param corner_lp: upper corner frequency of the filter 

998 

999 Mean is removed before filtering. 

1000 ''' 

1001 

1002 self.nyquist_check(corner_hp, 'Lower corner frequency of bandpass') 

1003 self.nyquist_check(corner_lp, 'Higher corner frequency of bandpass') 

1004 (b, a) = _get_cached_filter_coefs( 

1005 order, 

1006 [corner*2.0*self.deltat for corner in (corner_hp, corner_lp)], 

1007 btype='band') 

1008 data = self.ydata.astype(num.float64) 

1009 if demean: 

1010 data -= num.mean(data) 

1011 self.drop_growbuffer() 

1012 self.ydata = signal.lfilter(b, a, data) 

1013 

1014 def bandstop(self, order, corner_hp, corner_lp, demean=True): 

1015 ''' 

1016 Apply bandstop (attenuates frequencies in band) to the trace. 

1017 

1018 :param order: order of the filter 

1019 :param corner_hp: lower corner frequency of the filter 

1020 :param corner_lp: upper corner frequency of the filter 

1021 

1022 Mean is removed before filtering. 

1023 ''' 

1024 

1025 self.nyquist_check(corner_hp, 'Lower corner frequency of bandstop') 

1026 self.nyquist_check(corner_lp, 'Higher corner frequency of bandstop') 

1027 (b, a) = _get_cached_filter_coefs( 

1028 order, 

1029 [corner*2.0*self.deltat for corner in (corner_hp, corner_lp)], 

1030 btype='bandstop') 

1031 data = self.ydata.astype(num.float64) 

1032 if demean: 

1033 data -= num.mean(data) 

1034 self.drop_growbuffer() 

1035 self.ydata = signal.lfilter(b, a, data) 

1036 

1037 def envelope(self, inplace=True): 

1038 ''' 

1039 Calculate the envelope of the trace. 

1040 

1041 :param inplace: calculate envelope in place 

1042 

1043 The calculation follows: 

1044 

1045 .. math:: 

1046 

1047 Y' = \\sqrt{Y^2+H(Y)^2} 

1048 

1049 where H is the Hilbert-Transform of the signal Y. 

1050 ''' 

1051 

1052 y = self.ydata.astype(float) 

1053 env = num.abs(hilbert(y)) 

1054 if inplace: 

1055 self.drop_growbuffer() 

1056 self.ydata = env 

1057 else: 

1058 tr = self.copy(data=False) 

1059 tr.ydata = env 

1060 return tr 

1061 

1062 def taper(self, taperer, inplace=True, chop=False): 

1063 ''' 

1064 Apply a :py:class:`Taper` to the trace. 

1065 

1066 :param taperer: instance of :py:class:`Taper` subclass 

1067 :param inplace: apply taper inplace 

1068 :param chop: if ``True``: exclude tapered parts from the resulting 

1069 trace 

1070 ''' 

1071 

1072 if not inplace: 

1073 tr = self.copy() 

1074 else: 

1075 tr = self 

1076 

1077 if chop: 

1078 i, n = taperer.span(tr.ydata, tr.tmin, tr.deltat) 

1079 tr.shift(i*tr.deltat) 

1080 tr.set_ydata(tr.ydata[i:i+n]) 

1081 

1082 taperer(tr.ydata, tr.tmin, tr.deltat) 

1083 

1084 if not inplace: 

1085 return tr 

1086 

1087 def whiten(self, order=6): 

1088 ''' 

1089 Whiten signal in time domain using autoregression and recursive filter. 

1090 

1091 :param order: order of the autoregression process 

1092 ''' 

1093 

1094 b, a = self.whitening_coefficients(order) 

1095 self.drop_growbuffer() 

1096 self.ydata = signal.lfilter(b, a, self.ydata) 

1097 

1098 def whitening_coefficients(self, order=6): 

1099 ar = yulewalker(self.ydata, order) 

1100 b, a = [1.] + ar.tolist(), [1.] 

1101 return b, a 

1102 

1103 def ampspec_whiten( 

1104 self, 

1105 width, 

1106 td_taper='auto', 

1107 fd_taper='auto', 

1108 pad_to_pow2=True, 

1109 demean=True): 

1110 

1111 ''' 

1112 Whiten signal via frequency domain using moving average on amplitude 

1113 spectra. 

1114 

1115 :param width: width of smoothing kernel [Hz] 

1116 :param td_taper: time domain taper, object of type :py:class:`Taper` or 

1117 ``None`` or ``'auto'``. 

1118 :param fd_taper: frequency domain taper, object of type 

1119 :py:class:`Taper` or ``None`` or ``'auto'``. 

1120 :param pad_to_pow2: whether to pad the signal with zeros up to a length 

1121 of 2^n 

1122 :param demean: whether to demean the signal before tapering 

1123 

1124 The signal is first demeaned and then tapered using ``td_taper``. Then, 

1125 the spectrum is calculated and inversely weighted with a smoothed 

1126 version of its amplitude spectrum. A moving average is used for the 

1127 smoothing. The smoothed spectrum is then tapered using ``fd_taper``. 

1128 Finally, the smoothed and tapered spectrum is back-transformed into the 

1129 time domain. 

1130 

1131 If ``td_taper`` is set to ``'auto'``, ``CosFader(1.0/width)`` is used. 

1132 If ``fd_taper`` is set to ``'auto'``, ``CosFader(width)`` is used. 

1133 ''' 

1134 

1135 ndata = self.data_len() 

1136 

1137 if pad_to_pow2: 

1138 ntrans = nextpow2(ndata) 

1139 else: 

1140 ntrans = ndata 

1141 

1142 df = 1./(ntrans*self.deltat) 

1143 nw = int(round(width/df)) 

1144 if ndata//2+1 <= nw: 

1145 raise TraceTooShort( 

1146 'Samples in trace: %s, samples needed: %s' % (ndata, nw)) 

1147 

1148 if td_taper == 'auto': 

1149 td_taper = CosFader(1./width) 

1150 

1151 if fd_taper == 'auto': 

1152 fd_taper = CosFader(width) 

1153 

1154 if td_taper: 

1155 self.taper(td_taper) 

1156 

1157 ydata = self.get_ydata().astype(float) 

1158 if demean: 

1159 ydata -= ydata.mean() 

1160 

1161 spec = num.fft.rfft(ydata, ntrans) 

1162 

1163 amp = num.abs(spec) 

1164 nspec = amp.size 

1165 csamp = num.cumsum(amp) 

1166 amp_smoothed = num.empty(nspec, dtype=csamp.dtype) 

1167 n1, n2 = nw//2, nw//2 + nspec - nw 

1168 amp_smoothed[n1:n2] = (csamp[nw:] - csamp[:-nw]) / nw 

1169 amp_smoothed[:n1] = amp_smoothed[n1] 

1170 amp_smoothed[n2:] = amp_smoothed[n2-1] 

1171 

1172 denom = amp_smoothed * amp 

1173 numer = amp 

1174 eps = num.mean(denom) * 1e-9 

1175 if eps == 0.0: 

1176 eps = 1e-9 

1177 

1178 numer += eps 

1179 denom += eps 

1180 spec *= numer/denom 

1181 

1182 if fd_taper: 

1183 fd_taper(spec, 0., df) 

1184 

1185 ydata = num.fft.irfft(spec) 

1186 self.set_ydata(ydata[:ndata]) 

1187 

1188 def _get_cached_freqs(self, nf, deltaf): 

1189 ck = (nf, deltaf) 

1190 if ck not in Trace.cached_frequencies: 

1191 Trace.cached_frequencies[ck] = deltaf * num.arange( 

1192 nf, dtype=float) 

1193 

1194 return Trace.cached_frequencies[ck] 

1195 

1196 def bandpass_fft(self, corner_hp, corner_lp): 

1197 ''' 

1198 Apply boxcar bandbpass to trace (in spectral domain). 

1199 ''' 

1200 

1201 n = len(self.ydata) 

1202 n2 = nextpow2(n) 

1203 data = num.zeros(n2, dtype=num.float64) 

1204 data[:n] = self.ydata 

1205 fdata = num.fft.rfft(data) 

1206 freqs = self._get_cached_freqs(len(fdata), 1./(self.deltat*n2)) 

1207 fdata[0] = 0.0 

1208 fdata *= num.logical_and(corner_hp < freqs, freqs < corner_lp) 

1209 data = num.fft.irfft(fdata) 

1210 self.drop_growbuffer() 

1211 self.ydata = data[:n] 

1212 

1213 def shift(self, tshift): 

1214 ''' 

1215 Time shift the trace. 

1216 ''' 

1217 

1218 self.tmin += tshift 

1219 self.tmax += tshift 

1220 self._update_ids() 

1221 

1222 def snap(self, inplace=True, interpolate=False): 

1223 ''' 

1224 Shift trace samples to nearest even multiples of the sampling rate. 

1225 

1226 :param inplace: (boolean) snap traces inplace 

1227 

1228 If ``inplace`` is ``False`` and the difference of tmin and tmax of 

1229 both, the snapped and the original trace is smaller than 0.01 x deltat, 

1230 :py:func:`snap` returns the unsnapped instance of the original trace. 

1231 ''' 

1232 

1233 tmin = round(self.tmin/self.deltat)*self.deltat 

1234 tmax = tmin + (self.ydata.size-1)*self.deltat 

1235 

1236 if inplace: 

1237 xself = self 

1238 else: 

1239 if abs(self.tmin - tmin) < 1e-2*self.deltat and \ 

1240 abs(self.tmax - tmax) < 1e-2*self.deltat: 

1241 return self 

1242 

1243 xself = self.copy() 

1244 

1245 if interpolate: 

1246 from pyrocko import signal_ext 

1247 n = xself.data_len() 

1248 ydata_new = num.empty(n, dtype=float) 

1249 i_control = num.array([0, n-1], dtype=num.int64) 

1250 tref = tmin 

1251 t_control = num.array( 

1252 [float(xself.tmin-tref), float(xself.tmax-tref)], 

1253 dtype=float) 

1254 

1255 signal_ext.antidrift(i_control, t_control, 

1256 xself.ydata.astype(float), 

1257 float(tmin-tref), xself.deltat, ydata_new) 

1258 

1259 xself.ydata = ydata_new 

1260 

1261 xself.tmin = tmin 

1262 xself.tmax = tmax 

1263 xself._update_ids() 

1264 

1265 return xself 

1266 

1267 def fix_deltat_rounding_errors(self): 

1268 ''' 

1269 Try to undo sampling rate rounding errors. 

1270 

1271 See :py:func:`fix_deltat_rounding_errors`. 

1272 ''' 

1273 

1274 self.deltat = fix_deltat_rounding_errors(self.deltat) 

1275 self.tmax = self.tmin + (self.data_len() - 1) * self.deltat 

1276 

1277 def sta_lta_centered(self, tshort, tlong, quad=True, scalingmethod=1): 

1278 ''' 

1279 Run special STA/LTA filter where the short time window is centered on 

1280 the long time window. 

1281 

1282 :param tshort: length of short time window in [s] 

1283 :param tlong: length of long time window in [s] 

1284 :param quad: whether to square the data prior to applying the STA/LTA 

1285 filter 

1286 :param scalingmethod: integer key to select how output values are 

1287 scaled / normalized (``1``, ``2``, or ``3``) 

1288 

1289 ============= ====================================== =========== 

1290 Scalingmethod Implementation Range 

1291 ============= ====================================== =========== 

1292 ``1`` As/Al* Ts/Tl [0,1] 

1293 ``2`` (As/Al - 1) / (Tl/Ts - 1) [-Ts/Tl,1] 

1294 ``3`` Like ``2`` but clipping range at zero [0,1] 

1295 ============= ====================================== =========== 

1296 

1297 ''' 

1298 

1299 nshort = int(round(tshort/self.deltat)) 

1300 nlong = int(round(tlong/self.deltat)) 

1301 

1302 assert nshort < nlong 

1303 if nlong > len(self.ydata): 

1304 raise TraceTooShort( 

1305 'Samples in trace: %s, samples needed: %s' 

1306 % (len(self.ydata), nlong)) 

1307 

1308 if quad: 

1309 sqrdata = self.ydata**2 

1310 else: 

1311 sqrdata = self.ydata 

1312 

1313 mavg_short = moving_avg(sqrdata, nshort) 

1314 mavg_long = moving_avg(sqrdata, nlong) 

1315 

1316 self.drop_growbuffer() 

1317 

1318 if scalingmethod not in (1, 2, 3): 

1319 raise Exception('Invalid argument to scalingrange argument.') 

1320 

1321 if scalingmethod == 1: 

1322 self.ydata = mavg_short/mavg_long * float(nshort)/float(nlong) 

1323 elif scalingmethod in (2, 3): 

1324 self.ydata = (mavg_short/mavg_long - 1.) \ 

1325 / ((float(nlong)/float(nshort)) - 1) 

1326 

1327 if scalingmethod == 3: 

1328 self.ydata = num.maximum(self.ydata, 0.) 

1329 

1330 def sta_lta_right(self, tshort, tlong, quad=True, scalingmethod=1): 

1331 ''' 

1332 Run special STA/LTA filter where the short time window is overlapping 

1333 with the last part of the long time window. 

1334 

1335 :param tshort: length of short time window in [s] 

1336 :param tlong: length of long time window in [s] 

1337 :param quad: whether to square the data prior to applying the STA/LTA 

1338 filter 

1339 :param scalingmethod: integer key to select how output values are 

1340 scaled / normalized (``1``, ``2``, or ``3``) 

1341 

1342 ============= ====================================== =========== 

1343 Scalingmethod Implementation Range 

1344 ============= ====================================== =========== 

1345 ``1`` As/Al* Ts/Tl [0,1] 

1346 ``2`` (As/Al - 1) / (Tl/Ts - 1) [-Ts/Tl,1] 

1347 ``3`` Like ``2`` but clipping range at zero [0,1] 

1348 ============= ====================================== =========== 

1349 

1350 With ``scalingmethod=1``, the values produced by this variant of the 

1351 STA/LTA are equivalent to 

1352 

1353 .. math:: 

1354 s_i = \\frac{s}{l} \\frac{\\frac{1}{s}\\sum_{j=i}^{i+s-1} f_j} 

1355 {\\frac{1}{l}\\sum_{j=i+s-l}^{i+s-1} f_j} 

1356 

1357 where :math:`f_j` are the input samples, :math:`s` are the number of 

1358 samples in the short time window and :math:`l` are the number of 

1359 samples in the long time window. 

1360 ''' 

1361 

1362 n = self.data_len() 

1363 tmin = self.tmin 

1364 

1365 nshort = max(1, int(round(tshort/self.deltat))) 

1366 nlong = max(1, int(round(tlong/self.deltat))) 

1367 

1368 assert nshort < nlong 

1369 

1370 if nlong > len(self.ydata): 

1371 raise TraceTooShort( 

1372 'Samples in trace: %s, samples needed: %s' 

1373 % (len(self.ydata), nlong)) 

1374 

1375 if quad: 

1376 sqrdata = self.ydata**2 

1377 else: 

1378 sqrdata = self.ydata 

1379 

1380 nshift = int(math.floor(0.5 * (nlong - nshort))) 

1381 if nlong % 2 != 0 and nshort % 2 == 0: 

1382 nshift += 1 

1383 

1384 mavg_short = moving_avg(sqrdata, nshort)[nshift:] 

1385 mavg_long = moving_avg(sqrdata, nlong)[:sqrdata.size-nshift] 

1386 

1387 self.drop_growbuffer() 

1388 

1389 if scalingmethod not in (1, 2, 3): 

1390 raise Exception('Invalid argument to scalingrange argument.') 

1391 

1392 if scalingmethod == 1: 

1393 ydata = mavg_short/mavg_long * nshort/nlong 

1394 elif scalingmethod in (2, 3): 

1395 ydata = (mavg_short/mavg_long - 1.) \ 

1396 / ((float(nlong)/float(nshort)) - 1) 

1397 

1398 if scalingmethod == 3: 

1399 ydata = num.maximum(self.ydata, 0.) 

1400 

1401 self.set_ydata(ydata) 

1402 

1403 self.shift((math.ceil(0.5*nlong) - nshort + 1) * self.deltat) 

1404 

1405 self.chop( 

1406 tmin + (nlong - nshort) * self.deltat, 

1407 tmin + (n - nshort) * self.deltat) 

1408 

1409 def peaks(self, threshold, tsearch, 

1410 deadtime=False, 

1411 nblock_duration_detection=100): 

1412 

1413 ''' 

1414 Detect peaks above a given threshold (method 1). 

1415 

1416 From every instant, where the signal rises above ``threshold``, a time 

1417 length of ``tsearch`` seconds is searched for a maximum. A list with 

1418 tuples (time, value) for each detected peak is returned. The 

1419 ``deadtime`` argument turns on a special deadtime duration detection 

1420 algorithm useful in combination with recursive STA/LTA filters. 

1421 ''' 

1422 

1423 y = self.ydata 

1424 above = num.where(y > threshold, 1, 0) 

1425 deriv = num.zeros(y.size, dtype=num.int8) 

1426 deriv[1:] = above[1:]-above[:-1] 

1427 itrig_positions = num.nonzero(deriv > 0)[0] 

1428 tpeaks = [] 

1429 apeaks = [] 

1430 tzeros = [] 

1431 tzero = self.tmin 

1432 

1433 for itrig_pos in itrig_positions: 

1434 ibeg = itrig_pos 

1435 iend = min( 

1436 len(self.ydata), 

1437 itrig_pos + int(math.ceil(tsearch/self.deltat))) 

1438 ipeak = num.argmax(y[ibeg:iend]) 

1439 tpeak = self.tmin + (ipeak+ibeg)*self.deltat 

1440 apeak = y[ibeg+ipeak] 

1441 

1442 if tpeak < tzero: 

1443 continue 

1444 

1445 if deadtime: 

1446 ibeg = itrig_pos 

1447 iblock = 0 

1448 nblock = nblock_duration_detection 

1449 totalsum = 0. 

1450 while True: 

1451 if ibeg+iblock*nblock >= len(y): 

1452 tzero = self.tmin + (len(y)-1) * self.deltat 

1453 break 

1454 

1455 logy = num.log( 

1456 y[ibeg+iblock*nblock:ibeg+(iblock+1)*nblock]) 

1457 logy[0] += totalsum 

1458 ysum = num.cumsum(logy) 

1459 totalsum = ysum[-1] 

1460 below = num.where(ysum <= 0., 1, 0) 

1461 deriv = num.zeros(ysum.size, dtype=num.int8) 

1462 deriv[1:] = below[1:]-below[:-1] 

1463 izero_positions = num.nonzero(deriv > 0)[0] + iblock*nblock 

1464 if len(izero_positions) > 0: 

1465 tzero = self.tmin + self.deltat * ( 

1466 ibeg + izero_positions[0]) 

1467 break 

1468 iblock += 1 

1469 else: 

1470 tzero = ibeg*self.deltat + self.tmin + tsearch 

1471 

1472 tpeaks.append(tpeak) 

1473 apeaks.append(apeak) 

1474 tzeros.append(tzero) 

1475 

1476 if deadtime: 

1477 return tpeaks, apeaks, tzeros 

1478 else: 

1479 return tpeaks, apeaks 

1480 

1481 def peaks2(self, threshold, tsearch): 

1482 

1483 ''' 

1484 Detect peaks above a given threshold (method 2). 

1485 

1486 This variant of peak detection is a bit more robust (and slower) than 

1487 the one implemented in :py:meth:`Trace.peaks`. First all samples with 

1488 ``a[i-1] < a[i] > a[i+1]`` are masked as potential peaks. From these, 

1489 iteratively the one with the maximum amplitude ``a[j]`` and time 

1490 ``t[j]`` is choosen and potential peaks within 

1491 ``t[j] - tsearch, t[j] + tsearch`` 

1492 are discarded. The algorithm stops, when ``a[j] < threshold`` or when 

1493 no more potential peaks are left. 

1494 ''' 

1495 

1496 a = num.copy(self.ydata) 

1497 

1498 amin = num.min(a) 

1499 

1500 a[0] = amin 

1501 a[1: -1][num.diff(a, 2) <= 0.] = amin 

1502 a[-1] = amin 

1503 

1504 data = [] 

1505 while True: 

1506 imax = num.argmax(a) 

1507 amax = a[imax] 

1508 

1509 if amax < threshold or amax == amin: 

1510 break 

1511 

1512 data.append((self.tmin + imax * self.deltat, amax)) 

1513 

1514 ntsearch = int(round(tsearch / self.deltat)) 

1515 a[max(imax-ntsearch//2, 0):min(imax+ntsearch//2, a.size)] = amin 

1516 

1517 if data: 

1518 data.sort() 

1519 tpeaks, apeaks = list(zip(*data)) 

1520 else: 

1521 tpeaks, apeaks = [], [] 

1522 

1523 return tpeaks, apeaks 

1524 

1525 def extend(self, tmin=None, tmax=None, fillmethod='zeros'): 

1526 ''' 

1527 Extend trace to given span. 

1528 

1529 :param tmin: begin time of new span 

1530 :param tmax: end time of new span 

1531 :param fillmethod: ``'zeros'``, ``'repeat'``, ``'mean'``, or 

1532 ``'median'`` 

1533 ''' 

1534 

1535 nold = self.ydata.size 

1536 

1537 if tmin is not None: 

1538 nl = min(0, int(round((tmin-self.tmin)/self.deltat))) 

1539 else: 

1540 nl = 0 

1541 

1542 if tmax is not None: 

1543 nh = max(nold - 1, int(round((tmax-self.tmin)/self.deltat))) 

1544 else: 

1545 nh = nold - 1 

1546 

1547 n = nh - nl + 1 

1548 data = num.zeros(n, dtype=self.ydata.dtype) 

1549 data[-nl:-nl + nold] = self.ydata 

1550 if self.ydata.size >= 1: 

1551 if fillmethod == 'repeat': 

1552 data[:-nl] = self.ydata[0] 

1553 data[-nl + nold:] = self.ydata[-1] 

1554 elif fillmethod == 'median': 

1555 v = num.median(self.ydata) 

1556 data[:-nl] = v 

1557 data[-nl + nold:] = v 

1558 elif fillmethod == 'mean': 

1559 v = num.mean(self.ydata) 

1560 data[:-nl] = v 

1561 data[-nl + nold:] = v 

1562 

1563 self.drop_growbuffer() 

1564 self.ydata = data 

1565 

1566 self.tmin += nl * self.deltat 

1567 self.tmax = self.tmin + (self.ydata.size - 1) * self.deltat 

1568 

1569 self._update_ids() 

1570 

1571 def transfer(self, 

1572 tfade=0., 

1573 freqlimits=None, 

1574 transfer_function=None, 

1575 cut_off_fading=True, 

1576 demean=True, 

1577 invert=False): 

1578 

1579 ''' 

1580 Return new trace with transfer function applied (convolution). 

1581 

1582 :param tfade: rise/fall time in seconds of taper applied in timedomain 

1583 at both ends of trace. 

1584 :param freqlimits: 4-tuple with corner frequencies in Hz. 

1585 :param transfer_function: FrequencyResponse object; must provide a 

1586 method 'evaluate(freqs)', which returns the transfer function 

1587 coefficients at the frequencies 'freqs'. 

1588 :param cut_off_fading: whether to cut off rise/fall interval in output 

1589 trace. 

1590 :param demean: remove mean before applying transfer function 

1591 :param invert: set to True to do a deconvolution 

1592 ''' 

1593 

1594 if transfer_function is None: 

1595 transfer_function = FrequencyResponse() 

1596 

1597 if self.tmax - self.tmin <= tfade*2.: 

1598 raise TraceTooShort( 

1599 'Trace %s.%s.%s.%s too short for fading length setting. ' 

1600 'trace length = %g, fading length = %g' 

1601 % (self.nslc_id + (self.tmax-self.tmin, tfade))) 

1602 

1603 if freqlimits is None and ( 

1604 transfer_function is None or transfer_function.is_scalar()): 

1605 

1606 # special case for flat responses 

1607 

1608 output = self.copy() 

1609 data = self.ydata 

1610 ndata = data.size 

1611 

1612 if transfer_function is not None: 

1613 c = num.abs(transfer_function.evaluate(num.ones(1))[0]) 

1614 

1615 if invert: 

1616 c = 1.0/c 

1617 

1618 data *= c 

1619 

1620 if tfade != 0.0: 

1621 data *= costaper( 

1622 0., tfade, self.deltat*(ndata-1)-tfade, self.deltat*ndata, 

1623 ndata, self.deltat) 

1624 

1625 output.ydata = data 

1626 

1627 else: 

1628 ndata = self.ydata.size 

1629 ntrans = nextpow2(ndata*1.2) 

1630 coefs = self._get_tapered_coefs( 

1631 ntrans, freqlimits, transfer_function, invert=invert) 

1632 

1633 data = self.ydata 

1634 

1635 data_pad = num.zeros(ntrans, dtype=float) 

1636 data_pad[:ndata] = data 

1637 if demean: 

1638 data_pad[:ndata] -= data.mean() 

1639 

1640 if tfade != 0.0: 

1641 data_pad[:ndata] *= costaper( 

1642 0., tfade, self.deltat*(ndata-1)-tfade, self.deltat*ndata, 

1643 ndata, self.deltat) 

1644 

1645 fdata = num.fft.rfft(data_pad) 

1646 fdata *= coefs 

1647 ddata = num.fft.irfft(fdata) 

1648 output = self.copy() 

1649 output.ydata = ddata[:ndata] 

1650 

1651 if cut_off_fading and tfade != 0.0: 

1652 try: 

1653 output.chop(output.tmin+tfade, output.tmax-tfade, inplace=True) 

1654 except NoData: 

1655 raise TraceTooShort( 

1656 'Trace %s.%s.%s.%s too short for fading length setting. ' 

1657 'trace length = %g, fading length = %g' 

1658 % (self.nslc_id + (self.tmax-self.tmin, tfade))) 

1659 else: 

1660 output.ydata = output.ydata.copy() 

1661 

1662 return output 

1663 

1664 def differentiate(self, n=1, order=4, inplace=True): 

1665 ''' 

1666 Approximate first or second derivative of the trace. 

1667 

1668 :param n: 1 for first derivative, 2 for second 

1669 :param order: order of the approximation 2 and 4 are supported 

1670 :param inplace: if ``True`` the trace is differentiated in place, 

1671 otherwise a new trace object with the derivative is returned. 

1672 

1673 Raises :py:exc:`ValueError` for unsupported `n` or `order`. 

1674 

1675 See :py:func:`~pyrocko.util.diff_fd` for implementation details. 

1676 ''' 

1677 

1678 ddata = util.diff_fd(n, order, self.deltat, self.ydata) 

1679 

1680 if inplace: 

1681 self.ydata = ddata 

1682 else: 

1683 output = self.copy(data=False) 

1684 output.set_ydata(ddata) 

1685 return output 

1686 

1687 def drop_chain_cache(self): 

1688 if self._pchain: 

1689 self._pchain.clear() 

1690 

1691 def init_chain(self): 

1692 self._pchain = pchain.Chain( 

1693 do_downsample, 

1694 do_extend, 

1695 do_pre_taper, 

1696 do_fft, 

1697 do_filter, 

1698 do_ifft) 

1699 

1700 def run_chain(self, tmin, tmax, deltat, setup, nocache): 

1701 if setup.domain == 'frequency_domain': 

1702 _, _, data = self._pchain( 

1703 (self, deltat), 

1704 (tmin, tmax), 

1705 (setup.taper,), 

1706 (setup.filter,), 

1707 (setup.filter,), 

1708 nocache=nocache) 

1709 

1710 return num.abs(data), num.abs(data) 

1711 

1712 else: 

1713 processed = self._pchain( 

1714 (self, deltat), 

1715 (tmin, tmax), 

1716 (setup.taper,), 

1717 (setup.filter,), 

1718 (setup.filter,), 

1719 (), 

1720 nocache=nocache) 

1721 

1722 if setup.domain == 'time_domain': 

1723 data = processed.get_ydata() 

1724 

1725 elif setup.domain == 'envelope': 

1726 processed = processed.envelope(inplace=False) 

1727 

1728 elif setup.domain == 'absolute': 

1729 processed.set_ydata(num.abs(processed.get_ydata())) 

1730 

1731 return processed.get_ydata(), processed 

1732 

1733 def misfit(self, candidate, setup, nocache=False, debug=False): 

1734 ''' 

1735 Calculate misfit and normalization factor against candidate trace. 

1736 

1737 :param candidate: :py:class:`Trace` object 

1738 :param setup: :py:class:`MisfitSetup` object 

1739 :returns: tuple ``(m, n)``, where m is the misfit value and n is the 

1740 normalization divisor 

1741 

1742 If the sampling rates of ``self`` and ``candidate`` differ, the trace 

1743 with the higher sampling rate will be downsampled. 

1744 ''' 

1745 

1746 a = self 

1747 b = candidate 

1748 

1749 for tr in (a, b): 

1750 if not tr._pchain: 

1751 tr.init_chain() 

1752 

1753 deltat = max(a.deltat, b.deltat) 

1754 tmin = min(a.tmin, b.tmin) - deltat 

1755 tmax = max(a.tmax, b.tmax) + deltat 

1756 

1757 adata, aproc = a.run_chain(tmin, tmax, deltat, setup, nocache) 

1758 bdata, bproc = b.run_chain(tmin, tmax, deltat, setup, nocache) 

1759 

1760 if setup.domain != 'cc_max_norm': 

1761 m, n = Lx_norm(bdata, adata, norm=setup.norm) 

1762 else: 

1763 ctr = correlate(aproc, bproc, mode='full', normalization='normal') 

1764 ccmax = ctr.max()[1] 

1765 m = 0.5 - 0.5 * ccmax 

1766 n = 0.5 

1767 

1768 if debug: 

1769 return m, n, aproc, bproc 

1770 else: 

1771 return m, n 

1772 

1773 def spectrum(self, pad_to_pow2=False, tfade=None): 

1774 ''' 

1775 Get FFT spectrum of trace. 

1776 

1777 :param pad_to_pow2: whether to zero-pad the data to next larger 

1778 power-of-two length 

1779 :param tfade: ``None`` or a time length in seconds, to apply cosine 

1780 shaped tapers to both 

1781 

1782 :returns: a tuple with (frequencies, values) 

1783 ''' 

1784 

1785 ndata = self.ydata.size 

1786 

1787 if pad_to_pow2: 

1788 ntrans = nextpow2(ndata) 

1789 else: 

1790 ntrans = ndata 

1791 

1792 if tfade is None: 

1793 ydata = self.ydata 

1794 else: 

1795 ydata = self.ydata * costaper( 

1796 0., tfade, self.deltat*(ndata-1)-tfade, self.deltat*ndata, 

1797 ndata, self.deltat) 

1798 

1799 fydata = num.fft.rfft(ydata, ntrans) 

1800 df = 1./(ntrans*self.deltat) 

1801 fxdata = num.arange(len(fydata))*df 

1802 return fxdata, fydata 

1803 

1804 def multi_filter(self, filter_freqs, bandwidth): 

1805 

1806 class Gauss(FrequencyResponse): 

1807 f0 = Float.T() 

1808 a = Float.T(default=1.0) 

1809 

1810 def __init__(self, f0, a=1.0, **kwargs): 

1811 FrequencyResponse.__init__(self, f0=f0, a=a, **kwargs) 

1812 

1813 def evaluate(self, freqs): 

1814 omega0 = 2.*math.pi*self.f0 

1815 omega = 2.*math.pi*freqs 

1816 return num.exp(-((omega-omega0) 

1817 / (self.a*omega0))**2) 

1818 

1819 freqs, coefs = self.spectrum() 

1820 n = self.data_len() 

1821 nfilt = len(filter_freqs) 

1822 signal_tf = num.zeros((nfilt, n)) 

1823 centroid_freqs = num.zeros(nfilt) 

1824 for ifilt, f0 in enumerate(filter_freqs): 

1825 taper = Gauss(f0, a=bandwidth) 

1826 weights = taper.evaluate(freqs) 

1827 nhalf = freqs.size 

1828 analytic_spec = num.zeros(n, dtype=complex) 

1829 analytic_spec[:nhalf] = coefs*weights 

1830 

1831 enorm = num.abs(analytic_spec[:nhalf])**2 

1832 enorm /= num.sum(enorm) 

1833 

1834 if n % 2 == 0: 

1835 analytic_spec[1:nhalf-1] *= 2. 

1836 else: 

1837 analytic_spec[1:nhalf] *= 2. 

1838 

1839 analytic = num.fft.ifft(analytic_spec) 

1840 signal_tf[ifilt, :] = num.abs(analytic) 

1841 

1842 enorm = num.abs(analytic_spec[:nhalf])**2 

1843 enorm /= num.sum(enorm) 

1844 centroid_freqs[ifilt] = num.sum(freqs*enorm) 

1845 

1846 return centroid_freqs, signal_tf 

1847 

1848 def _get_tapered_coefs( 

1849 self, ntrans, freqlimits, transfer_function, invert=False): 

1850 

1851 deltaf = 1./(self.deltat*ntrans) 

1852 nfreqs = ntrans//2 + 1 

1853 transfer = num.ones(nfreqs, dtype=complex) 

1854 hi = snapper(nfreqs, deltaf) 

1855 if freqlimits is not None: 

1856 a, b, c, d = freqlimits 

1857 freqs = num.arange(hi(d)-hi(a), dtype=float)*deltaf \ 

1858 + hi(a)*deltaf 

1859 

1860 if invert: 

1861 coeffs = transfer_function.evaluate(freqs) 

1862 if num.any(coeffs == 0.0): 

1863 raise InfiniteResponse('%s.%s.%s.%s' % self.nslc_id) 

1864 

1865 transfer[hi(a):hi(d)] = 1.0 / transfer_function.evaluate(freqs) 

1866 else: 

1867 transfer[hi(a):hi(d)] = transfer_function.evaluate(freqs) 

1868 

1869 tapered_transfer = costaper(a, b, c, d, nfreqs, deltaf)*transfer 

1870 else: 

1871 if invert: 

1872 raise Exception( 

1873 'transfer: `freqlimits` must be given when `invert` is ' 

1874 'set to `True`') 

1875 

1876 freqs = num.arange(nfreqs) * deltaf 

1877 tapered_transfer = transfer_function.evaluate(freqs) 

1878 

1879 tapered_transfer[0] = 0.0 # don't introduce static offsets 

1880 return tapered_transfer 

1881 

1882 def fill_template(self, template, **additional): 

1883 ''' 

1884 Fill string template with trace metadata. 

1885 

1886 Uses normal python '%(placeholder)s' string templates. The following 

1887 placeholders are considered: ``network``, ``station``, ``location``, 

1888 ``channel``, ``tmin`` (time of first sample), ``tmax`` (time of last 

1889 sample), ``tmin_ms``, ``tmax_ms``, ``tmin_us``, ``tmax_us``, 

1890 ``tmin_year``, ``tmax_year``, ``tmin_month``, ``tmax_month``, 

1891 ``tmin_day``, ``tmax_day``, ``julianday``. The variants ending with 

1892 ``'_ms'`` include milliseconds, those with ``'_us'`` include 

1893 microseconds, those with ``'_year'`` contain only the year. 

1894 ''' 

1895 

1896 template = template.replace('%n', '%(network)s')\ 

1897 .replace('%s', '%(station)s')\ 

1898 .replace('%l', '%(location)s')\ 

1899 .replace('%c', '%(channel)s')\ 

1900 .replace('%b', '%(tmin)s')\ 

1901 .replace('%e', '%(tmax)s')\ 

1902 .replace('%j', '%(julianday)s') 

1903 

1904 params = dict( 

1905 zip(('network', 'station', 'location', 'channel'), self.nslc_id)) 

1906 params['tmin'] = util.time_to_str( 

1907 self.tmin, format='%Y-%m-%d_%H-%M-%S') 

1908 params['tmax'] = util.time_to_str( 

1909 self.tmax, format='%Y-%m-%d_%H-%M-%S') 

1910 params['tmin_ms'] = util.time_to_str( 

1911 self.tmin, format='%Y-%m-%d_%H-%M-%S.3FRAC') 

1912 params['tmax_ms'] = util.time_to_str( 

1913 self.tmax, format='%Y-%m-%d_%H-%M-%S.3FRAC') 

1914 params['tmin_us'] = util.time_to_str( 

1915 self.tmin, format='%Y-%m-%d_%H-%M-%S.6FRAC') 

1916 params['tmax_us'] = util.time_to_str( 

1917 self.tmax, format='%Y-%m-%d_%H-%M-%S.6FRAC') 

1918 params['tmin_year'], params['tmin_month'], params['tmin_day'] \ 

1919 = util.time_to_str(self.tmin, format='%Y-%m-%d').split('-') 

1920 params['tmax_year'], params['tmax_month'], params['tmax_day'] \ 

1921 = util.time_to_str(self.tmax, format='%Y-%m-%d').split('-') 

1922 params['julianday'] = util.julian_day_of_year(self.tmin) 

1923 params.update(additional) 

1924 return template % params 

1925 

1926 def plot(self): 

1927 ''' 

1928 Show trace with matplotlib. 

1929 

1930 See also: :py:meth:`Trace.snuffle`. 

1931 ''' 

1932 

1933 import pylab 

1934 pylab.plot(self.get_xdata(), self.get_ydata()) 

1935 name = '%s %s %s - %s' % ( 

1936 self.channel, 

1937 self.station, 

1938 time.strftime("%d-%m-%y %H:%M:%S", time.gmtime(self.tmin)), 

1939 time.strftime("%d-%m-%y %H:%M:%S", time.gmtime(self.tmax))) 

1940 

1941 pylab.title(name) 

1942 pylab.show() 

1943 

1944 def snuffle(self, **kwargs): 

1945 ''' 

1946 Show trace in a snuffler window. 

1947 

1948 :param stations: list of `pyrocko.model.Station` objects or ``None`` 

1949 :param events: list of `pyrocko.model.Event` objects or ``None`` 

1950 :param markers: list of `pyrocko.gui.util.Marker` objects or ``None`` 

1951 :param ntracks: float, number of tracks to be shown initially (default: 

1952 12) 

1953 :param follow: time interval (in seconds) for real time follow mode or 

1954 ``None`` 

1955 :param controls: bool, whether to show the main controls (default: 

1956 ``True``) 

1957 :param opengl: bool, whether to use opengl (default: ``False``) 

1958 ''' 

1959 

1960 return snuffle([self], **kwargs) 

1961 

1962 

1963def snuffle(traces, **kwargs): 

1964 ''' 

1965 Show traces in a snuffler window. 

1966 

1967 :param stations: list of `pyrocko.model.Station` objects or ``None`` 

1968 :param events: list of `pyrocko.model.Event` objects or ``None`` 

1969 :param markers: list of `pyrocko.gui.util.Marker` objects or ``None`` 

1970 :param ntracks: float, number of tracks to be shown initially (default: 12) 

1971 :param follow: time interval (in seconds) for real time follow mode or 

1972 ``None`` 

1973 :param controls: bool, whether to show the main controls (default: 

1974 ``True``) 

1975 :param opengl: bool, whether to use opengl (default: ``False``) 

1976 ''' 

1977 

1978 from pyrocko import pile 

1979 from pyrocko.gui import snuffler 

1980 p = pile.Pile() 

1981 if traces: 

1982 trf = pile.MemTracesFile(None, traces) 

1983 p.add_file(trf) 

1984 return snuffler.snuffle(p, **kwargs) 

1985 

1986 

1987class InfiniteResponse(Exception): 

1988 ''' 

1989 This exception is raised by :py:class:`Trace` operations when deconvolution 

1990 of a frequency response (instrument response transfer function) would 

1991 result in a division by zero. 

1992 ''' 

1993 

1994 

1995class MisalignedTraces(Exception): 

1996 ''' 

1997 This exception is raised by some :py:class:`Trace` operations when tmin, 

1998 tmax or number of samples do not match. 

1999 ''' 

2000 

2001 pass 

2002 

2003 

2004class NoData(Exception): 

2005 ''' 

2006 This exception is raised by some :py:class:`Trace` operations when no or 

2007 not enough data is available. 

2008 ''' 

2009 

2010 pass 

2011 

2012 

2013class AboveNyquist(Exception): 

2014 ''' 

2015 This exception is raised by some :py:class:`Trace` operations when given 

2016 frequencies are above the Nyquist frequency. 

2017 ''' 

2018 

2019 pass 

2020 

2021 

2022class TraceTooShort(Exception): 

2023 ''' 

2024 This exception is raised by some :py:class:`Trace` operations when the 

2025 trace is too short. 

2026 ''' 

2027 

2028 pass 

2029 

2030 

2031class ResamplingFailed(Exception): 

2032 pass 

2033 

2034 

2035def minmax(traces, key=None, mode='minmax', outer_mode='minmax'): 

2036 

2037 ''' 

2038 Get data range given traces grouped by selected pattern. 

2039 

2040 :param key: a callable which takes as single argument a trace and returns a 

2041 key for the grouping of the results. If this is ``None``, the default, 

2042 ``lambda tr: (tr.network, tr.station, tr.location, tr.channel)`` is 

2043 used. 

2044 :param mode: ``'minmax'`` or floating point number. If this is 

2045 ``'minmax'``, minimum and maximum of the traces are used, if it is a 

2046 number, mean +- standard deviation times ``mode`` is used. 

2047 

2048 param outer_mode: ``'minmax'`` to use mininum and maximum of the 

2049 single-trace ranges, or ``'robust'`` to use the interval to discard 10% 

2050 extreme values on either end. 

2051 

2052 :returns: a dict with the combined data ranges. 

2053 

2054 Examples:: 

2055 

2056 ranges = minmax(traces, lambda tr: tr.channel) 

2057 print ranges['N'] # print min & max of all traces with channel == 'N' 

2058 print ranges['E'] # print min & max of all traces with channel == 'E' 

2059 

2060 ranges = minmax(traces, lambda tr: (tr.network, tr.station)) 

2061 print ranges['GR', 'HAM3'] # print min & max of all traces with 

2062 # network == 'GR' and station == 'HAM3' 

2063 

2064 ranges = minmax(traces, lambda tr: None) 

2065 print ranges[None] # prints min & max of all traces 

2066 ''' 

2067 

2068 if key is None: 

2069 key = _default_key 

2070 

2071 ranges = defaultdict(list) 

2072 for trace in traces: 

2073 if isinstance(mode, str) and mode == 'minmax': 

2074 mi, ma = num.nanmin(trace.ydata), num.nanmax(trace.ydata) 

2075 else: 

2076 mean = trace.ydata.mean() 

2077 std = trace.ydata.std() 

2078 mi, ma = mean-std*mode, mean+std*mode 

2079 

2080 k = key(trace) 

2081 ranges[k].append((mi, ma)) 

2082 

2083 for k in ranges: 

2084 mins, maxs = num.array(ranges[k]).T 

2085 if outer_mode == 'minmax': 

2086 ranges[k] = num.nanmin(mins), num.nanmax(maxs) 

2087 elif outer_mode == 'robust': 

2088 ranges[k] = num.percentile(mins, 10.), num.percentile(maxs, 90.) 

2089 

2090 return ranges 

2091 

2092 

2093def minmaxtime(traces, key=None): 

2094 

2095 ''' 

2096 Get time range given traces grouped by selected pattern. 

2097 

2098 :param key: a callable which takes as single argument a trace and returns a 

2099 key for the grouping of the results. If this is ``None``, the default, 

2100 ``lambda tr: (tr.network, tr.station, tr.location, tr.channel)`` is 

2101 used. 

2102 

2103 :returns: a dict with the combined data ranges. 

2104 ''' 

2105 

2106 if key is None: 

2107 key = _default_key 

2108 

2109 ranges = {} 

2110 for trace in traces: 

2111 mi, ma = trace.tmin, trace.tmax 

2112 k = key(trace) 

2113 if k not in ranges: 

2114 ranges[k] = mi, ma 

2115 else: 

2116 tmi, tma = ranges[k] 

2117 ranges[k] = min(tmi, mi), max(tma, ma) 

2118 

2119 return ranges 

2120 

2121 

2122def degapper( 

2123 traces, 

2124 maxgap=5, 

2125 fillmethod='interpolate', 

2126 deoverlap='use_second', 

2127 maxlap=None): 

2128 

2129 ''' 

2130 Try to connect traces and remove gaps. 

2131 

2132 This method will combine adjacent traces, which match in their network, 

2133 station, location and channel attributes. Overlapping parts are handled 

2134 according to the ``deoverlap`` argument. 

2135 

2136 :param traces: input traces, must be sorted by their full_id attribute. 

2137 :param maxgap: maximum number of samples to interpolate. 

2138 :param fillmethod: what to put into the gaps: 'interpolate' or 'zeros'. 

2139 :param deoverlap: how to handle overlaps: 'use_second' to use data from 

2140 second trace (default), 'use_first' to use data from first trace, 

2141 'crossfade_cos' to crossfade with cosine taper, 'add' to add amplitude 

2142 values. 

2143 :param maxlap: maximum number of samples of overlap which are removed 

2144 

2145 :returns: list of traces 

2146 ''' 

2147 

2148 in_traces = traces 

2149 out_traces = [] 

2150 if not in_traces: 

2151 return out_traces 

2152 out_traces.append(in_traces.pop(0)) 

2153 while in_traces: 

2154 

2155 a = out_traces[-1] 

2156 b = in_traces.pop(0) 

2157 

2158 avirt, bvirt = a.ydata is None, b.ydata is None 

2159 assert avirt == bvirt, \ 

2160 'traces given to degapper() must either all have data or have ' \ 

2161 'no data.' 

2162 

2163 virtual = avirt and bvirt 

2164 

2165 if (a.nslc_id == b.nslc_id and a.deltat == b.deltat 

2166 and a.data_len() >= 1 and b.data_len() >= 1 

2167 and (virtual or a.ydata.dtype == b.ydata.dtype)): 

2168 

2169 dist = (b.tmin-(a.tmin+(a.data_len()-1)*a.deltat))/a.deltat 

2170 idist = int(round(dist)) 

2171 if abs(dist - idist) > 0.05 and idist <= maxgap: 

2172 # logger.warning('Cannot degap traces with displaced sampling ' 

2173 # '(%s, %s, %s, %s)' % a.nslc_id) 

2174 pass 

2175 else: 

2176 if 1 < idist <= maxgap: 

2177 if not virtual: 

2178 if fillmethod == 'interpolate': 

2179 filler = a.ydata[-1] + ( 

2180 ((1.0 + num.arange(idist-1, dtype=float)) 

2181 / idist) * (b.ydata[0]-a.ydata[-1]) 

2182 ).astype(a.ydata.dtype) 

2183 elif fillmethod == 'zeros': 

2184 filler = num.zeros(idist-1, dtype=a.ydata.dtype) 

2185 a.ydata = num.concatenate((a.ydata, filler, b.ydata)) 

2186 a.tmax = b.tmax 

2187 if a.mtime and b.mtime: 

2188 a.mtime = max(a.mtime, b.mtime) 

2189 continue 

2190 

2191 elif idist == 1: 

2192 if not virtual: 

2193 a.ydata = num.concatenate((a.ydata, b.ydata)) 

2194 a.tmax = b.tmax 

2195 if a.mtime and b.mtime: 

2196 a.mtime = max(a.mtime, b.mtime) 

2197 continue 

2198 

2199 elif idist <= 0 and (maxlap is None or -maxlap < idist): 

2200 if b.tmax > a.tmax: 

2201 if not virtual: 

2202 na = a.ydata.size 

2203 n = -idist+1 

2204 if deoverlap == 'use_second': 

2205 a.ydata = num.concatenate( 

2206 (a.ydata[:-n], b.ydata)) 

2207 elif deoverlap in ('use_first', 'crossfade_cos'): 

2208 a.ydata = num.concatenate( 

2209 (a.ydata, b.ydata[n:])) 

2210 elif deoverlap == 'add': 

2211 a.ydata[-n:] += b.ydata[:n] 

2212 a.ydata = num.concatenate( 

2213 (a.ydata, b.ydata[n:])) 

2214 else: 

2215 assert False, 'unknown deoverlap method' 

2216 

2217 if deoverlap == 'crossfade_cos': 

2218 n = -idist+1 

2219 taper = 0.5-0.5*num.cos( 

2220 (1.+num.arange(n))/(1.+n)*num.pi) 

2221 a.ydata[na-n:na] *= 1.-taper 

2222 a.ydata[na-n:na] += b.ydata[:n] * taper 

2223 

2224 a.tmax = b.tmax 

2225 if a.mtime and b.mtime: 

2226 a.mtime = max(a.mtime, b.mtime) 

2227 continue 

2228 else: 

2229 # make short second trace vanish 

2230 continue 

2231 

2232 if b.data_len() >= 1: 

2233 out_traces.append(b) 

2234 

2235 for tr in out_traces: 

2236 tr._update_ids() 

2237 

2238 return out_traces 

2239 

2240 

2241def rotate(traces, azimuth, in_channels, out_channels): 

2242 ''' 

2243 2D rotation of traces. 

2244 

2245 :param traces: list of input traces 

2246 :param azimuth: difference of the azimuths of the component directions 

2247 (azimuth of out_channels[0]) - (azimuth of in_channels[0]) 

2248 :param in_channels: names of the input channels (e.g. 'N', 'E') 

2249 :param out_channels: names of the output channels (e.g. 'R', 'T') 

2250 :returns: list of rotated traces 

2251 ''' 

2252 

2253 phi = azimuth/180.*math.pi 

2254 cphi = math.cos(phi) 

2255 sphi = math.sin(phi) 

2256 rotated = [] 

2257 in_channels = tuple(_channels_to_names(in_channels)) 

2258 out_channels = tuple(_channels_to_names(out_channels)) 

2259 for a in traces: 

2260 for b in traces: 

2261 if ((a.channel, b.channel) == in_channels and 

2262 a.nslc_id[:3] == b.nslc_id[:3] and 

2263 abs(a.deltat-b.deltat) < a.deltat*0.001): 

2264 tmin = max(a.tmin, b.tmin) 

2265 tmax = min(a.tmax, b.tmax) 

2266 

2267 if tmin < tmax: 

2268 ac = a.chop(tmin, tmax, inplace=False, include_last=True) 

2269 bc = b.chop(tmin, tmax, inplace=False, include_last=True) 

2270 if abs(ac.tmin - bc.tmin) > ac.deltat*0.01: 

2271 logger.warning( 

2272 'Cannot rotate traces with displaced sampling ' 

2273 '(%s, %s, %s, %s)' % a.nslc_id) 

2274 continue 

2275 

2276 acydata = ac.get_ydata()*cphi+bc.get_ydata()*sphi 

2277 bcydata = -ac.get_ydata()*sphi+bc.get_ydata()*cphi 

2278 ac.set_ydata(acydata) 

2279 bc.set_ydata(bcydata) 

2280 

2281 ac.set_codes(channel=out_channels[0]) 

2282 bc.set_codes(channel=out_channels[1]) 

2283 rotated.append(ac) 

2284 rotated.append(bc) 

2285 

2286 return rotated 

2287 

2288 

2289def rotate_to_rt(n, e, source, receiver, out_channels=('R', 'T')): 

2290 ''' 

2291 Rotate traces from NE to RT system. 

2292 

2293 :param n: 

2294 North trace. 

2295 :type n: 

2296 :py:class:`~pyrocko.trace.Trace` 

2297 

2298 :param e: 

2299 East trace. 

2300 :type e: 

2301 :py:class:`~pyrocko.trace.Trace` 

2302 

2303 :param source: 

2304 Source of the recorded signal. 

2305 :type source: 

2306 :py:class:`pyrocko.gf.seismosizer.Source` 

2307 

2308 :param receiver: 

2309 Receiver of the recorded signal. 

2310 :type receiver: 

2311 :py:class:`pyrocko.model.location.Location` 

2312 

2313 :param out_channels: 

2314 Channel codes of the output channels (radial, transversal). 

2315 Default is ('R', 'T'). 

2316 . 

2317 :type out_channels 

2318 optional, tuple[str, str] 

2319 

2320 :returns: 

2321 Rotated traces (radial, transversal). 

2322 :rtype: 

2323 tuple[ 

2324 :py:class:`~pyrocko.trace.Trace`, 

2325 :py:class:`~pyrocko.trace.Trace`] 

2326 ''' 

2327 azimuth = orthodrome.azimuth(receiver, source) + 180. 

2328 in_channels = n.channel, e.channel 

2329 out = rotate( 

2330 [n, e], azimuth, 

2331 in_channels=in_channels, 

2332 out_channels=out_channels) 

2333 

2334 assert len(out) == 2 

2335 for tr in out: 

2336 if tr.channel == out_channels[0]: 

2337 r = tr 

2338 elif tr.channel == out_channels[1]: 

2339 t = tr 

2340 else: 

2341 assert False 

2342 

2343 return r, t 

2344 

2345 

2346def rotate_to_lqt(traces, backazimuth, incidence, in_channels, 

2347 out_channels=('L', 'Q', 'T')): 

2348 ''' 

2349 Rotate traces from ZNE to LQT system. 

2350 

2351 :param traces: list of traces in arbitrary order 

2352 :param backazimuth: backazimuth in degrees clockwise from north 

2353 :param incidence: incidence angle in degrees from vertical 

2354 :param in_channels: input channel names 

2355 :param out_channels: output channel names (default: ('L', 'Q', 'T')) 

2356 :returns: list of transformed traces 

2357 ''' 

2358 i = incidence/180.*num.pi 

2359 b = backazimuth/180.*num.pi 

2360 

2361 ci = num.cos(i) 

2362 cb = num.cos(b) 

2363 si = num.sin(i) 

2364 sb = num.sin(b) 

2365 

2366 rotmat = num.array( 

2367 [[ci, -cb*si, -sb*si], [si, cb*ci, sb*ci], [0., sb, -cb]]) 

2368 return project(traces, rotmat, in_channels, out_channels) 

2369 

2370 

2371def _decompose(a): 

2372 ''' 

2373 Decompose matrix into independent submatrices. 

2374 ''' 

2375 

2376 def depends(iout, a): 

2377 row = a[iout, :] 

2378 return set(num.arange(row.size).compress(row != 0.0)) 

2379 

2380 def provides(iin, a): 

2381 col = a[:, iin] 

2382 return set(num.arange(col.size).compress(col != 0.0)) 

2383 

2384 a = num.asarray(a) 

2385 outs = set(range(a.shape[0])) 

2386 systems = [] 

2387 while outs: 

2388 iout = outs.pop() 

2389 

2390 gout = set() 

2391 for iin in depends(iout, a): 

2392 gout.update(provides(iin, a)) 

2393 

2394 if not gout: 

2395 continue 

2396 

2397 gin = set() 

2398 for iout2 in gout: 

2399 gin.update(depends(iout2, a)) 

2400 

2401 if not gin: 

2402 continue 

2403 

2404 for iout2 in gout: 

2405 if iout2 in outs: 

2406 outs.remove(iout2) 

2407 

2408 gin = list(gin) 

2409 gin.sort() 

2410 gout = list(gout) 

2411 gout.sort() 

2412 

2413 systems.append((gin, gout, a[gout, :][:, gin])) 

2414 

2415 return systems 

2416 

2417 

2418def _channels_to_names(channels): 

2419 from pyrocko import squirrel 

2420 names = [] 

2421 for ch in channels: 

2422 if isinstance(ch, model.Channel): 

2423 names.append(ch.name) 

2424 elif isinstance(ch, squirrel.Channel): 

2425 names.append(ch.codes.channel) 

2426 else: 

2427 names.append(ch) 

2428 

2429 return names 

2430 

2431 

2432def project(traces, matrix, in_channels, out_channels): 

2433 ''' 

2434 Affine transform of three-component traces. 

2435 

2436 Compute matrix-vector product of three-component traces, to e.g. rotate 

2437 traces into a different basis. The traces are distinguished and ordered by 

2438 their channel attribute. The tranform is applied to overlapping parts of 

2439 any appropriate combinations of the input traces. This should allow this 

2440 function to be robust with data gaps. It also tries to apply the 

2441 tranformation to subsets of the channels, if this is possible, so that, if 

2442 for example a vertical compontent is missing, horizontal components can 

2443 still be rotated. 

2444 

2445 :param traces: list of traces in arbitrary order 

2446 :param matrix: tranformation matrix 

2447 :param in_channels: input channel names 

2448 :param out_channels: output channel names 

2449 :returns: list of transformed traces 

2450 ''' 

2451 

2452 in_channels = tuple(_channels_to_names(in_channels)) 

2453 out_channels = tuple(_channels_to_names(out_channels)) 

2454 systems = _decompose(matrix) 

2455 

2456 # fallback to full matrix if some are not quadratic 

2457 for iins, iouts, submatrix in systems: 

2458 if submatrix.shape[0] != submatrix.shape[1]: 

2459 if len(in_channels) != 3 or len(out_channels) != 3: 

2460 return [] 

2461 else: 

2462 return _project3(traces, matrix, in_channels, out_channels) 

2463 

2464 projected = [] 

2465 for iins, iouts, submatrix in systems: 

2466 in_cha = tuple([in_channels[iin] for iin in iins]) 

2467 out_cha = tuple([out_channels[iout] for iout in iouts]) 

2468 if submatrix.shape[0] == 1: 

2469 projected.extend(_project1(traces, submatrix, in_cha, out_cha)) 

2470 elif submatrix.shape[1] == 2: 

2471 projected.extend(_project2(traces, submatrix, in_cha, out_cha)) 

2472 else: 

2473 projected.extend(_project3(traces, submatrix, in_cha, out_cha)) 

2474 

2475 return projected 

2476 

2477 

2478def project_dependencies(matrix, in_channels, out_channels): 

2479 ''' 

2480 Figure out what dependencies project() would produce. 

2481 ''' 

2482 

2483 in_channels = tuple(_channels_to_names(in_channels)) 

2484 out_channels = tuple(_channels_to_names(out_channels)) 

2485 systems = _decompose(matrix) 

2486 

2487 subpro = [] 

2488 for iins, iouts, submatrix in systems: 

2489 if submatrix.shape[0] != submatrix.shape[1]: 

2490 subpro.append((matrix, in_channels, out_channels)) 

2491 

2492 if not subpro: 

2493 for iins, iouts, submatrix in systems: 

2494 in_cha = tuple([in_channels[iin] for iin in iins]) 

2495 out_cha = tuple([out_channels[iout] for iout in iouts]) 

2496 subpro.append((submatrix, in_cha, out_cha)) 

2497 

2498 deps = {} 

2499 for mat, in_cha, out_cha in subpro: 

2500 for oc in out_cha: 

2501 if oc not in deps: 

2502 deps[oc] = [] 

2503 

2504 for ic in in_cha: 

2505 deps[oc].append(ic) 

2506 

2507 return deps 

2508 

2509 

2510def _project1(traces, matrix, in_channels, out_channels): 

2511 assert len(in_channels) == 1 

2512 assert len(out_channels) == 1 

2513 assert matrix.shape == (1, 1) 

2514 

2515 projected = [] 

2516 for a in traces: 

2517 if not (a.channel,) == in_channels: 

2518 continue 

2519 

2520 ac = a.copy() 

2521 ac.set_ydata(matrix[0, 0]*a.get_ydata()) 

2522 ac.set_codes(channel=out_channels[0]) 

2523 projected.append(ac) 

2524 

2525 return projected 

2526 

2527 

2528def _project2(traces, matrix, in_channels, out_channels): 

2529 assert len(in_channels) == 2 

2530 assert len(out_channels) == 2 

2531 assert matrix.shape == (2, 2) 

2532 projected = [] 

2533 for a in traces: 

2534 for b in traces: 

2535 if not ((a.channel, b.channel) == in_channels and 

2536 a.nslc_id[:3] == b.nslc_id[:3] and 

2537 abs(a.deltat-b.deltat) < a.deltat*0.001): 

2538 continue 

2539 

2540 tmin = max(a.tmin, b.tmin) 

2541 tmax = min(a.tmax, b.tmax) 

2542 

2543 if tmin > tmax: 

2544 continue 

2545 

2546 ac = a.chop(tmin, tmax, inplace=False, include_last=True) 

2547 bc = b.chop(tmin, tmax, inplace=False, include_last=True) 

2548 if abs(ac.tmin - bc.tmin) > ac.deltat*0.01: 

2549 logger.warning( 

2550 'Cannot project traces with displaced sampling ' 

2551 '(%s, %s, %s, %s)' % a.nslc_id) 

2552 continue 

2553 

2554 acydata = num.dot(matrix[0], (ac.get_ydata(), bc.get_ydata())) 

2555 bcydata = num.dot(matrix[1], (ac.get_ydata(), bc.get_ydata())) 

2556 

2557 ac.set_ydata(acydata) 

2558 bc.set_ydata(bcydata) 

2559 

2560 ac.set_codes(channel=out_channels[0]) 

2561 bc.set_codes(channel=out_channels[1]) 

2562 

2563 projected.append(ac) 

2564 projected.append(bc) 

2565 

2566 return projected 

2567 

2568 

2569def _project3(traces, matrix, in_channels, out_channels): 

2570 assert len(in_channels) == 3 

2571 assert len(out_channels) == 3 

2572 assert matrix.shape == (3, 3) 

2573 projected = [] 

2574 for a in traces: 

2575 for b in traces: 

2576 for c in traces: 

2577 if not ((a.channel, b.channel, c.channel) == in_channels 

2578 and a.nslc_id[:3] == b.nslc_id[:3] 

2579 and b.nslc_id[:3] == c.nslc_id[:3] 

2580 and abs(a.deltat-b.deltat) < a.deltat*0.001 

2581 and abs(b.deltat-c.deltat) < b.deltat*0.001): 

2582 

2583 continue 

2584 

2585 tmin = max(a.tmin, b.tmin, c.tmin) 

2586 tmax = min(a.tmax, b.tmax, c.tmax) 

2587 

2588 if tmin >= tmax: 

2589 continue 

2590 

2591 ac = a.chop(tmin, tmax, inplace=False, include_last=True) 

2592 bc = b.chop(tmin, tmax, inplace=False, include_last=True) 

2593 cc = c.chop(tmin, tmax, inplace=False, include_last=True) 

2594 if (abs(ac.tmin - bc.tmin) > ac.deltat*0.01 

2595 or abs(bc.tmin - cc.tmin) > bc.deltat*0.01): 

2596 

2597 logger.warning( 

2598 'Cannot project traces with displaced sampling ' 

2599 '(%s, %s, %s, %s)' % a.nslc_id) 

2600 continue 

2601 

2602 acydata = num.dot( 

2603 matrix[0], 

2604 (ac.get_ydata(), bc.get_ydata(), cc.get_ydata())) 

2605 bcydata = num.dot( 

2606 matrix[1], 

2607 (ac.get_ydata(), bc.get_ydata(), cc.get_ydata())) 

2608 ccydata = num.dot( 

2609 matrix[2], 

2610 (ac.get_ydata(), bc.get_ydata(), cc.get_ydata())) 

2611 

2612 ac.set_ydata(acydata) 

2613 bc.set_ydata(bcydata) 

2614 cc.set_ydata(ccydata) 

2615 

2616 ac.set_codes(channel=out_channels[0]) 

2617 bc.set_codes(channel=out_channels[1]) 

2618 cc.set_codes(channel=out_channels[2]) 

2619 

2620 projected.append(ac) 

2621 projected.append(bc) 

2622 projected.append(cc) 

2623 

2624 return projected 

2625 

2626 

2627def correlate(a, b, mode='valid', normalization=None, use_fft=False): 

2628 ''' 

2629 Cross correlation of two traces. 

2630 

2631 :param a,b: input traces 

2632 :param mode: ``'valid'``, ``'full'``, or ``'same'`` 

2633 :param normalization: ``'normal'``, ``'gliding'``, or ``None`` 

2634 :param use_fft: bool, whether to do cross correlation in spectral domain 

2635 

2636 :returns: trace containing cross correlation coefficients 

2637 

2638 This function computes the cross correlation between two traces. It 

2639 evaluates the discrete equivalent of 

2640 

2641 .. math:: 

2642 

2643 c(t) = \\int_{-\\infty}^{\\infty} a^{\\ast}(\\tau) b(t+\\tau) d\\tau 

2644 

2645 where the star denotes complex conjugate. Note, that the arguments here are 

2646 swapped when compared with the :py:func:`numpy.correlate` function, 

2647 which is internally called. This function should be safe even with older 

2648 versions of NumPy, where the correlate function has some problems. 

2649 

2650 A trace containing the cross correlation coefficients is returned. The time 

2651 information of the output trace is set so that the returned cross 

2652 correlation can be viewed directly as a function of time lag. 

2653 

2654 Example:: 

2655 

2656 # align two traces a and b containing a time shifted similar signal: 

2657 c = pyrocko.trace.correlate(a,b) 

2658 t, coef = c.max() # get time and value of maximum 

2659 b.shift(-t) # align b with a 

2660 

2661 ''' 

2662 

2663 assert_same_sampling_rate(a, b) 

2664 

2665 ya, yb = a.ydata, b.ydata 

2666 

2667 # need reversed order here: 

2668 yc = numpy_correlate_fixed(yb, ya, mode=mode, use_fft=use_fft) 

2669 kmin, kmax = numpy_correlate_lag_range(yb, ya, mode=mode, use_fft=use_fft) 

2670 

2671 if normalization == 'normal': 

2672 normfac = num.sqrt(num.sum(ya**2))*num.sqrt(num.sum(yb**2)) 

2673 yc = yc/normfac 

2674 

2675 elif normalization == 'gliding': 

2676 if mode != 'valid': 

2677 assert False, 'gliding normalization currently only available ' \ 

2678 'with "valid" mode.' 

2679 

2680 if ya.size < yb.size: 

2681 yshort, ylong = ya, yb 

2682 else: 

2683 yshort, ylong = yb, ya 

2684 

2685 epsilon = 0.00001 

2686 normfac_short = num.sqrt(num.sum(yshort**2)) 

2687 normfac = normfac_short * num.sqrt( 

2688 moving_sum(ylong**2, yshort.size, mode='valid')) \ 

2689 + normfac_short*epsilon 

2690 

2691 if yb.size <= ya.size: 

2692 normfac = normfac[::-1] 

2693 

2694 yc /= normfac 

2695 

2696 c = a.copy() 

2697 c.set_ydata(yc) 

2698 c.set_codes(*merge_codes(a, b, '~')) 

2699 c.shift(-c.tmin + b.tmin-a.tmin + kmin * c.deltat) 

2700 

2701 return c 

2702 

2703 

2704def deconvolve( 

2705 a, b, waterlevel, 

2706 tshift=0., 

2707 pad=0.5, 

2708 fd_taper=None, 

2709 pad_to_pow2=True): 

2710 

2711 same_sampling_rate(a, b) 

2712 assert abs(a.tmin - b.tmin) < a.deltat * 0.001 

2713 deltat = a.deltat 

2714 npad = int(round(a.data_len()*pad + tshift / deltat)) 

2715 

2716 ndata = max(a.data_len(), b.data_len()) 

2717 ndata_pad = ndata + npad 

2718 

2719 if pad_to_pow2: 

2720 ntrans = nextpow2(ndata_pad) 

2721 else: 

2722 ntrans = ndata 

2723 

2724 aspec = num.fft.rfft(a.ydata, ntrans) 

2725 bspec = num.fft.rfft(b.ydata, ntrans) 

2726 

2727 out = aspec * num.conj(bspec) 

2728 

2729 bautocorr = bspec*num.conj(bspec) 

2730 denom = num.maximum(bautocorr, waterlevel * bautocorr.max()) 

2731 

2732 out /= denom 

2733 df = 1/(ntrans*deltat) 

2734 

2735 if fd_taper is not None: 

2736 fd_taper(out, 0.0, df) 

2737 

2738 ydata = num.roll(num.fft.irfft(out), int(round(tshift/deltat))) 

2739 c = a.copy(data=False) 

2740 c.set_ydata(ydata[:ndata]) 

2741 c.set_codes(*merge_codes(a, b, '/')) 

2742 return c 

2743 

2744 

2745def assert_same_sampling_rate(a, b, eps=1.0e-6): 

2746 assert same_sampling_rate(a, b, eps), \ 

2747 'Sampling rates differ: %g != %g' % (a.deltat, b.deltat) 

2748 

2749 

2750def same_sampling_rate(a, b, eps=1.0e-6): 

2751 ''' 

2752 Check if two traces have the same sampling rate. 

2753 

2754 :param a,b: input traces 

2755 :param eps: relative tolerance 

2756 ''' 

2757 

2758 return abs(a.deltat - b.deltat) < (a.deltat + b.deltat)*eps 

2759 

2760 

2761def fix_deltat_rounding_errors(deltat): 

2762 ''' 

2763 Try to undo sampling rate rounding errors. 

2764 

2765 Fix rounding errors of sampling intervals when these are read from single 

2766 precision floating point values. 

2767 

2768 Assumes that the true sampling rate or sampling interval was an integer 

2769 value. No correction will be applied if this would change the sampling 

2770 rate by more than 0.001%. 

2771 ''' 

2772 

2773 if deltat <= 1.0: 

2774 deltat_new = 1.0 / round(1.0 / deltat) 

2775 else: 

2776 deltat_new = round(deltat) 

2777 

2778 if abs(deltat_new - deltat) / deltat > 1e-5: 

2779 deltat_new = deltat 

2780 

2781 return deltat_new 

2782 

2783 

2784def merge_codes(a, b, sep='-'): 

2785 ''' 

2786 Merge network-station-location-channel codes of a pair of traces. 

2787 ''' 

2788 

2789 o = [] 

2790 for xa, xb in zip(a.nslc_id, b.nslc_id): 

2791 if xa == xb: 

2792 o.append(xa) 

2793 else: 

2794 o.append(sep.join((xa, xb))) 

2795 return o 

2796 

2797 

2798class Taper(Object): 

2799 ''' 

2800 Base class for tapers. 

2801 

2802 Does nothing by default. 

2803 ''' 

2804 

2805 def __call__(self, y, x0, dx): 

2806 pass 

2807 

2808 

2809class CosTaper(Taper): 

2810 ''' 

2811 Cosine Taper. 

2812 

2813 :param a: start of fading in 

2814 :param b: end of fading in 

2815 :param c: start of fading out 

2816 :param d: end of fading out 

2817 ''' 

2818 

2819 a = Float.T() 

2820 b = Float.T() 

2821 c = Float.T() 

2822 d = Float.T() 

2823 

2824 def __init__(self, a, b, c, d): 

2825 Taper.__init__(self, a=a, b=b, c=c, d=d) 

2826 

2827 def __call__(self, y, x0, dx): 

2828 apply_costaper(self.a, self.b, self.c, self.d, y, x0, dx) 

2829 

2830 def span(self, y, x0, dx): 

2831 return span_costaper(self.a, self.b, self.c, self.d, y, x0, dx) 

2832 

2833 def time_span(self): 

2834 return self.a, self.d 

2835 

2836 

2837class CosFader(Taper): 

2838 ''' 

2839 Cosine Fader. 

2840 

2841 :param xfade: fade in and fade out time in seconds (optional) 

2842 :param xfrac: fade in and fade out as fraction between 0. and 1. (optional) 

2843 

2844 Only one argument can be set. The other should to be ``None``. 

2845 ''' 

2846 

2847 xfade = Float.T(optional=True) 

2848 xfrac = Float.T(optional=True) 

2849 

2850 def __init__(self, xfade=None, xfrac=None): 

2851 Taper.__init__(self, xfade=xfade, xfrac=xfrac) 

2852 assert (xfade is None) != (xfrac is None) 

2853 self._xfade = xfade 

2854 self._xfrac = xfrac 

2855 

2856 def __call__(self, y, x0, dx): 

2857 

2858 xfade = self._xfade 

2859 

2860 xlen = (y.size - 1)*dx 

2861 if xfade is None: 

2862 xfade = xlen * self._xfrac 

2863 

2864 a = x0 

2865 b = x0 + xfade 

2866 c = x0 + xlen - xfade 

2867 d = x0 + xlen 

2868 

2869 apply_costaper(a, b, c, d, y, x0, dx) 

2870 

2871 def span(self, y, x0, dx): 

2872 return 0, y.size 

2873 

2874 def time_span(self): 

2875 return None, None 

2876 

2877 

2878def none_min(li): 

2879 if None in li: 

2880 return None 

2881 else: 

2882 return min(x for x in li if x is not None) 

2883 

2884 

2885def none_max(li): 

2886 if None in li: 

2887 return None 

2888 else: 

2889 return max(x for x in li if x is not None) 

2890 

2891 

2892class MultiplyTaper(Taper): 

2893 ''' 

2894 Multiplication of several tapers. 

2895 ''' 

2896 

2897 tapers = List.T(Taper.T()) 

2898 

2899 def __init__(self, tapers=None): 

2900 if tapers is None: 

2901 tapers = [] 

2902 

2903 Taper.__init__(self, tapers=tapers) 

2904 

2905 def __call__(self, y, x0, dx): 

2906 for taper in self.tapers: 

2907 taper(y, x0, dx) 

2908 

2909 def span(self, y, x0, dx): 

2910 spans = [] 

2911 for taper in self.tapers: 

2912 spans.append(taper.span(y, x0, dx)) 

2913 

2914 mins, maxs = list(zip(*spans)) 

2915 return min(mins), max(maxs) 

2916 

2917 def time_span(self): 

2918 spans = [] 

2919 for taper in self.tapers: 

2920 spans.append(taper.time_span()) 

2921 

2922 mins, maxs = list(zip(*spans)) 

2923 return none_min(mins), none_max(maxs) 

2924 

2925 

2926class GaussTaper(Taper): 

2927 ''' 

2928 Frequency domain Gaussian filter. 

2929 ''' 

2930 

2931 alpha = Float.T() 

2932 

2933 def __init__(self, alpha): 

2934 Taper.__init__(self, alpha=alpha) 

2935 self._alpha = alpha 

2936 

2937 def __call__(self, y, x0, dx): 

2938 f = x0 + num.arange(y.size)*dx 

2939 y *= num.exp(-num.pi**2 / (self._alpha**2) * f**2) 

2940 

2941 

2942cached_coefficients = {} 

2943 

2944 

2945def _get_cached_filter_coefs(order, corners, btype): 

2946 ck = (order, tuple(corners), btype) 

2947 if ck not in cached_coefficients: 

2948 if len(corners) == 0: 

2949 cached_coefficients[ck] = signal.butter( 

2950 order, corners[0], btype=btype) 

2951 else: 

2952 cached_coefficients[ck] = signal.butter( 

2953 order, corners, btype=btype) 

2954 

2955 return cached_coefficients[ck] 

2956 

2957 

2958class _globals(object): 

2959 _numpy_has_correlate_flip_bug = None 

2960 

2961 

2962def _default_key(tr): 

2963 return (tr.network, tr.station, tr.location, tr.channel) 

2964 

2965 

2966def numpy_has_correlate_flip_bug(): 

2967 ''' 

2968 Check if NumPy's correlate function reveals old behaviour. 

2969 ''' 

2970 

2971 if _globals._numpy_has_correlate_flip_bug is None: 

2972 a = num.array([0, 0, 1, 0, 0, 0, 0]) 

2973 b = num.array([0, 0, 0, 0, 1, 0, 0, 0]) 

2974 ab = num.correlate(a, b, mode='same') 

2975 ba = num.correlate(b, a, mode='same') 

2976 _globals._numpy_has_correlate_flip_bug = num.all(ab == ba) 

2977 

2978 return _globals._numpy_has_correlate_flip_bug 

2979 

2980 

2981def numpy_correlate_fixed(a, b, mode='valid', use_fft=False): 

2982 ''' 

2983 Call :py:func:`numpy.correlate` with fixes. 

2984 

2985 c[k] = sum_i a[i+k] * conj(b[i]) 

2986 

2987 Note that the result produced by newer numpy.correlate is always flipped 

2988 with respect to the formula given in its documentation (if ascending k 

2989 assumed for the output). 

2990 ''' 

2991 

2992 if use_fft: 

2993 if a.size < b.size: 

2994 c = signal.fftconvolve(b[::-1], a, mode=mode) 

2995 else: 

2996 c = signal.fftconvolve(a, b[::-1], mode=mode) 

2997 return c 

2998 

2999 else: 

3000 buggy = numpy_has_correlate_flip_bug() 

3001 

3002 a = num.asarray(a) 

3003 b = num.asarray(b) 

3004 

3005 if buggy: 

3006 b = num.conj(b) 

3007 

3008 c = num.correlate(a, b, mode=mode) 

3009 

3010 if buggy and a.size < b.size: 

3011 return c[::-1] 

3012 else: 

3013 return c 

3014 

3015 

3016def numpy_correlate_emulate(a, b, mode='valid'): 

3017 ''' 

3018 Slow version of :py:func:`numpy.correlate` for comparison. 

3019 ''' 

3020 

3021 a = num.asarray(a) 

3022 b = num.asarray(b) 

3023 kmin = -(b.size-1) 

3024 klen = a.size-kmin 

3025 kmin, kmax = numpy_correlate_lag_range(a, b, mode=mode) 

3026 kmin = int(kmin) 

3027 kmax = int(kmax) 

3028 klen = kmax - kmin + 1 

3029 c = num.zeros(klen, dtype=num.find_common_type((b.dtype, a.dtype), ())) 

3030 for k in range(kmin, kmin+klen): 

3031 imin = max(0, -k) 

3032 ilen = min(b.size, a.size-k) - imin 

3033 c[k-kmin] = num.sum( 

3034 a[imin+k:imin+ilen+k] * num.conj(b[imin:imin+ilen])) 

3035 

3036 return c 

3037 

3038 

3039def numpy_correlate_lag_range(a, b, mode='valid', use_fft=False): 

3040 ''' 

3041 Get range of lags for which :py:func:`numpy.correlate` produces values. 

3042 ''' 

3043 

3044 a = num.asarray(a) 

3045 b = num.asarray(b) 

3046 

3047 kmin = -(b.size-1) 

3048 if mode == 'full': 

3049 klen = a.size-kmin 

3050 elif mode == 'same': 

3051 klen = max(a.size, b.size) 

3052 kmin += (a.size+b.size-1 - max(a.size, b.size)) // 2 + \ 

3053 int(not use_fft and a.size % 2 == 0 and b.size > a.size) 

3054 elif mode == 'valid': 

3055 klen = abs(a.size - b.size) + 1 

3056 kmin += min(a.size, b.size) - 1 

3057 

3058 return kmin, kmin + klen - 1 

3059 

3060 

3061def autocorr(x, nshifts): 

3062 ''' 

3063 Compute biased estimate of the first autocorrelation coefficients. 

3064 

3065 :param x: input array 

3066 :param nshifts: number of coefficients to calculate 

3067 ''' 

3068 

3069 mean = num.mean(x) 

3070 std = num.std(x) 

3071 n = x.size 

3072 xdm = x - mean 

3073 r = num.zeros(nshifts) 

3074 for k in range(nshifts): 

3075 r[k] = 1./((n-num.abs(k))*std) * num.sum(xdm[:n-k] * xdm[k:]) 

3076 

3077 return r 

3078 

3079 

3080def yulewalker(x, order): 

3081 ''' 

3082 Compute autoregression coefficients using Yule-Walker method. 

3083 

3084 :param x: input array 

3085 :param order: number of coefficients to produce 

3086 

3087 A biased estimate of the autocorrelation is used. The Yule-Walker equations 

3088 are solved by :py:func:`numpy.linalg.inv` instead of Levinson-Durbin 

3089 recursion which is normally used. 

3090 ''' 

3091 

3092 gamma = autocorr(x, order+1) 

3093 d = gamma[1:1+order] 

3094 a = num.zeros((order, order)) 

3095 gamma2 = num.concatenate((gamma[::-1], gamma[1:order])) 

3096 for i in range(order): 

3097 ioff = order-i 

3098 a[i, :] = gamma2[ioff:ioff+order] 

3099 

3100 return num.dot(num.linalg.inv(a), -d) 

3101 

3102 

3103def moving_avg(x, n): 

3104 n = int(n) 

3105 cx = x.cumsum() 

3106 nn = len(x) 

3107 y = num.zeros(nn, dtype=cx.dtype) 

3108 y[n//2:n//2+(nn-n)] = (cx[n:]-cx[:-n])/n 

3109 y[:n//2] = y[n//2] 

3110 y[n//2+(nn-n):] = y[n//2+(nn-n)-1] 

3111 return y 

3112 

3113 

3114def moving_sum(x, n, mode='valid'): 

3115 n = int(n) 

3116 cx = x.cumsum() 

3117 nn = len(x) 

3118 

3119 if mode == 'valid': 

3120 if nn-n+1 <= 0: 

3121 return num.zeros(0, dtype=cx.dtype) 

3122 y = num.zeros(nn-n+1, dtype=cx.dtype) 

3123 y[0] = cx[n-1] 

3124 y[1:nn-n+1] = cx[n:nn]-cx[0:nn-n] 

3125 

3126 if mode == 'full': 

3127 y = num.zeros(nn+n-1, dtype=cx.dtype) 

3128 if n <= nn: 

3129 y[0:n] = cx[0:n] 

3130 y[n:nn] = cx[n:nn]-cx[0:nn-n] 

3131 y[nn:nn+n-1] = cx[-1]-cx[nn-n:nn-1] 

3132 else: 

3133 y[0:nn] = cx[0:nn] 

3134 y[nn:n] = cx[nn-1] 

3135 y[n:nn+n-1] = cx[nn-1] - cx[0:nn-1] 

3136 

3137 if mode == 'same': 

3138 n1 = (n-1)//2 

3139 y = num.zeros(nn, dtype=cx.dtype) 

3140 if n <= nn: 

3141 y[0:n-n1] = cx[n1:n] 

3142 y[n-n1:nn-n1] = cx[n:nn]-cx[0:nn-n] 

3143 y[nn-n1:nn] = cx[nn-1] - cx[nn-n:nn-n+n1] 

3144 else: 

3145 y[0:max(0, nn-n1)] = cx[min(n1, nn):nn] 

3146 y[max(nn-n1, 0):min(n-n1, nn)] = cx[nn-1] 

3147 y[min(n-n1, nn):nn] = cx[nn-1] - cx[0:max(0, nn-(n-n1))] 

3148 

3149 return y 

3150 

3151 

3152def nextpow2(i): 

3153 return 2**int(math.ceil(math.log(i)/math.log(2.))) 

3154 

3155 

3156def snapper_w_offset(nmax, offset, delta, snapfun=math.ceil): 

3157 def snap(x): 

3158 return max(0, min(int(snapfun((x-offset)/delta)), nmax)) 

3159 return snap 

3160 

3161 

3162def snapper(nmax, delta, snapfun=math.ceil): 

3163 def snap(x): 

3164 return max(0, min(int(snapfun(x/delta)), nmax)) 

3165 return snap 

3166 

3167 

3168def apply_costaper(a, b, c, d, y, x0, dx): 

3169 hi = snapper_w_offset(y.size, x0, dx) 

3170 y[:hi(a)] = 0. 

3171 y[hi(a):hi(b)] *= 0.5 \ 

3172 - 0.5*num.cos((dx*num.arange(hi(a), hi(b))-(a-x0))/(b-a)*num.pi) 

3173 y[hi(c):hi(d)] *= 0.5 \ 

3174 + 0.5*num.cos((dx*num.arange(hi(c), hi(d))-(c-x0))/(d-c)*num.pi) 

3175 y[hi(d):] = 0. 

3176 

3177 

3178def span_costaper(a, b, c, d, y, x0, dx): 

3179 hi = snapper_w_offset(y.size, x0, dx) 

3180 return hi(a), hi(d) - hi(a) 

3181 

3182 

3183def costaper(a, b, c, d, nfreqs, deltaf): 

3184 hi = snapper(nfreqs, deltaf) 

3185 tap = num.zeros(nfreqs) 

3186 tap[hi(a):hi(b)] = 0.5 \ 

3187 - 0.5*num.cos((deltaf*num.arange(hi(a), hi(b))-a)/(b-a)*num.pi) 

3188 tap[hi(b):hi(c)] = 1. 

3189 tap[hi(c):hi(d)] = 0.5 \ 

3190 + 0.5*num.cos((deltaf*num.arange(hi(c), hi(d))-c)/(d-c)*num.pi) 

3191 

3192 return tap 

3193 

3194 

3195def t2ind(t, tdelta, snap=round): 

3196 return int(snap(t/tdelta)) 

3197 

3198 

3199def hilbert(x, N=None): 

3200 ''' 

3201 Return the hilbert transform of x of length N. 

3202 

3203 (from scipy.signal, but changed to use fft and ifft from numpy.fft) 

3204 ''' 

3205 

3206 x = num.asarray(x) 

3207 if N is None: 

3208 N = len(x) 

3209 if N <= 0: 

3210 raise ValueError("N must be positive.") 

3211 if num.iscomplexobj(x): 

3212 logger.warning('imaginary part of x ignored.') 

3213 x = num.real(x) 

3214 

3215 Xf = num.fft.fft(x, N, axis=0) 

3216 h = num.zeros(N) 

3217 if N % 2 == 0: 

3218 h[0] = h[N//2] = 1 

3219 h[1:N//2] = 2 

3220 else: 

3221 h[0] = 1 

3222 h[1:(N+1)//2] = 2 

3223 

3224 if len(x.shape) > 1: 

3225 h = h[:, num.newaxis] 

3226 x = num.fft.ifft(Xf*h) 

3227 return x 

3228 

3229 

3230def near(a, b, eps): 

3231 return abs(a-b) < eps 

3232 

3233 

3234def coroutine(func): 

3235 def wrapper(*args, **kwargs): 

3236 gen = func(*args, **kwargs) 

3237 next(gen) 

3238 return gen 

3239 

3240 wrapper.__name__ = func.__name__ 

3241 wrapper.__dict__ = func.__dict__ 

3242 wrapper.__doc__ = func.__doc__ 

3243 return wrapper 

3244 

3245 

3246class States(object): 

3247 ''' 

3248 Utility to store channel-specific state in coroutines. 

3249 ''' 

3250 

3251 def __init__(self): 

3252 self._states = {} 

3253 

3254 def get(self, tr): 

3255 k = tr.nslc_id 

3256 if k in self._states: 

3257 tmin, deltat, dtype, value = self._states[k] 

3258 if (near(tmin, tr.tmin, deltat/100.) 

3259 and near(deltat, tr.deltat, deltat/10000.) 

3260 and dtype == tr.ydata.dtype): 

3261 

3262 return value 

3263 

3264 return None 

3265 

3266 def set(self, tr, value): 

3267 k = tr.nslc_id 

3268 if k in self._states and self._states[k][-1] is not value: 

3269 self.free(self._states[k][-1]) 

3270 

3271 self._states[k] = (tr.tmax+tr.deltat, tr.deltat, tr.ydata.dtype, value) 

3272 

3273 def free(self, value): 

3274 pass 

3275 

3276 

3277@coroutine 

3278def co_list_append(list): 

3279 while True: 

3280 list.append((yield)) 

3281 

3282 

3283class ScipyBug(Exception): 

3284 pass 

3285 

3286 

3287@coroutine 

3288def co_lfilter(target, b, a): 

3289 ''' 

3290 Successively filter broken continuous trace data (coroutine). 

3291 

3292 Create coroutine which takes :py:class:`Trace` objects, filters their data 

3293 through :py:func:`scipy.signal.lfilter` and sends new :py:class:`Trace` 

3294 objects containing the filtered data to target. This is useful, if one 

3295 wants to filter a long continuous time series, which is split into many 

3296 successive traces without producing filter artifacts at trace boundaries. 

3297 

3298 Filter states are kept *per channel*, specifically, for each (network, 

3299 station, location, channel) combination occuring in the input traces, a 

3300 separate state is created and maintained. This makes it possible to filter 

3301 multichannel or multistation data with only one :py:func:`co_lfilter` 

3302 instance. 

3303 

3304 Filter state is reset, when gaps occur. 

3305 

3306 Use it like this:: 

3307 

3308 from pyrocko.trace import co_lfilter, co_list_append 

3309 

3310 filtered_traces = [] 

3311 pipe = co_lfilter(co_list_append(filtered_traces), a, b) 

3312 for trace in traces: 

3313 pipe.send(trace) 

3314 

3315 pipe.close() 

3316 

3317 ''' 

3318 

3319 try: 

3320 states = States() 

3321 output = None 

3322 while True: 

3323 input = (yield) 

3324 

3325 zi = states.get(input) 

3326 if zi is None: 

3327 zi = num.zeros(max(len(a), len(b))-1, dtype=float) 

3328 

3329 output = input.copy(data=False) 

3330 try: 

3331 ydata, zf = signal.lfilter(b, a, input.get_ydata(), zi=zi) 

3332 except ValueError: 

3333 raise ScipyBug( 

3334 'signal.lfilter failed: could be related to a bug ' 

3335 'in some older scipy versions, e.g. on opensuse42.1') 

3336 

3337 output.set_ydata(ydata) 

3338 states.set(input, zf) 

3339 target.send(output) 

3340 

3341 except GeneratorExit: 

3342 target.close() 

3343 

3344 

3345def co_antialias(target, q, n=None, ftype='fir'): 

3346 b, a, n = util.decimate_coeffs(q, n, ftype) 

3347 anti = co_lfilter(target, b, a) 

3348 return anti 

3349 

3350 

3351@coroutine 

3352def co_dropsamples(target, q, nfir): 

3353 try: 

3354 states = States() 

3355 while True: 

3356 tr = (yield) 

3357 newdeltat = q * tr.deltat 

3358 ioffset = states.get(tr) 

3359 if ioffset is None: 

3360 # for fir filter, the first nfir samples are pulluted by 

3361 # boundary effects; cut it off. 

3362 # for iir this may be (much) more, we do not correct for that. 

3363 # put sample instances to a time which is a multiple of the 

3364 # new sampling interval. 

3365 newtmin_want = math.ceil( 

3366 (tr.tmin+(nfir+1)*tr.deltat) / newdeltat) * newdeltat \ 

3367 - (nfir/2*tr.deltat) 

3368 ioffset = int(round((newtmin_want - tr.tmin)/tr.deltat)) 

3369 if ioffset < 0: 

3370 ioffset = ioffset % q 

3371 

3372 newtmin_have = tr.tmin + ioffset * tr.deltat 

3373 newtr = tr.copy(data=False) 

3374 newtr.deltat = newdeltat 

3375 # because the fir kernel shifts data by nfir/2 samples: 

3376 newtr.tmin = newtmin_have - (nfir/2*tr.deltat) 

3377 newtr.set_ydata(tr.get_ydata()[ioffset::q].copy()) 

3378 states.set(tr, (ioffset % q - tr.data_len() % q) % q) 

3379 target.send(newtr) 

3380 

3381 except GeneratorExit: 

3382 target.close() 

3383 

3384 

3385def co_downsample(target, q, n=None, ftype='fir'): 

3386 ''' 

3387 Successively downsample broken continuous trace data (coroutine). 

3388 

3389 Create coroutine which takes :py:class:`Trace` objects, downsamples their 

3390 data and sends new :py:class:`Trace` objects containing the downsampled 

3391 data to target. This is useful, if one wants to downsample a long 

3392 continuous time series, which is split into many successive traces without 

3393 producing filter artifacts and gaps at trace boundaries. 

3394 

3395 Filter states are kept *per channel*, specifically, for each (network, 

3396 station, location, channel) combination occuring in the input traces, a 

3397 separate state is created and maintained. This makes it possible to filter 

3398 multichannel or multistation data with only one :py:func:`co_lfilter` 

3399 instance. 

3400 

3401 Filter state is reset, when gaps occur. The sampling instances are choosen 

3402 so that they occur at (or as close as possible) to even multiples of the 

3403 sampling interval of the downsampled trace (based on system time). 

3404 ''' 

3405 

3406 b, a, n = util.decimate_coeffs(q, n, ftype) 

3407 return co_antialias(co_dropsamples(target, q, n), q, n, ftype) 

3408 

3409 

3410@coroutine 

3411def co_downsample_to(target, deltat): 

3412 

3413 decimators = {} 

3414 try: 

3415 while True: 

3416 tr = (yield) 

3417 ratio = deltat / tr.deltat 

3418 rratio = round(ratio) 

3419 if abs(rratio - ratio)/ratio > 0.0001: 

3420 raise util.UnavailableDecimation('ratio = %g' % ratio) 

3421 

3422 deci_seq = tuple(x for x in util.decitab(int(rratio)) if x != 1) 

3423 if deci_seq not in decimators: 

3424 pipe = target 

3425 for q in deci_seq[::-1]: 

3426 pipe = co_downsample(pipe, q) 

3427 

3428 decimators[deci_seq] = pipe 

3429 

3430 decimators[deci_seq].send(tr) 

3431 

3432 except GeneratorExit: 

3433 for g in decimators.values(): 

3434 g.close() 

3435 

3436 

3437class DomainChoice(StringChoice): 

3438 choices = [ 

3439 'time_domain', 

3440 'frequency_domain', 

3441 'envelope', 

3442 'absolute', 

3443 'cc_max_norm'] 

3444 

3445 

3446class MisfitSetup(Object): 

3447 ''' 

3448 Contains misfit setup to be used in :py:func:`trace.misfit` 

3449 

3450 :param description: Description of the setup 

3451 :param norm: L-norm classifier 

3452 :param taper: Object of :py:class:`Taper` 

3453 :param filter: Object of :py:class:`FrequencyResponse` 

3454 :param domain: ['time_domain', 'frequency_domain', 'envelope', 'absolute', 

3455 'cc_max_norm'] 

3456 

3457 Can be dumped to a yaml file. 

3458 ''' 

3459 

3460 xmltagname = 'misfitsetup' 

3461 description = String.T(optional=True) 

3462 norm = Int.T(optional=False) 

3463 taper = Taper.T(optional=False) 

3464 filter = FrequencyResponse.T(optional=True) 

3465 domain = DomainChoice.T(default='time_domain') 

3466 

3467 

3468def equalize_sampling_rates(trace_1, trace_2): 

3469 ''' 

3470 Equalize sampling rates of two traces (reduce higher sampling rate to 

3471 lower). 

3472 

3473 :param trace_1: :py:class:`Trace` object 

3474 :param trace_2: :py:class:`Trace` object 

3475 

3476 Returns a copy of the resampled trace if resampling is needed. 

3477 ''' 

3478 

3479 if same_sampling_rate(trace_1, trace_2): 

3480 return trace_1, trace_2 

3481 

3482 if trace_1.deltat < trace_2.deltat: 

3483 t1_out = trace_1.copy() 

3484 t1_out.downsample_to(deltat=trace_2.deltat, snap=True) 

3485 logger.debug('Trace downsampled (return copy of trace): %s' 

3486 % '.'.join(t1_out.nslc_id)) 

3487 return t1_out, trace_2 

3488 

3489 elif trace_1.deltat > trace_2.deltat: 

3490 t2_out = trace_2.copy() 

3491 t2_out.downsample_to(deltat=trace_1.deltat, snap=True) 

3492 logger.debug('Trace downsampled (return copy of trace): %s' 

3493 % '.'.join(t2_out.nslc_id)) 

3494 return trace_1, t2_out 

3495 

3496 

3497def Lx_norm(u, v, norm=2): 

3498 ''' 

3499 Calculate the misfit denominator *m* and the normalization devisor *n* 

3500 according to norm. 

3501 

3502 The normalization divisor *n* is calculated from ``v``. 

3503 

3504 :param u: :py:class:`numpy.array` 

3505 :param v: :py:class:`numpy.array` 

3506 :param norm: (default = 2) 

3507 

3508 ``u`` and ``v`` must be of same size. 

3509 ''' 

3510 

3511 if norm == 1: 

3512 return ( 

3513 num.sum(num.abs(v-u)), 

3514 num.sum(num.abs(v))) 

3515 

3516 elif norm == 2: 

3517 return ( 

3518 num.sqrt(num.sum((v-u)**2)), 

3519 num.sqrt(num.sum(v**2))) 

3520 

3521 else: 

3522 return ( 

3523 num.power(num.sum(num.abs(num.power(v - u, norm))), 1./norm), 

3524 num.power(num.sum(num.abs(num.power(v, norm))), 1./norm)) 

3525 

3526 

3527def do_downsample(tr, deltat): 

3528 if abs(tr.deltat - deltat) / tr.deltat > 1e-6: 

3529 tr = tr.copy() 

3530 tr.downsample_to(deltat, snap=True, demean=False) 

3531 else: 

3532 if tr.tmin/tr.deltat > 1e-6 or tr.tmax/tr.deltat > 1e-6: 

3533 tr = tr.copy() 

3534 tr.snap() 

3535 return tr 

3536 

3537 

3538def do_extend(tr, tmin, tmax): 

3539 if tmin < tr.tmin or tmax > tr.tmax: 

3540 tr = tr.copy() 

3541 tr.extend(tmin=tmin, tmax=tmax, fillmethod='repeat') 

3542 

3543 return tr 

3544 

3545 

3546def do_pre_taper(tr, taper): 

3547 return tr.taper(taper, inplace=False, chop=True) 

3548 

3549 

3550def do_fft(tr, filter): 

3551 if filter is None: 

3552 return tr 

3553 else: 

3554 ndata = tr.ydata.size 

3555 nfft = nextpow2(ndata) 

3556 padded = num.zeros(nfft, dtype=float) 

3557 padded[:ndata] = tr.ydata 

3558 spectrum = num.fft.rfft(padded) 

3559 df = 1.0 / (tr.deltat * nfft) 

3560 frequencies = num.arange(spectrum.size)*df 

3561 return [tr, frequencies, spectrum] 

3562 

3563 

3564def do_filter(inp, filter): 

3565 if filter is None: 

3566 return inp 

3567 else: 

3568 tr, frequencies, spectrum = inp 

3569 spectrum *= filter.evaluate(frequencies) 

3570 return [tr, frequencies, spectrum] 

3571 

3572 

3573def do_ifft(inp): 

3574 if isinstance(inp, Trace): 

3575 return inp 

3576 else: 

3577 tr, _, spectrum = inp 

3578 ndata = tr.ydata.size 

3579 tr = tr.copy(data=False) 

3580 tr.set_ydata(num.fft.irfft(spectrum)[:ndata]) 

3581 return tr 

3582 

3583 

3584def check_alignment(t1, t2): 

3585 if abs(t1.tmin-t2.tmin) > t1.deltat * 1e-4 or \ 

3586 abs(t1.tmax - t2.tmax) > t1.deltat * 1e-4 or \ 

3587 t1.ydata.shape != t2.ydata.shape: 

3588 raise MisalignedTraces( 

3589 'Cannot calculate misfit of %s and %s due to misaligned ' 

3590 'traces.' % ('.'.join(t1.nslc_id), '.'.join(t2.nslc_id)))