# https://pyrocko.org - GPLv3
#
# The Pyrocko Developers, 21st Century
# ---|P------/S----------~Lg----------

'''
This module provides basic signal processing for seismic traces.
'''

import time
import math
import copy
import logging
import hashlib
from collections import defaultdict

import numpy as num
from scipy import signal

from pyrocko import util, orthodrome, pchain, model
from pyrocko.util import reuse
from pyrocko.guts import Object, Float, Int, String, List, \
    StringChoice, Timestamp
from pyrocko.guts_array import Array
from pyrocko.model import squirrel_content

# backward compatibility
from pyrocko.util import UnavailableDecimation  # noqa
from pyrocko.response import (  # noqa
    FrequencyResponse, Evalresp, InverseEvalresp, PoleZeroResponse,
    ButterworthResponse, SampledResponse, IntegrationResponse,
    DifferentiationResponse, MultiplyResponse)


guts_prefix = 'pf'

logger = logging.getLogger('pyrocko.trace')


@squirrel_content
class Trace(Object):

    '''
    Create new trace object.

    A ``Trace`` object represents a single continuous strip of evenly sampled
    time series data. It is built from a 1D NumPy array containing the data
    samples and some attributes describing its beginning and ending time, its
    sampling rate and four string identifiers (its network, station, location
    and channel code).

    :param network: network code
    :param station: station code
    :param location: location code
    :param channel: channel code
    :param extra: extra code
    :param tmin: system time of first sample in [s]
    :param tmax: system time of last sample in [s] (if set to ``None`` it is
        computed from length of ``ydata``)
    :param deltat: sampling interval in [s]
    :param ydata: 1D numpy array with data samples (can be ``None`` when
        ``tmax`` is not ``None``)
    :param mtime: optional modification time

    :param meta: additional meta information (not used, but maintained by the
        library)

    The length of the network, station, location and channel codes is not
    restricted by this software, but data formats like SAC, Mini-SEED or GSE
    have different limits on the lengths of these codes. The codes set here
    are silently truncated when the trace is stored.
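
    A minimal construction sketch (the values are illustrative only)::

        import numpy as num
        from pyrocko.trace import Trace

        tr = Trace(
            network='XX', station='TEST', channel='BHZ',
            tmin=0.0, deltat=0.01,
            ydata=num.random.normal(size=1000))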

    '''

    network = String.T(default='', help='Deployment/network code (1-8)')
    station = String.T(default='STA', help='Station code (1-5)')
    location = String.T(default='', help='Location code (0-2)')
    channel = String.T(default='', help='Channel code (3)')
    extra = String.T(default='', help='Extra/custom code')

    tmin = Timestamp.T(default=Timestamp.D('1970-01-01 00:00:00'))
    tmax = Timestamp.T()

    deltat = Float.T(default=1.0)
    ydata = Array.T(optional=True, shape=(None,), serialize_as='base64+meta')

    mtime = Timestamp.T(optional=True)

    cached_frequencies = {}

    def __init__(self, network='', station='STA', location='', channel='',
                 tmin=0., tmax=None, deltat=1., ydata=None, mtime=None,
                 meta=None, extra=''):

        Object.__init__(self, init_props=False)

        time_float = util.get_time_float()

        if not isinstance(tmin, time_float):
            tmin = Trace.tmin.regularize_extra(tmin)

        if tmax is not None and not isinstance(tmax, time_float):
            tmax = Trace.tmax.regularize_extra(tmax)

        if mtime is not None and not isinstance(mtime, time_float):
            mtime = Trace.mtime.regularize_extra(mtime)

        if ydata is not None and not isinstance(ydata, num.ndarray):
            ydata = Trace.ydata.regularize_extra(ydata)

        self._growbuffer = None

        tmin = time_float(tmin)
        if tmax is not None:
            tmax = time_float(tmax)

        if mtime is None:
            mtime = time_float(time.time())

        self.network, self.station, self.location, self.channel, \
            self.extra = [
                reuse(x) for x in (
                    network, station, location, channel, extra)]

        self.tmin = tmin
        self.deltat = deltat

        if tmax is None:
            if ydata is not None:
                self.tmax = self.tmin + (ydata.size-1)*self.deltat
            else:
                raise Exception(
                    'fixme: trace must be created with tmax or ydata')
        else:
            n = int(round((tmax - self.tmin) / self.deltat)) + 1
            self.tmax = self.tmin + (n - 1) * self.deltat

        self.meta = meta
        self.ydata = ydata
        self.mtime = mtime
        self._update_ids()
        self.file = None
        self._pchain = None

    def __str__(self):
        fmt = min(9, max(0, -int(math.floor(math.log10(self.deltat)))))
        s = 'Trace (%s, %s, %s, %s)\n' % self.nslc_id
        s += '  timerange: %s - %s\n' % (
            util.time_to_str(self.tmin, format=fmt),
            util.time_to_str(self.tmax, format=fmt))

        s += '  delta t: %g\n' % self.deltat
        if self.meta:
            for k in sorted(self.meta.keys()):
                s += '  %s: %s\n' % (k, self.meta[k])
        return s

    @property
    def codes(self):
        from pyrocko.squirrel import model
        return model.CodesNSLCE(
            self.network, self.station, self.location, self.channel,
            self.extra)

    @property
    def time_span(self):
        return self.tmin, self.tmax

    @property
    def summary(self):
        return '%s %-16s %s %g' % (
            self.__class__.__name__, self.str_codes, self.str_time_span,
            self.deltat)

    def __getstate__(self):
        return (self.network, self.station, self.location, self.channel,
                self.tmin, self.tmax, self.deltat, self.mtime,
                self.ydata, self.meta, self.extra)

    def __setstate__(self, state):
        if len(state) == 11:
            self.network, self.station, self.location, self.channel, \
                self.tmin, self.tmax, self.deltat, self.mtime, \
                self.ydata, self.meta, self.extra = state

        elif len(state) == 12:
            # backward compatibility 0
            self.network, self.station, self.location, self.channel, \
                self.tmin, self.tmax, self.deltat, self.mtime, \
                self.ydata, self.meta, _, self.extra = state

        elif len(state) == 10:
            # backward compatibility 1
            self.network, self.station, self.location, self.channel, \
                self.tmin, self.tmax, self.deltat, self.mtime, \
                self.ydata, self.meta = state

            self.extra = ''

        else:
            # backward compatibility 2
            self.network, self.station, self.location, self.channel, \
                self.tmin, self.tmax, self.deltat, self.mtime = state
            self.ydata = None
            self.meta = None
            self.extra = ''

        self._growbuffer = None
        self._update_ids()

    def hash(self, unsafe=False):
        sha1 = hashlib.sha1()
        for code in self.nslc_id:
            sha1.update(code.encode())
        sha1.update(self.tmin.hex().encode())
        sha1.update(self.tmax.hex().encode())
        sha1.update(self.deltat.hex().encode())

        if self.ydata is not None:
            if not unsafe:
                sha1.update(memoryview(self.ydata))
            else:
                sha1.update(memoryview(self.ydata[:32]))
                sha1.update(memoryview(self.ydata[-32:]))

        return sha1.hexdigest()

    def name(self):
        '''
        Get a short string description.
        '''

        s = '%s.%s.%s.%s, %s, %s' % (self.nslc_id + (
            util.time_to_str(self.tmin),
            util.time_to_str(self.tmax)))

        return s

    def __eq__(self, other):
        return (
            isinstance(other, Trace)
            and self.network == other.network
            and self.station == other.station
            and self.location == other.location
            and self.channel == other.channel
            and (abs(self.deltat - other.deltat)
                 < (self.deltat + other.deltat)*1e-6)
            and abs(self.tmin-other.tmin) < self.deltat*0.01
            and abs(self.tmax-other.tmax) < self.deltat*0.01
            and num.all(self.ydata == other.ydata))

    def almost_equal(self, other, rtol=1e-5, atol=0.0):
        return (
            self.network == other.network
            and self.station == other.station
            and self.location == other.location
            and self.channel == other.channel
            and (abs(self.deltat - other.deltat)
                 < (self.deltat + other.deltat)*1e-6)
            and abs(self.tmin-other.tmin) < self.deltat*0.01
            and abs(self.tmax-other.tmax) < self.deltat*0.01
            and num.allclose(self.ydata, other.ydata, rtol=rtol, atol=atol))

    def assert_almost_equal(self, other, rtol=1e-5, atol=0.0):

        assert self.network == other.network, \
            'network codes differ: %s, %s' % (self.network, other.network)
        assert self.station == other.station, \
            'station codes differ: %s, %s' % (self.station, other.station)
        assert self.location == other.location, \
            'location codes differ: %s, %s' % (self.location, other.location)
        assert self.channel == other.channel, 'channel codes differ'
        assert (abs(self.deltat - other.deltat)
                < (self.deltat + other.deltat)*1e-6), \
            'sampling intervals differ %g, %g' % (self.deltat, other.deltat)
        assert abs(self.tmin-other.tmin) < self.deltat*0.01, \
            'start times differ: %s, %s' % (
                util.time_to_str(self.tmin), util.time_to_str(other.tmin))
        assert abs(self.tmax-other.tmax) < self.deltat*0.01, \
            'end times differ: %s, %s' % (
                util.time_to_str(self.tmax), util.time_to_str(other.tmax))

        assert num.allclose(self.ydata, other.ydata, rtol=rtol, atol=atol), \
            'trace values differ'

    def __hash__(self):
        return id(self)

    def __call__(self, t, clip=False, snap=round):
        it = int(snap((t-self.tmin)/self.deltat))
        if clip:
            it = max(0, min(it, self.ydata.size-1))
        else:
            if it < 0 or self.ydata.size <= it:
                raise IndexError()

        return self.tmin+it*self.deltat, self.ydata[it]

    def interpolate(self, t, clip=False):
        '''
        Value of trace between supporting points through linear interpolation.

        :param t: time instant
        :param clip: whether to clip indices to trace ends
        '''

        t0, y0 = self(t, clip=clip, snap=math.floor)
        t1, y1 = self(t, clip=clip, snap=math.ceil)
        if t0 == t1:
            return y0
        else:
            return y0+(t-t0)/(t1-t0)*(y1-y0)

    def index_clip(self, i):
        '''
        Clip index to valid range.
        '''

        return min(max(0, i), self.ydata.size)

    def add(self, other, interpolate=True, left=0., right=0.):
        '''
        Add values of other trace (self += other).

        Add values of ``other`` trace to the values of ``self``, where it
        intersects with ``other``. This method does not change the extent of
        ``self``. If ``interpolate`` is ``True`` (the default), the values of
        ``other`` to be added are interpolated at sampling instants of
        ``self``. Linear interpolation is performed. In this case the
        sampling rate of ``other`` must be equal to or lower than that of
        ``self``. If ``interpolate`` is ``False``, the sampling rates of the
        two traces must match.
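
        A minimal stacking sketch (``base`` and ``other`` stand for two
        overlapping :py:class:`Trace` objects, illustrative only)::

            base.add(other)                     # with interpolation
            base.add(other, interpolate=False)  # sampling rates must match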

        '''

        if interpolate:
            assert self.deltat <= other.deltat \
                or same_sampling_rate(self, other)

            other_xdata = other.get_xdata()
            xdata = self.get_xdata()
            self.ydata += num.interp(
                xdata, other_xdata, other.ydata, left=left, right=right)
        else:
            assert self.deltat == other.deltat
            ioff = int(round((other.tmin-self.tmin)/self.deltat))
            ibeg = max(0, ioff)
            iend = min(self.data_len(), ioff+other.data_len())
            self.ydata[ibeg:iend] += other.ydata[ibeg-ioff:iend-ioff]

    def mult(self, other, interpolate=True):
        '''
        Multiply with values of other trace ``(self *= other)``.

        Multiply values of ``other`` trace to the values of ``self``, where
        it intersects with ``other``. This method does not change the extent
        of ``self``. If ``interpolate`` is ``True`` (the default), the values
        of ``other`` to be multiplied are interpolated at sampling instants
        of ``self``. Linear interpolation is performed. In this case the
        sampling rate of ``other`` must be equal to or lower than that of
        ``self``. If ``interpolate`` is ``False``, the sampling rates of the
        two traces must match.
        '''

        if interpolate:
            assert self.deltat <= other.deltat or \
                same_sampling_rate(self, other)

            other_xdata = other.get_xdata()
            xdata = self.get_xdata()
            self.ydata *= num.interp(
                xdata, other_xdata, other.ydata, left=0., right=0.)
        else:
            assert self.deltat == other.deltat
            ibeg1 = int(round((other.tmin-self.tmin)/self.deltat))
            ibeg2 = int(round((self.tmin-other.tmin)/self.deltat))
            iend1 = int(round((other.tmax-self.tmin)/self.deltat))+1
            iend2 = int(round((self.tmax-other.tmin)/self.deltat))+1

            ibeg1 = self.index_clip(ibeg1)
            iend1 = self.index_clip(iend1)
            ibeg2 = self.index_clip(ibeg2)
            iend2 = self.index_clip(iend2)

            self.ydata[ibeg1:iend1] *= other.ydata[ibeg2:iend2]

    def max(self):
        '''
        Get time and value of data maximum.
        '''

        i = num.argmax(self.ydata)
        return self.tmin + i*self.deltat, self.ydata[i]

    def min(self):
        '''
        Get time and value of data minimum.
        '''

        i = num.argmin(self.ydata)
        return self.tmin + i*self.deltat, self.ydata[i]

    def absmax(self):
        '''
        Get time and value of maximum of the absolute of data.
        '''

        tmi, mi = self.min()
        tma, ma = self.max()
        if abs(mi) > abs(ma):
            return tmi, abs(mi)
        else:
            return tma, abs(ma)

    def set_codes(
            self, network=None, station=None, location=None, channel=None,
            extra=None):

        '''
        Set network, station, location, and channel codes.
        '''

        if network is not None:
            self.network = network
        if station is not None:
            self.station = station
        if location is not None:
            self.location = location
        if channel is not None:
            self.channel = channel
        if extra is not None:
            self.extra = extra

        self._update_ids()

    def set_network(self, network):
        self.network = network
        self._update_ids()

    def set_station(self, station):
        self.station = station
        self._update_ids()

    def set_location(self, location):
        self.location = location
        self._update_ids()

    def set_channel(self, channel):
        self.channel = channel
        self._update_ids()

    def overlaps(self, tmin, tmax):
        '''
        Check if trace has overlap with a given time span.
        '''
        return not (tmax < self.tmin or self.tmax < tmin)

    def is_relevant(self, tmin, tmax, selector=None):
        '''
        Check if trace has overlap with a given time span and matches a
        condition callback. (internal use)
        '''

        return not (tmax <= self.tmin or self.tmax < tmin) \
            and (selector is None or selector(self))

    def _update_ids(self):
        '''
        Update dependent ids.
        '''

        self.full_id = (
            self.network, self.station, self.location, self.channel,
            self.tmin)
        self.nslc_id = reuse(
            (self.network, self.station, self.location, self.channel))

    def prune_from_reuse_cache(self):
        util.deuse(self.nslc_id)
        util.deuse(self.network)
        util.deuse(self.station)
        util.deuse(self.location)
        util.deuse(self.channel)

    def set_mtime(self, mtime):
        '''
        Set modification time of the trace.
        '''

        self.mtime = mtime

    def get_xdata(self):
        '''
        Create array for time axis.
        '''

        if self.ydata is None:
            raise NoData()

        return self.tmin \
            + num.arange(len(self.ydata), dtype=num.float64) * self.deltat

    def get_ydata(self):
        '''
        Get data array.
        '''

        if self.ydata is None:
            raise NoData()

        return self.ydata

    def set_ydata(self, new_ydata):
        '''
        Replace data array.
        '''

        self.drop_growbuffer()
        self.ydata = new_ydata
        self.tmax = self.tmin+(len(self.ydata)-1)*self.deltat

    def data_len(self):
        if self.ydata is not None:
            return self.ydata.size
        else:
            return int(round((self.tmax-self.tmin)/self.deltat)) + 1

    def drop_data(self):
        '''
        Forget data, make dataless trace.
        '''

        self.drop_growbuffer()
        self.ydata = None

    def drop_growbuffer(self):
        '''
        Detach the trace's grow buffer.
        '''

        self._growbuffer = None
        self._pchain = None

    def copy(self, data=True):
        '''
        Make a deep copy of the trace.
        '''

        tracecopy = copy.copy(self)
        tracecopy.drop_growbuffer()
        if data:
            tracecopy.ydata = self.ydata.copy()
        tracecopy.meta = copy.deepcopy(self.meta)
        return tracecopy

    def crop_zeros(self):
        '''
        Remove any zeros at beginning and end.
        '''

        indices = num.where(self.ydata != 0.0)[0]
        if indices.size == 0:
            raise NoData()

        ibeg = indices[0]
        iend = indices[-1]+1
        if ibeg == 0 and iend == self.ydata.size:
            return

        self.drop_growbuffer()
        self.ydata = self.ydata[ibeg:iend].copy()
        self.tmin = self.tmin+ibeg*self.deltat
        self.tmax = self.tmin+(len(self.ydata)-1)*self.deltat
        self._update_ids()

    def append(self, data):
        '''
        Append data to the end of the trace.

        To make this method efficient when successively very few or even
        single samples are appended, a larger grow buffer is allocated upon
        first invocation. The trace's data is then changed to be a view into
        the currently filled portion of the grow buffer array.
        '''

        assert self.ydata.dtype == data.dtype
        newlen = data.size + self.ydata.size
        if self._growbuffer is None or self._growbuffer.size < newlen:
            self._growbuffer = num.empty(newlen*2, dtype=self.ydata.dtype)
            self._growbuffer[:self.ydata.size] = self.ydata
        self._growbuffer[self.ydata.size:newlen] = data
        self.ydata = self._growbuffer[:newlen]
        self.tmax = self.tmin + (newlen-1)*self.deltat

    def chop(
            self, tmin, tmax, inplace=True, include_last=False,
            snap=(round, round), want_incomplete=True):

        '''
        Cut the trace to given time span.

        If the ``inplace`` argument is True (the default) the trace is cut in
        place, otherwise a new trace with the cut part is returned. By
        default, the indices where to start and end the trace data array are
        determined by rounding of ``tmin`` and ``tmax`` to sampling instants
        using Python's :py:func:`round` function. This behaviour can be
        changed with the ``snap`` argument, which takes a tuple of two
        functions (one for the lower and one for the upper end) to be used
        instead of :py:func:`round`. The last sample is by default not
        included unless ``include_last`` is set to True. If the given time
        span exceeds the available time span of the trace, the available part
        is returned, unless ``want_incomplete`` is set to False - in that
        case, a :py:exc:`NoData` exception is raised. This exception is
        always raised, when the requested time span does not overlap with the
        trace's time span.
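
        A minimal sketch (illustrative times, assuming ``tr`` spans the
        requested interval)::

            snippet = tr.chop(tr.tmin + 10., tr.tmin + 20., inplace=False)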

        '''

        if want_incomplete:
            if tmax <= self.tmin-self.deltat or self.tmax+self.deltat < tmin:
                raise NoData()
        else:
            if tmin < self.tmin or self.tmax < tmax:
                raise NoData()

        ibeg = max(0, t2ind(tmin-self.tmin, self.deltat, snap[0]))
        iplus = 0
        if include_last:
            iplus = 1

        iend = min(
            self.data_len(),
            t2ind(tmax-self.tmin, self.deltat, snap[1])+iplus)

        if ibeg >= iend:
            raise NoData()

        obj = self
        if not inplace:
            obj = self.copy(data=False)

        self.drop_growbuffer()
        if self.ydata is not None:
            obj.ydata = self.ydata[ibeg:iend].copy()
        else:
            obj.ydata = None

        obj.tmin = obj.tmin+ibeg*obj.deltat
        obj.tmax = obj.tmin+((iend-ibeg)-1)*obj.deltat

        obj._update_ids()

        return obj

    def downsample(self, ndecimate, snap=False, initials=None, demean=False,
                   ftype='fir-remez'):
        '''
        Downsample trace by a given integer factor.

        Antialiasing filter details:

        * ``iir``: A Chebyshev type I digital filter of order 8 with maximum
          ripple of 0.05 dB in the passband.
        * ``fir``: A FIR filter using a Hamming window and 31 taps and a
          well-behaved phase response.
        * ``fir-remez``: A FIR filter based on the Remez exchange algorithm
          with ``45*ndecimate`` taps and a cutoff at 75% Nyquist frequency.

        Comparison of the digital filters:

        .. figure:: ../../static/downsampling-filter-comparison.png
            :width: 60%
            :alt: Comparison of the downsampling filters.

        :param ndecimate: decimation factor, avoid values larger than 8
        :param snap: whether to put the new sampling instants closest to
            multiples of the sampling rate.
        :param initials: ``None``, ``True``, or initial conditions for the
            anti-aliasing filter, obtained from a previous run. In the latter
            two cases the final state of the filter is returned instead of
            ``None``.
        :param demean: whether to demean the signal before filtering.
        :param ftype: which filter to use, choose from
            ``'iir'``, ``'fir'``, ``'fir-remez'``. Default is ``'fir-remez'``.
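
        A minimal sketch (illustrative, assuming ``tr.deltat`` is 0.01 s)::

            tr.downsample(4)   # new sampling interval: 0.04 s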

        '''
        newdeltat = self.deltat*ndecimate
        if snap:
            ilag = int(round(
                (math.ceil(self.tmin / newdeltat) * newdeltat - self.tmin)
                / self.deltat))
        else:
            ilag = 0

        if snap and ilag > 0 and ilag < self.ydata.size:
            data = self.ydata.astype(num.float64)
            self.tmin += ilag*self.deltat
        else:
            data = self.ydata.astype(num.float64)

        if demean:
            data -= num.mean(data)

        if data.size != 0:
            result = util.decimate(
                data, ndecimate, ftype=ftype, zi=initials, ioff=ilag)
        else:
            result = data

        if initials is None:
            self.ydata, finals = result, None
        else:
            self.ydata, finals = result

        self.deltat = reuse(self.deltat*ndecimate)
        self.tmax = self.tmin+(len(self.ydata)-1)*self.deltat
        self._update_ids()

        return finals

    def downsample_to(self, deltat, snap=False, allow_upsample_max=1,
                      initials=None, demean=False, ftype='fir-remez'):

        '''
        Downsample to given sampling rate.

        Tries to downsample the trace to a target sampling interval of
        ``deltat``. This runs the :py:meth:`Trace.downsample` one or several
        times. If ``allow_upsample_max`` is set to a value larger than 1,
        intermediate upsampling steps are allowed, in order to increase the
        number of possible downsampling ratios.

        If the requested ratio is not supported, an exception of type
        :py:exc:`pyrocko.util.UnavailableDecimation` is raised.

        :param deltat: desired sampling interval in [s]
        :param allow_upsample_max: maximum upsampling of the signal to
            achieve the desired deltat. Default is ``1``.
        :param snap: whether to put the new sampling instants closest to
            multiples of the sampling rate.
        :param initials: ``None``, ``True``, or initial conditions for the
            anti-aliasing filter, obtained from a previous run. In the latter
            two cases the final state of the filter is returned instead of
            ``None``.
        :param demean: whether to demean the signal before filtering.
        :param ftype: which filter to use, choose from
            ``'iir'``, ``'fir'``, ``'fir-remez'``. Default is ``'fir-remez'``.

        See also: :py:meth:`Trace.downsample`
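
        A minimal sketch (illustrative, assuming the resulting decimation
        ratio is supported)::

            tr.downsample_to(0.05, allow_upsample_max=2)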

        '''

        ratio = deltat/self.deltat
        rratio = round(ratio)

        ok = False
        for upsratio in range(1, allow_upsample_max+1):
            dratio = (upsratio/self.deltat) / (1./deltat)
            if abs(dratio - round(dratio)) / dratio < 0.0001 and \
                    util.decitab(int(round(dratio))):

                ok = True
                break

        if not ok:
            raise util.UnavailableDecimation('ratio = %g' % ratio)

        if upsratio > 1:
            self.drop_growbuffer()
            ydata = self.ydata
            self.ydata = num.zeros(
                ydata.size*upsratio-(upsratio-1), ydata.dtype)
            self.ydata[::upsratio] = ydata
            for i in range(1, upsratio):
                # linear interpolation: weight i/upsratio on the right
                # neighbour, (upsratio-i)/upsratio on the left one
                self.ydata[i::upsratio] = \
                    float(upsratio-i)/upsratio * ydata[:-1] \
                    + float(i)/upsratio * ydata[1:]
            self.deltat = self.deltat/upsratio

        ratio = deltat/self.deltat
        rratio = round(ratio)

        deci_seq = util.decitab(int(rratio))
        finals = []
        for i, ndecimate in enumerate(deci_seq):
            if ndecimate != 1:
                xinitials = None
                if initials is not None:
                    xinitials = initials[i]
                finals.append(self.downsample(
                    ndecimate, snap=snap, initials=xinitials, demean=demean,
                    ftype=ftype))

        if initials is not None:
            return finals

    def resample(self, deltat):
        '''
        Resample to given sampling rate ``deltat``.

        Resampling is performed in the frequency domain.
        '''

        ndata = self.ydata.size
        ntrans = nextpow2(ndata)
        fntrans2 = ntrans * self.deltat/deltat
        ntrans2 = int(round(fntrans2))
        deltat2 = self.deltat * float(ntrans)/float(ntrans2)
        ndata2 = int(round(ndata*self.deltat/deltat2))
        if abs(fntrans2 - ntrans2) > 1e-7:
            logger.warning(
                'resample: requested deltat %g could not be matched exactly: '
                '%g' % (deltat, deltat2))

        data = self.ydata
        data_pad = num.zeros(ntrans, dtype=float)
        data_pad[:ndata] = data
        fdata = num.fft.rfft(data_pad)
        fdata2 = num.zeros((ntrans2+1)//2, dtype=fdata.dtype)
        n = min(fdata.size, fdata2.size)
        fdata2[:n] = fdata[:n]
        data2 = num.fft.irfft(fdata2)
        data2 = data2[:ndata2]
        data2 *= float(ntrans2) / float(ntrans)
        self.deltat = deltat2
        self.set_ydata(data2)

    def resample_simple(self, deltat):
        tyear = 3600*24*365.

        if deltat == self.deltat:
            return

        if abs(self.deltat - deltat) * tyear/deltat < deltat:
            logger.warning(
                'resample_simple: less than one sample would have to be '
                'inserted/deleted per year. Doing nothing.')
            return

        ninterval = int(round(deltat / (self.deltat - deltat)))
        if abs(ninterval) < 20:
            logger.error(
                'resample_simple: sample insertion/deletion interval less '
                'than 20. Results would be erroneous.')
            raise ResamplingFailed()

        delete = False
        if ninterval < 0:
            ninterval = -ninterval
            delete = True

        tyearbegin = util.year_start(self.tmin)

        nmin = int(round((self.tmin - tyearbegin)/deltat))

        ibegin = (((nmin-1)//ninterval)+1) * ninterval - nmin
        nindices = (len(self.ydata) - ibegin - 1) // ninterval + 1
        if nindices > 0:
            indices = ibegin + num.arange(nindices) * ninterval
            data_split = num.split(self.ydata, indices)
            data = []
            for ln, h in zip(data_split[:-1], data_split[1:]):
                if delete:
                    ln = ln[:-1]

                data.append(ln)
                if not delete:
                    if ln.size == 0:
                        v = h[0]
                    else:
                        v = 0.5*(ln[-1] + h[0])
                    data.append(num.array([v], dtype=ln.dtype))

            data.append(data_split[-1])

            ydata_new = num.concatenate(data)

            self.tmin = tyearbegin + nmin * deltat
            self.deltat = deltat
            self.set_ydata(ydata_new)

    def stretch(self, tmin_new, tmax_new):
        '''
        Stretch signal while preserving sample rate using sinc interpolation.

        :param tmin_new: new time of first sample
        :param tmax_new: new time of last sample

        This method can be used to correct for a small linear time drift or
        to introduce sub-sample time shifts. The amount of stretching is
        limited to 10% by the implementation and is expected to be much
        smaller than that by the approximations used.
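
        A minimal sketch introducing a sub-sample time shift of 0.2 samples
        (illustrative values)::

            tr.stretch(tr.tmin + 0.2*tr.deltat, tr.tmax + 0.2*tr.deltat)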

        '''

        from pyrocko import signal_ext

        i_control = num.array([0, self.ydata.size-1], dtype=num.int64)
        t_control = num.array([tmin_new, tmax_new], dtype=float)

        r = (tmax_new - tmin_new) / self.deltat + 1.0
        n_new = int(round(r))
        if abs(n_new - r) > 0.001:
            n_new = int(math.floor(r))

        assert n_new >= 2

        tmax_new = tmin_new + (n_new-1) * self.deltat

        ydata_new = num.empty(n_new, dtype=float)
        signal_ext.antidrift(i_control, t_control,
                             self.ydata.astype(float),
                             tmin_new, self.deltat, ydata_new)

        self.tmin = tmin_new
        self.set_ydata(ydata_new)
        self._update_ids()

    def nyquist_check(self, frequency, intro='Corner frequency', warn=True,
                      raise_exception=False):

        '''
        Check if a given frequency is above the Nyquist frequency of the
        trace.

        :param frequency: frequency to check in [Hz]
        :param intro: string used to introduce the warning/error message
        :param warn: whether to emit a warning
        :param raise_exception: whether to raise an :py:exc:`AboveNyquist`
            exception.
        '''

        if frequency >= 0.5/self.deltat:
            message = '%s (%g Hz) is equal to or higher than Nyquist ' \
                'frequency (%g Hz). (Trace %s)' \
                % (intro, frequency, 0.5/self.deltat, self.name())
            if warn:
                logger.warning(message)
            if raise_exception:
                raise AboveNyquist(message)

    def lowpass(self, order, corner, nyquist_warn=True,
                nyquist_exception=False, demean=True):

        '''
        Apply Butterworth lowpass to the trace.

        :param order: order of the filter
        :param corner: corner frequency of the filter

        Mean is removed before filtering.
        '''

        self.nyquist_check(
            corner, 'Corner frequency of lowpass', nyquist_warn,
            nyquist_exception)

        (b, a) = _get_cached_filter_coefs(
            order, [corner*2.0*self.deltat], btype='low')

        if len(a) != order+1 or len(b) != order+1:
            logger.warning(
                'Erroneous filter coefficients returned by '
                'scipy.signal.butter(). You may need to downsample the '
                'signal before filtering.')

        data = self.ydata.astype(num.float64)
        if demean:
            data -= num.mean(data)
        self.drop_growbuffer()
        self.ydata = signal.lfilter(b, a, data)

    def highpass(self, order, corner, nyquist_warn=True,
                 nyquist_exception=False, demean=True):

        '''
        Apply Butterworth highpass to the trace.

        :param order: order of the filter
        :param corner: corner frequency of the filter

        Mean is removed before filtering.
        '''

        self.nyquist_check(
            corner, 'Corner frequency of highpass', nyquist_warn,
            nyquist_exception)

        (b, a) = _get_cached_filter_coefs(
            order, [corner*2.0*self.deltat], btype='high')

        data = self.ydata.astype(num.float64)
        if len(a) != order+1 or len(b) != order+1:
            logger.warning(
                'Erroneous filter coefficients returned by '
                'scipy.signal.butter(). You may need to downsample the '
                'signal before filtering.')
        if demean:
            data -= num.mean(data)
        self.drop_growbuffer()
        self.ydata = signal.lfilter(b, a, data)

    def bandpass(self, order, corner_hp, corner_lp, demean=True):
        '''
        Apply Butterworth bandpass to the trace.

        :param order: order of the filter
        :param corner_hp: lower corner frequency of the filter
        :param corner_lp: upper corner frequency of the filter

        Mean is removed before filtering.
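
        A minimal sketch (illustrative corner frequencies in Hz, assuming
        they are below the Nyquist frequency of the trace)::

            tr.bandpass(4, 0.1, 2.0)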

        '''

        self.nyquist_check(corner_hp, 'Lower corner frequency of bandpass')
        self.nyquist_check(corner_lp, 'Higher corner frequency of bandpass')
        (b, a) = _get_cached_filter_coefs(
            order,
            [corner*2.0*self.deltat for corner in (corner_hp, corner_lp)],
            btype='band')
        data = self.ydata.astype(num.float64)
        if demean:
            data -= num.mean(data)
        self.drop_growbuffer()
        self.ydata = signal.lfilter(b, a, data)

    def bandstop(self, order, corner_hp, corner_lp, demean=True):
        '''
        Apply bandstop (attenuates frequencies in band) to the trace.

        :param order: order of the filter
        :param corner_hp: lower corner frequency of the filter
        :param corner_lp: upper corner frequency of the filter

        Mean is removed before filtering.
        '''

        self.nyquist_check(corner_hp, 'Lower corner frequency of bandstop')
        self.nyquist_check(corner_lp, 'Higher corner frequency of bandstop')
        (b, a) = _get_cached_filter_coefs(
            order,
            [corner*2.0*self.deltat for corner in (corner_hp, corner_lp)],
            btype='bandstop')
        data = self.ydata.astype(num.float64)
        if demean:
            data -= num.mean(data)
        self.drop_growbuffer()
        self.ydata = signal.lfilter(b, a, data)

    def envelope(self, inplace=True):
        '''
        Calculate the envelope of the trace.

        :param inplace: calculate envelope in place

        The calculation follows:

        .. math::

            Y' = \\sqrt{Y^2+H(Y)^2}

        where H is the Hilbert transform of the signal Y.
        '''

        y = self.ydata.astype(float)
        env = num.abs(hilbert(y))
        if inplace:
            self.drop_growbuffer()
            self.ydata = env
        else:
            tr = self.copy(data=False)
            tr.ydata = env
            return tr

    def taper(self, taperer, inplace=True, chop=False):
        '''
        Apply a :py:class:`Taper` to the trace.

        :param taperer: instance of :py:class:`Taper` subclass
        :param inplace: apply taper inplace
        :param chop: if ``True``: exclude tapered parts from the resulting
            trace
        '''

        if not inplace:
            tr = self.copy()
        else:
            tr = self

        if chop:
            i, n = taperer.span(tr.ydata, tr.tmin, tr.deltat)
            tr.shift(i*tr.deltat)
            tr.set_ydata(tr.ydata[i:i+n])

        taperer(tr.ydata, tr.tmin, tr.deltat)

        if not inplace:
            return tr

    def whiten(self, order=6):
        '''
        Whiten signal in time domain using autoregression and recursive
        filter.

        :param order: order of the autoregression process
        '''

        b, a = self.whitening_coefficients(order)
        self.drop_growbuffer()
        self.ydata = signal.lfilter(b, a, self.ydata)

    def whitening_coefficients(self, order=6):
        ar = yulewalker(self.ydata, order)
        b, a = [1.] + ar.tolist(), [1.]
        return b, a

    def ampspec_whiten(
            self,
            width,
            td_taper='auto',
            fd_taper='auto',
            pad_to_pow2=True,
            demean=True):

        '''
        Whiten signal via frequency domain using moving average on amplitude
        spectra.

        :param width: width of smoothing kernel [Hz]
        :param td_taper: time domain taper, object of type :py:class:`Taper`
            or ``None`` or ``'auto'``.
        :param fd_taper: frequency domain taper, object of type
            :py:class:`Taper` or ``None`` or ``'auto'``.
        :param pad_to_pow2: whether to pad the signal with zeros up to a
            length of 2^n
        :param demean: whether to demean the signal before tapering

        The signal is first demeaned and then tapered using ``td_taper``.
        Then, the spectrum is calculated and inversely weighted with a
        smoothed version of its amplitude spectrum. A moving average is used
        for the smoothing. The smoothed spectrum is then tapered using
        ``fd_taper``. Finally, the smoothed and tapered spectrum is
        back-transformed into the time domain.

        If ``td_taper`` is set to ``'auto'``, ``CosFader(1.0/width)`` is
        used. If ``fd_taper`` is set to ``'auto'``, ``CosFader(width)`` is
        used.
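
        A minimal sketch (illustrative smoothing width in Hz)::

            tr.ampspec_whiten(0.5)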

        '''

        ndata = self.data_len()

        if pad_to_pow2:
            ntrans = nextpow2(ndata)
        else:
            ntrans = ndata

        df = 1./(ntrans*self.deltat)
        nw = int(round(width/df))
        if ndata//2+1 <= nw:
            raise TraceTooShort(
                'Samples in trace: %s, samples needed: %s' % (ndata, nw))

        if td_taper == 'auto':
            td_taper = CosFader(1./width)

        if fd_taper == 'auto':
            fd_taper = CosFader(width)

        if td_taper:
            self.taper(td_taper)

        ydata = self.get_ydata().astype(float)
        if demean:
            ydata -= ydata.mean()

        spec = num.fft.rfft(ydata, ntrans)

        amp = num.abs(spec)
        nspec = amp.size
        csamp = num.cumsum(amp)
        amp_smoothed = num.empty(nspec, dtype=csamp.dtype)
        n1, n2 = nw//2, nw//2 + nspec - nw
        amp_smoothed[n1:n2] = (csamp[nw:] - csamp[:-nw]) / nw
        amp_smoothed[:n1] = amp_smoothed[n1]
        amp_smoothed[n2:] = amp_smoothed[n2-1]

        denom = amp_smoothed * amp
        numer = amp
        eps = num.mean(denom) * 1e-9
        if eps == 0.0:
            eps = 1e-9

        numer += eps
        denom += eps
        spec *= numer/denom

        if fd_taper:
            fd_taper(spec, 0., df)

        ydata = num.fft.irfft(spec)
        self.set_ydata(ydata[:ndata])

    def _get_cached_freqs(self, nf, deltaf):
        ck = (nf, deltaf)
        if ck not in Trace.cached_frequencies:
            Trace.cached_frequencies[ck] = deltaf * num.arange(
                nf, dtype=float)

        return Trace.cached_frequencies[ck]

    def bandpass_fft(self, corner_hp, corner_lp):
        '''
        Apply boxcar bandpass to trace (in spectral domain).
        '''

        n = len(self.ydata)
        n2 = nextpow2(n)
        data = num.zeros(n2, dtype=num.float64)
        data[:n] = self.ydata
        fdata = num.fft.rfft(data)
        freqs = self._get_cached_freqs(len(fdata), 1./(self.deltat*n2))
        fdata[0] = 0.0
        fdata *= num.logical_and(corner_hp < freqs, freqs < corner_lp)
        data = num.fft.irfft(fdata)
        self.drop_growbuffer()
        self.ydata = data[:n]

    def shift(self, tshift):
        '''
        Time shift the trace.
        '''

        self.tmin += tshift
        self.tmax += tshift
        self._update_ids()

    def snap(self, inplace=True, interpolate=False):
        '''
        Shift trace samples to nearest even multiples of the sampling rate.

        :param inplace: (boolean) snap traces inplace

        If ``inplace`` is ``False`` and the differences between the snapped
        and the original values of ``tmin`` and ``tmax`` are both smaller
        than 0.01 x deltat, :py:func:`snap` returns the unsnapped instance of
        the original trace.
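
        A minimal sketch (illustrative, assuming ``tr.tmin`` is not aligned
        to a multiple of ``tr.deltat``)::

            tr_snapped = tr.snap(inplace=False, interpolate=True)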

        '''

        tmin = round(self.tmin/self.deltat)*self.deltat
        tmax = tmin + (self.ydata.size-1)*self.deltat

        if inplace:
            xself = self
        else:
            if abs(self.tmin - tmin) < 1e-2*self.deltat and \
                    abs(self.tmax - tmax) < 1e-2*self.deltat:
                return self

            xself = self.copy()

        if interpolate:
            from pyrocko import signal_ext
            n = xself.data_len()
            ydata_new = num.empty(n, dtype=float)
            i_control = num.array([0, n-1], dtype=num.int64)
            tref = tmin
            t_control = num.array(
                [float(xself.tmin-tref), float(xself.tmax-tref)],
                dtype=float)

            signal_ext.antidrift(i_control, t_control,
                                 xself.ydata.astype(float),
                                 float(tmin-tref), xself.deltat, ydata_new)

            xself.ydata = ydata_new

        xself.tmin = tmin
        xself.tmax = tmax
        xself._update_ids()

        return xself

    def fix_deltat_rounding_errors(self):
        '''
        Try to undo sampling rate rounding errors.

        See :py:func:`fix_deltat_rounding_errors`.
        '''

        self.deltat = fix_deltat_rounding_errors(self.deltat)
        self.tmax = self.tmin + (self.data_len() - 1) * self.deltat

    def sta_lta_centered(self, tshort, tlong, quad=True, scalingmethod=1):
        '''
        Run special STA/LTA filter where the short time window is centered on
        the long time window.

        :param tshort: length of short time window in [s]
        :param tlong: length of long time window in [s]
        :param quad: whether to square the data prior to applying the STA/LTA
            filter
        :param scalingmethod: integer key to select how output values are
            scaled / normalized (``1``, ``2``, or ``3``)

        ============= ====================================== ===========
        Scalingmethod Implementation                         Range
        ============= ====================================== ===========
        ``1``         As/Al* Ts/Tl                           [0,1]
        ``2``         (As/Al - 1) / (Tl/Ts - 1)              [-Ts/Tl,1]
        ``3``         Like ``2`` but clipping range at zero  [0,1]
        ============= ====================================== ===========
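
        A minimal sketch (illustrative window lengths in seconds; see the
        table above for the output scaling)::

            tr.sta_lta_centered(2., 20.)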

        '''

        nshort = int(round(tshort/self.deltat))
        nlong = int(round(tlong/self.deltat))

        assert nshort < nlong
        if nlong > len(self.ydata):
            raise TraceTooShort(
                'Samples in trace: %s, samples needed: %s'
                % (len(self.ydata), nlong))

        if quad:
            sqrdata = self.ydata**2
        else:
            sqrdata = self.ydata

        mavg_short = moving_avg(sqrdata, nshort)
        mavg_long = moving_avg(sqrdata, nlong)

        self.drop_growbuffer()

        if scalingmethod not in (1, 2, 3):
            raise Exception('Invalid argument to scalingrange argument.')

        if scalingmethod == 1:
            self.ydata = mavg_short/mavg_long * float(nshort)/float(nlong)
        elif scalingmethod in (2, 3):
            self.ydata = (mavg_short/mavg_long - 1.) \
                / ((float(nlong)/float(nshort)) - 1)

            if scalingmethod == 3:
                self.ydata = num.maximum(self.ydata, 0.)

    def sta_lta_right(self, tshort, tlong, quad=True, scalingmethod=1):
        '''
        Run special STA/LTA filter where the short time window is overlapping
        with the last part of the long time window.

        :param tshort: length of short time window in [s]
        :param tlong: length of long time window in [s]
        :param quad: whether to square the data prior to applying the STA/LTA
            filter
        :param scalingmethod: integer key to select how output values are
            scaled / normalized (``1``, ``2``, or ``3``)

        ============= ====================================== ===========
        Scalingmethod Implementation                         Range
        ============= ====================================== ===========
        ``1``         As/Al* Ts/Tl                           [0,1]
        ``2``         (As/Al - 1) / (Tl/Ts - 1)              [-Ts/Tl,1]
        ``3``         Like ``2`` but clipping range at zero  [0,1]
        ============= ====================================== ===========

        With ``scalingmethod=1``, the values produced by this variant of the
        STA/LTA are equivalent to

        .. math::
            s_i = \\frac{s}{l} \\frac{\\frac{1}{s}\\sum_{j=i}^{i+s-1} f_j}
                               {\\frac{1}{l}\\sum_{j=i+s-l}^{i+s-1} f_j}

        where :math:`f_j` are the input samples, :math:`s` is the number of
        samples in the short time window and :math:`l` is the number of
        samples in the long time window.
        '''

        n = self.data_len()
        tmin = self.tmin

        nshort = max(1, int(round(tshort/self.deltat)))
        nlong = max(1, int(round(tlong/self.deltat)))

        assert nshort < nlong

        if nlong > len(self.ydata):
            raise TraceTooShort(
                'Samples in trace: %s, samples needed: %s'
                % (len(self.ydata), nlong))

        if quad:
            sqrdata = self.ydata**2
        else:
            sqrdata = self.ydata

        nshift = int(math.floor(0.5 * (nlong - nshort)))
        if nlong % 2 != 0 and nshort % 2 == 0:
            nshift += 1

        mavg_short = moving_avg(sqrdata, nshort)[nshift:]
        mavg_long = moving_avg(sqrdata, nlong)[:sqrdata.size-nshift]

        self.drop_growbuffer()

        if scalingmethod not in (1, 2, 3):
            raise Exception('Invalid argument to scalingrange argument.')

        if scalingmethod == 1:
            ydata = mavg_short/mavg_long * nshort/nlong
        elif scalingmethod in (2, 3):
            ydata = (mavg_short/mavg_long - 1.) \
                / ((float(nlong)/float(nshort)) - 1)

            if scalingmethod == 3:
                ydata = num.maximum(ydata, 0.)

        self.set_ydata(ydata)

        self.shift((math.ceil(0.5*nlong) - nshort + 1) * self.deltat)

        self.chop(
            tmin + (nlong - nshort) * self.deltat,
            tmin + (n - nshort) * self.deltat)

    def peaks(self, threshold, tsearch,
              deadtime=False,
              nblock_duration_detection=100):

        '''
        Detect peaks above a given threshold (method 1).

        From every instant, where the signal rises above ``threshold``, a
        time length of ``tsearch`` seconds is searched for a maximum. A list
        with tuples (time, value) for each detected peak is returned. The
        ``deadtime`` argument turns on a special deadtime duration detection
        algorithm useful in combination with recursive STA/LTA filters.
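
        A minimal sketch (illustrative threshold and search window)::

            tpeaks, apeaks = tr.peaks(threshold=3., tsearch=1.)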

        '''

        y = self.ydata
        above = num.where(y > threshold, 1, 0)
        deriv = num.zeros(y.size, dtype=num.int8)
        deriv[1:] = above[1:]-above[:-1]
        itrig_positions = num.nonzero(deriv > 0)[0]
        tpeaks = []
        apeaks = []
        tzeros = []
        tzero = self.tmin

        for itrig_pos in itrig_positions:
            ibeg = itrig_pos
            iend = min(
                len(self.ydata),
                itrig_pos + int(math.ceil(tsearch/self.deltat)))
            ipeak = num.argmax(y[ibeg:iend])
            tpeak = self.tmin + (ipeak+ibeg)*self.deltat
            apeak = y[ibeg+ipeak]

            if tpeak < tzero:
                continue

            if deadtime:
                ibeg = itrig_pos
                iblock = 0
                nblock = nblock_duration_detection
                totalsum = 0.
                while True:
                    if ibeg+iblock*nblock >= len(y):
                        tzero = self.tmin + (len(y)-1) * self.deltat
                        break

                    logy = num.log(
                        y[ibeg+iblock*nblock:ibeg+(iblock+1)*nblock])
                    logy[0] += totalsum
                    ysum = num.cumsum(logy)
                    totalsum = ysum[-1]
                    below = num.where(ysum <= 0., 1, 0)
                    deriv = num.zeros(ysum.size, dtype=num.int8)
                    deriv[1:] = below[1:]-below[:-1]
                    izero_positions = num.nonzero(deriv > 0)[0] + iblock*nblock
                    if len(izero_positions) > 0:
                        tzero = self.tmin + self.deltat * (
                            ibeg + izero_positions[0])
                        break
                    iblock += 1
            else:
                tzero = ibeg*self.deltat + self.tmin + tsearch

            tpeaks.append(tpeak)
            apeaks.append(apeak)
            tzeros.append(tzero)

        if deadtime:
            return tpeaks, apeaks, tzeros
        else:
            return tpeaks, apeaks

1482 

1483 def peaks2(self, threshold, tsearch): 

1484 

1485 ''' 

1486 Detect peaks above a given threshold (method 2). 

1487 

1488 This variant of peak detection is a bit more robust (and slower) than 

1489 the one implemented in :py:meth:`Trace.peaks`. First all samples with 

1490 ``a[i-1] < a[i] > a[i+1]`` are masked as potential peaks. From these, 

1491 iteratively the one with the maximum amplitude ``a[j]`` and time 

1492 ``t[j]`` is choosen and potential peaks within 

1493 ``t[j] - tsearch, t[j] + tsearch`` 

1494 are discarded. The algorithm stops, when ``a[j] < threshold`` or when 

1495 no more potential peaks are left. 

1496 ''' 

1497 

1498 a = num.copy(self.ydata) 

1499 

1500 amin = num.min(a) 

1501 

1502 a[0] = amin 

1503 a[1: -1][num.diff(a, 2) <= 0.] = amin 

1504 a[-1] = amin 

1505 

1506 data = [] 

1507 while True: 

1508 imax = num.argmax(a) 

1509 amax = a[imax] 

1510 

1511 if amax < threshold or amax == amin: 

1512 break 

1513 

1514 data.append((self.tmin + imax * self.deltat, amax)) 

1515 

1516 ntsearch = int(round(tsearch / self.deltat)) 

1517 a[max(imax-ntsearch//2, 0):min(imax+ntsearch//2, a.size)] = amin 

1518 

1519 if data: 

1520 data.sort() 

1521 tpeaks, apeaks = list(zip(*data)) 

1522 else: 

1523 tpeaks, apeaks = [], [] 

1524 

1525 return tpeaks, apeaks 

1526 

1527 def extend(self, tmin=None, tmax=None, fillmethod='zeros'): 

1528 ''' 

1529 Extend trace to given span. 

1530 

1531 :param tmin: begin time of new span 

1532 :param tmax: end time of new span 

1533 :param fillmethod: ``'zeros'``, ``'repeat'``, ``'mean'``, or 

1534 ``'median'`` 
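
        A minimal sketch padding 10 s on either side (illustrative values)::

            tr.extend(tr.tmin - 10., tr.tmax + 10., fillmethod='repeat')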

        '''

        nold = self.ydata.size

        if tmin is not None:
            nl = min(0, int(round((tmin-self.tmin)/self.deltat)))
        else:
            nl = 0

        if tmax is not None:
            nh = max(nold - 1, int(round((tmax-self.tmin)/self.deltat)))
        else:
            nh = nold - 1

        n = nh - nl + 1
        data = num.zeros(n, dtype=self.ydata.dtype)
        data[-nl:-nl + nold] = self.ydata
        if self.ydata.size >= 1:
            if fillmethod == 'repeat':
                data[:-nl] = self.ydata[0]
                data[-nl + nold:] = self.ydata[-1]
            elif fillmethod == 'median':
                v = num.median(self.ydata)
                data[:-nl] = v
                data[-nl + nold:] = v
            elif fillmethod == 'mean':
                v = num.mean(self.ydata)
                data[:-nl] = v
                data[-nl + nold:] = v

        self.drop_growbuffer()
        self.ydata = data

        self.tmin += nl * self.deltat
        self.tmax = self.tmin + (self.ydata.size - 1) * self.deltat

        self._update_ids()

    def transfer(self,
                 tfade=0.,
                 freqlimits=None,
                 transfer_function=None,
                 cut_off_fading=True,
                 demean=True,
                 invert=False):

        '''
        Return new trace with transfer function applied (convolution).

        :param tfade: rise/fall time in seconds of taper applied in time
            domain at both ends of trace.
        :param freqlimits: 4-tuple with corner frequencies in Hz.
        :param transfer_function: FrequencyResponse object; must provide a
            method 'evaluate(freqs)', which returns the transfer function
            coefficients at the frequencies 'freqs'.
        :param cut_off_fading: whether to cut off rise/fall interval in
            output trace.
        :param demean: remove mean before applying transfer function
        :param invert: set to True to do a deconvolution
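
        A minimal deconvolution sketch (illustrative fade time and frequency
        band; ``resp`` stands for some previously obtained
        :py:class:`~pyrocko.response.FrequencyResponse` instance)::

            restituted = tr.transfer(
                tfade=10.,
                freqlimits=(0.01, 0.02, 5., 10.),
                transfer_function=resp,
                invert=True)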

        '''

        if transfer_function is None:
            transfer_function = FrequencyResponse()

        if self.tmax - self.tmin <= tfade*2.:
            raise TraceTooShort(
                'Trace %s.%s.%s.%s too short for fading length setting. '
                'trace length = %g, fading length = %g'
                % (self.nslc_id + (self.tmax-self.tmin, tfade)))

        if freqlimits is None and (
                transfer_function is None or transfer_function.is_scalar()):

            # special case for flat responses

            output = self.copy()
            data = self.ydata
            ndata = data.size

            if transfer_function is not None:
                c = num.abs(transfer_function.evaluate(num.ones(1))[0])

                if invert:
                    c = 1.0/c

                data *= c

            if tfade != 0.0:
                data *= costaper(
                    0., tfade, self.deltat*(ndata-1)-tfade, self.deltat*ndata,
                    ndata, self.deltat)

            output.ydata = data

        else:
            ndata = self.ydata.size
            ntrans = nextpow2(ndata*1.2)
            coefs = self._get_tapered_coefs(
                ntrans, freqlimits, transfer_function, invert=invert)

            data = self.ydata

            data_pad = num.zeros(ntrans, dtype=float)
            data_pad[:ndata] = data
            if demean:
                data_pad[:ndata] -= data.mean()

            if tfade != 0.0:
                data_pad[:ndata] *= costaper(
                    0., tfade, self.deltat*(ndata-1)-tfade, self.deltat*ndata,
                    ndata, self.deltat)

            fdata = num.fft.rfft(data_pad)
            fdata *= coefs
            ddata = num.fft.irfft(fdata)
            output = self.copy()
            output.ydata = ddata[:ndata]

        if cut_off_fading and tfade != 0.0:
            try:
                output.chop(output.tmin+tfade, output.tmax-tfade, inplace=True)
            except NoData:
                raise TraceTooShort(
                    'Trace %s.%s.%s.%s too short for fading length setting. '
                    'trace length = %g, fading length = %g'
                    % (self.nslc_id + (self.tmax-self.tmin, tfade)))
        else:
            output.ydata = output.ydata.copy()

        return output

1665 

1666 def differentiate(self, n=1, order=4, inplace=True): 

1667 ''' 

1668 Approximate first or second derivative of the trace. 

1669 

1670 :param n: 1 for first derivative, 2 for second 

1671 :param order: order of the approximation 2 and 4 are supported 

1672 :param inplace: if ``True`` the trace is differentiated in place, 

1673 otherwise a new trace object with the derivative is returned. 

1674 

1675 Raises :py:exc:`ValueError` for unsupported `n` or `order`. 

1676 

1677 See :py:func:`~pyrocko.util.diff_fd` for implementation details. 

1678 ''' 

1679 

1680 ddata = util.diff_fd(n, order, self.deltat, self.ydata) 

1681 

1682 if inplace: 

1683 self.ydata = ddata 

1684 else: 

1685 output = self.copy(data=False) 

1686 output.set_ydata(ddata) 

1687 return output 

1688 

    def drop_chain_cache(self):
        if self._pchain:
            self._pchain.clear()

    def init_chain(self):
        self._pchain = pchain.Chain(
            do_downsample,
            do_extend,
            do_pre_taper,
            do_fft,
            do_filter,
            do_ifft)

    def run_chain(self, tmin, tmax, deltat, setup, nocache):
        if setup.domain == 'frequency_domain':
            _, _, data = self._pchain(
                (self, deltat),
                (tmin, tmax),
                (setup.taper,),
                (setup.filter,),
                (setup.filter,),
                nocache=nocache)

            return num.abs(data), num.abs(data)

        else:
            processed = self._pchain(
                (self, deltat),
                (tmin, tmax),
                (setup.taper,),
                (setup.filter,),
                (setup.filter,),
                (),
                nocache=nocache)

            if setup.domain == 'time_domain':
                data = processed.get_ydata()

            elif setup.domain == 'envelope':
                processed = processed.envelope(inplace=False)

            elif setup.domain == 'absolute':
                processed.set_ydata(num.abs(processed.get_ydata()))

            return processed.get_ydata(), processed

    def misfit(self, candidate, setup, nocache=False, debug=False):
        '''
        Calculate misfit and normalization factor against candidate trace.

        :param candidate: :py:class:`Trace` object
        :param setup: :py:class:`MisfitSetup` object
        :returns: tuple ``(m, n)``, where m is the misfit value and n is the
            normalization divisor

        If the sampling rates of ``self`` and ``candidate`` differ, the trace
        with the higher sampling rate will be downsampled.
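
        A minimal sketch (``reference_tr`` and ``candidate_tr`` stand for two
        :py:class:`Trace` objects and ``setup`` for some previously
        configured :py:class:`MisfitSetup` instance)::

            m, n = reference_tr.misfit(candidate_tr, setup)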

        '''

        a = self
        b = candidate

        for tr in (a, b):
            if not tr._pchain:
                tr.init_chain()

        deltat = max(a.deltat, b.deltat)
        tmin = min(a.tmin, b.tmin) - deltat
        tmax = max(a.tmax, b.tmax) + deltat

        adata, aproc = a.run_chain(tmin, tmax, deltat, setup, nocache)
        bdata, bproc = b.run_chain(tmin, tmax, deltat, setup, nocache)

        if setup.domain != 'cc_max_norm':
            m, n = Lx_norm(bdata, adata, norm=setup.norm)
        else:
            ctr = correlate(aproc, bproc, mode='full', normalization='normal')
            ccmax = ctr.max()[1]
            m = 0.5 - 0.5 * ccmax
            n = 0.5

        if debug:
            return m, n, aproc, bproc
        else:
            return m, n

1774 

1775 def spectrum(self, pad_to_pow2=False, tfade=None): 

1776 ''' 

1777 Get FFT spectrum of trace. 

1778 

1779 :param pad_to_pow2: whether to zero-pad the data to next larger 

1780 power-of-two length 

1781 :param tfade: ``None`` or a time length in seconds, to apply cosine
1782 shaped tapers to both ends of the trace

1783 

1784 :returns: a tuple with (frequencies, values) 
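
For example, to plot an amplitude spectrum (sketch; ``tr`` is assumed
to be a :py:class:`Trace` with data, and the zero-frequency sample is
skipped for the log-log plot)::

    freqs, spec = tr.spectrum(pad_to_pow2=True)
    import pylab
    pylab.loglog(freqs[1:], num.abs(spec[1:]))
    pylab.show()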

1785 ''' 

1786 

1787 ndata = self.ydata.size 

1788 

1789 if pad_to_pow2: 

1790 ntrans = nextpow2(ndata) 

1791 else: 

1792 ntrans = ndata 

1793 

1794 if tfade is None: 

1795 ydata = self.ydata 

1796 else: 

1797 ydata = self.ydata * costaper( 

1798 0., tfade, self.deltat*(ndata-1)-tfade, self.deltat*ndata, 

1799 ndata, self.deltat) 

1800 

1801 fydata = num.fft.rfft(ydata, ntrans) 

1802 df = 1./(ntrans*self.deltat) 

1803 fxdata = num.arange(len(fydata))*df 

1804 return fxdata, fydata 

1805 

1806 def multi_filter(self, filter_freqs, bandwidth): 
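'''
Apply a bank of Gaussian-shaped frequency-domain filters to the trace.

For each center frequency in ``filter_freqs``, the spectrum of the
trace is weighted with a Gaussian of relative width ``bandwidth``, and
the envelope of the corresponding analytic signal is computed. The
energy-weighted centroid frequency of each filter output is recorded.

:param filter_freqs: center frequencies of the Gaussian filters
:param bandwidth: relative bandwidth of the Gaussian filters
:returns: tuple ``(centroid_freqs, signal_tf)``, where ``signal_tf``
    is a 2D array of shape ``(nfilters, ndata)`` holding the envelopes
'''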

1807 

1808 class Gauss(FrequencyResponse): 

1809 f0 = Float.T() 

1810 a = Float.T(default=1.0) 

1811 

1812 def __init__(self, f0, a=1.0, **kwargs): 

1813 FrequencyResponse.__init__(self, f0=f0, a=a, **kwargs) 

1814 

1815 def evaluate(self, freqs): 

1816 omega0 = 2.*math.pi*self.f0 

1817 omega = 2.*math.pi*freqs 

1818 return num.exp(-((omega-omega0) 

1819 / (self.a*omega0))**2) 

1820 

1821 freqs, coefs = self.spectrum() 

1822 n = self.data_len() 

1823 nfilt = len(filter_freqs) 

1824 signal_tf = num.zeros((nfilt, n)) 

1825 centroid_freqs = num.zeros(nfilt) 

1826 for ifilt, f0 in enumerate(filter_freqs): 

1827 taper = Gauss(f0, a=bandwidth) 

1828 weights = taper.evaluate(freqs) 

1829 nhalf = freqs.size 

1830 analytic_spec = num.zeros(n, dtype=complex) 

1831 analytic_spec[:nhalf] = coefs*weights 

1832 

1833 enorm = num.abs(analytic_spec[:nhalf])**2 

1834 enorm /= num.sum(enorm) 

1835 

1836 if n % 2 == 0: 

1837 analytic_spec[1:nhalf-1] *= 2. 

1838 else: 

1839 analytic_spec[1:nhalf] *= 2. 

1840 

1841 analytic = num.fft.ifft(analytic_spec) 

1842 signal_tf[ifilt, :] = num.abs(analytic) 

1843 

1846 centroid_freqs[ifilt] = num.sum(freqs*enorm)

1847 

1848 return centroid_freqs, signal_tf 

1849 

1850 def _get_tapered_coefs( 

1851 self, ntrans, freqlimits, transfer_function, invert=False): 

1852 

1853 deltaf = 1./(self.deltat*ntrans) 

1854 nfreqs = ntrans//2 + 1 

1855 transfer = num.ones(nfreqs, dtype=complex) 

1856 hi = snapper(nfreqs, deltaf) 

1857 if freqlimits is not None: 

1858 a, b, c, d = freqlimits 

1859 freqs = num.arange(hi(d)-hi(a), dtype=float)*deltaf \ 

1860 + hi(a)*deltaf 

1861 

1862 if invert: 

1863 coeffs = transfer_function.evaluate(freqs) 

1864 if num.any(coeffs == 0.0): 

1865 raise InfiniteResponse('%s.%s.%s.%s' % self.nslc_id) 

1866 

1867 transfer[hi(a):hi(d)] = 1.0 / coeffs

1868 else: 

1869 transfer[hi(a):hi(d)] = transfer_function.evaluate(freqs) 

1870 

1871 tapered_transfer = costaper(a, b, c, d, nfreqs, deltaf)*transfer 

1872 else: 

1873 if invert: 

1874 raise Exception( 

1875 'transfer: `freqlimits` must be given when `invert` is ' 

1876 'set to `True`') 

1877 

1878 freqs = num.arange(nfreqs) * deltaf 

1879 tapered_transfer = transfer_function.evaluate(freqs) 

1880 

1881 tapered_transfer[0] = 0.0 # don't introduce static offsets 

1882 return tapered_transfer 

1883 

1884 def fill_template(self, template, **additional): 

1885 ''' 

1886 Fill string template with trace metadata. 

1887 

1888 Uses normal python '%(placeholder)s' string templates. The following 

1889 placeholders are considered: ``network``, ``station``, ``location``, 

1890 ``channel``, ``tmin`` (time of first sample), ``tmax`` (time of last 

1891 sample), ``tmin_ms``, ``tmax_ms``, ``tmin_us``, ``tmax_us``, 

1892 ``tmin_year``, ``tmax_year``, ``tmin_month``, ``tmax_month``, 

1893 ``tmin_day``, ``tmax_day``, ``julianday``. The variants ending with 

1894 ``'_ms'`` include milliseconds, those with ``'_us'`` include 

1895 microseconds, those with ``'_year'`` contain only the year. 
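
For example, to build a file name from a trace's codes and start time
(sketch; the path is illustrative)::

    fn = tr.fill_template(
        '%(network)s.%(station)s.%(channel)s_%(tmin_ms)s.mseed')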

1896 ''' 

1897 

1898 template = template.replace('%n', '%(network)s')\ 

1899 .replace('%s', '%(station)s')\ 

1900 .replace('%l', '%(location)s')\ 

1901 .replace('%c', '%(channel)s')\ 

1902 .replace('%b', '%(tmin)s')\ 

1903 .replace('%e', '%(tmax)s')\ 

1904 .replace('%j', '%(julianday)s') 

1905 

1906 params = dict( 

1907 zip(('network', 'station', 'location', 'channel'), self.nslc_id)) 

1908 params['tmin'] = util.time_to_str( 

1909 self.tmin, format='%Y-%m-%d_%H-%M-%S') 

1910 params['tmax'] = util.time_to_str( 

1911 self.tmax, format='%Y-%m-%d_%H-%M-%S') 

1912 params['tmin_ms'] = util.time_to_str( 

1913 self.tmin, format='%Y-%m-%d_%H-%M-%S.3FRAC') 

1914 params['tmax_ms'] = util.time_to_str( 

1915 self.tmax, format='%Y-%m-%d_%H-%M-%S.3FRAC') 

1916 params['tmin_us'] = util.time_to_str( 

1917 self.tmin, format='%Y-%m-%d_%H-%M-%S.6FRAC') 

1918 params['tmax_us'] = util.time_to_str( 

1919 self.tmax, format='%Y-%m-%d_%H-%M-%S.6FRAC') 

1920 params['tmin_year'], params['tmin_month'], params['tmin_day'] \ 

1921 = util.time_to_str(self.tmin, format='%Y-%m-%d').split('-') 

1922 params['tmax_year'], params['tmax_month'], params['tmax_day'] \ 

1923 = util.time_to_str(self.tmax, format='%Y-%m-%d').split('-') 

1924 params['julianday'] = util.julian_day_of_year(self.tmin) 

1925 params.update(additional) 

1926 return template % params 

1927 

1928 def plot(self): 

1929 ''' 

1930 Show trace with matplotlib. 

1931 

1932 See also: :py:meth:`Trace.snuffle`. 

1933 ''' 

1934 

1935 import pylab 

1936 pylab.plot(self.get_xdata(), self.get_ydata()) 

1937 name = '%s %s %s - %s' % ( 

1938 self.channel, 

1939 self.station, 

1940 time.strftime("%d-%m-%y %H:%M:%S", time.gmtime(self.tmin)), 

1941 time.strftime("%d-%m-%y %H:%M:%S", time.gmtime(self.tmax))) 

1942 

1943 pylab.title(name) 

1944 pylab.show() 

1945 

1946 def snuffle(self, **kwargs): 

1947 ''' 

1948 Show trace in a snuffler window. 

1949 

1950 :param stations: list of :py:class:`pyrocko.model.Station` objects or 

1951 ``None`` 

1952 :param events: list of :py:class:`pyrocko.model.Event` objects or 

1953 ``None`` 

1954 :param markers: list of :py:class:`pyrocko.gui.snuffler.marker.Marker` 

1955 objects or ``None`` 

1956 :param ntracks: int, number of tracks to be shown initially (default:
1957 12)

1958 :param follow: time interval (in seconds) for real time follow mode or 

1959 ``None`` 

1960 :param controls: bool, whether to show the main controls (default: 

1961 ``True``) 

1962 :param opengl: bool, whether to use opengl (default: ``False``) 

1963 ''' 

1964 

1965 return snuffle([self], **kwargs) 

1966 

1967 

1968def snuffle(traces, **kwargs): 

1969 ''' 

1970 Show traces in a snuffler window. 

1971 

1972 :param stations: list of :py:class:`pyrocko.model.Station` objects or 

1973 ``None`` 

1974 :param events: list of :py:class:`pyrocko.model.Event` objects or ``None`` 

1975 :param markers: list of :py:class:`pyrocko.gui.snuffler.marker.Marker` 

1976 objects or ``None`` 

1977 :param ntracks: int, number of tracks to be shown initially (default: 12)

1978 :param follow: time interval (in seconds) for real time follow mode or 

1979 ``None`` 

1980 :param controls: bool, whether to show the main controls (default: 

1981 ``True``) 

1982 :param opengl: bool, whether to use opengl (default: ``False``) 

1983 ''' 

1984 

1985 from pyrocko import pile 

1986 from pyrocko.gui.snuffler import snuffler 

1987 p = pile.Pile() 

1988 if traces: 

1989 trf = pile.MemTracesFile(None, traces) 

1990 p.add_file(trf) 

1991 return snuffler.snuffle(p, **kwargs) 

1992 

1993 

1994class InfiniteResponse(Exception): 

1995 ''' 

1996 This exception is raised by :py:class:`Trace` operations when deconvolution 

1997 of a frequency response (instrument response transfer function) would 

1998 result in a division by zero. 

1999 ''' 

2000 

2001 

2002class MisalignedTraces(Exception): 

2003 ''' 

2004 This exception is raised by some :py:class:`Trace` operations when tmin, 

2005 tmax or number of samples do not match. 

2006 ''' 

2007 

2008 pass 

2009 

2010 

2011class NoData(Exception): 

2012 ''' 

2013 This exception is raised by some :py:class:`Trace` operations when no or 

2014 not enough data is available. 

2015 ''' 

2016 

2017 pass 

2018 

2019 

2020class AboveNyquist(Exception): 

2021 ''' 

2022 This exception is raised by some :py:class:`Trace` operations when given 

2023 frequencies are above the Nyquist frequency. 

2024 ''' 

2025 

2026 pass 

2027 

2028 

2029class TraceTooShort(Exception): 

2030 ''' 

2031 This exception is raised by some :py:class:`Trace` operations when the 

2032 trace is too short. 

2033 ''' 

2034 

2035 pass 

2036 

2037 

2038class ResamplingFailed(Exception): 

2039 pass 

2040 

2041 

2042def minmax(traces, key=None, mode='minmax', outer_mode='minmax'): 

2043 

2044 ''' 

2045 Get data range given traces grouped by selected pattern. 

2046 

2047 :param key: a callable which takes as single argument a trace and returns a 

2048 key for the grouping of the results. If this is ``None``, the default, 

2049 ``lambda tr: (tr.network, tr.station, tr.location, tr.channel)`` is 

2050 used. 

2051 :param mode: ``'minmax'`` or floating point number. If this is 

2052 ``'minmax'``, minimum and maximum of the traces are used, if it is a 

2053 number, mean +- standard deviation times ``mode`` is used. 

2054 

2055 :param outer_mode: ``'minmax'`` to use minimum and maximum of the
2056 single-trace ranges, or ``'robust'`` to use the 10th and 90th
2057 percentiles, discarding 10% of extreme values on either end.

2058 

2059 :returns: a dict with the combined data ranges. 

2060 

2061 Examples:: 

2062 

2063 ranges = minmax(traces, lambda tr: tr.channel)
2064 print(ranges['N'])  # min & max of all traces with channel == 'N'
2065 print(ranges['E'])  # min & max of all traces with channel == 'E'
2066
2067 ranges = minmax(traces, lambda tr: (tr.network, tr.station))
2068 print(ranges['GR', 'HAM3'])  # min & max of all traces with
2069                              # network == 'GR' and station == 'HAM3'
2070
2071 ranges = minmax(traces, lambda tr: None)
2072 print(ranges[None])  # min & max of all traces

2073 ''' 

2074 

2075 if key is None: 

2076 key = _default_key 

2077 

2078 ranges = defaultdict(list) 

2079 for trace in traces: 

2080 if isinstance(mode, str) and mode == 'minmax': 

2081 mi, ma = num.nanmin(trace.ydata), num.nanmax(trace.ydata) 

2082 else: 

2083 mean = trace.ydata.mean() 

2084 std = trace.ydata.std() 

2085 mi, ma = mean-std*mode, mean+std*mode 

2086 

2087 k = key(trace) 

2088 ranges[k].append((mi, ma)) 

2089 

2090 for k in ranges: 

2091 mins, maxs = num.array(ranges[k]).T 

2092 if outer_mode == 'minmax': 

2093 ranges[k] = num.nanmin(mins), num.nanmax(maxs) 

2094 elif outer_mode == 'robust': 

2095 ranges[k] = num.percentile(mins, 10.), num.percentile(maxs, 90.) 

2096 

2097 return ranges 

2098 

2099 

2100def minmaxtime(traces, key=None): 

2101 

2102 ''' 

2103 Get time range given traces grouped by selected pattern. 

2104 

2105 :param key: a callable which takes as single argument a trace and returns a 

2106 key for the grouping of the results. If this is ``None``, the default, 

2107 ``lambda tr: (tr.network, tr.station, tr.location, tr.channel)`` is 

2108 used. 

2109 

2110 :returns: a dict with the combined time ranges.

2111 ''' 

2112 

2113 if key is None: 

2114 key = _default_key 

2115 

2116 ranges = {} 

2117 for trace in traces: 

2118 mi, ma = trace.tmin, trace.tmax 

2119 k = key(trace) 

2120 if k not in ranges: 

2121 ranges[k] = mi, ma 

2122 else: 

2123 tmi, tma = ranges[k] 

2124 ranges[k] = min(tmi, mi), max(tma, ma) 

2125 

2126 return ranges 

2127 

2128 

2129def degapper( 

2130 traces, 

2131 maxgap=5, 

2132 fillmethod='interpolate', 

2133 deoverlap='use_second', 

2134 maxlap=None): 

2135 

2136 ''' 

2137 Try to connect traces and remove gaps. 

2138 

2139 This function combines adjacent traces which match in their network,

2140 station, location and channel attributes. Overlapping parts are handled 

2141 according to the ``deoverlap`` argument. 

2142 

2143 :param traces: input traces, must be sorted by their full_id attribute. 

2144 :param maxgap: maximum number of samples to interpolate. 

2145 :param fillmethod: what to put into the gaps: 'interpolate' or 'zeros'. 

2146 :param deoverlap: how to handle overlaps: 'use_second' to use data from 

2147 second trace (default), 'use_first' to use data from first trace, 

2148 'crossfade_cos' to crossfade with cosine taper, 'add' to add amplitude 

2149 values. 

2150 :param maxlap: maximum number of samples of overlap which are removed 

2151 

2152 :returns: list of traces 
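
Sketch of typical usage (the input must be sorted as stated above)::

    traces.sort(key=lambda tr: tr.full_id)
    traces = degapper(traces, maxgap=5, fillmethod='interpolate')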

2153 ''' 

2154 

2155 in_traces = traces 

2156 out_traces = [] 

2157 if not in_traces: 

2158 return out_traces 

2159 out_traces.append(in_traces.pop(0)) 

2160 while in_traces: 

2161 

2162 a = out_traces[-1] 

2163 b = in_traces.pop(0) 

2164 

2165 avirt, bvirt = a.ydata is None, b.ydata is None 

2166 assert avirt == bvirt, \ 

2167 'traces given to degapper() must either all have data or have ' \ 

2168 'no data.' 

2169 

2170 virtual = avirt and bvirt 

2171 

2172 if (a.nslc_id == b.nslc_id and a.deltat == b.deltat 

2173 and a.data_len() >= 1 and b.data_len() >= 1 

2174 and (virtual or a.ydata.dtype == b.ydata.dtype)): 

2175 

2176 dist = (b.tmin-(a.tmin+(a.data_len()-1)*a.deltat))/a.deltat 

2177 idist = int(round(dist)) 

2178 if abs(dist - idist) > 0.05 and idist <= maxgap: 

2179 # logger.warning('Cannot degap traces with displaced sampling ' 

2180 # '(%s, %s, %s, %s)' % a.nslc_id) 

2181 pass 

2182 else: 

2183 if 1 < idist <= maxgap: 

2184 if not virtual: 

2185 if fillmethod == 'interpolate': 

2186 filler = a.ydata[-1] + ( 

2187 ((1.0 + num.arange(idist-1, dtype=float)) 

2188 / idist) * (b.ydata[0]-a.ydata[-1]) 

2189 ).astype(a.ydata.dtype) 

2190 elif fillmethod == 'zeros': 

2191 filler = num.zeros(idist-1, dtype=a.ydata.dtype) 

2192 a.ydata = num.concatenate((a.ydata, filler, b.ydata)) 

2193 a.tmax = b.tmax 

2194 if a.mtime and b.mtime: 

2195 a.mtime = max(a.mtime, b.mtime) 

2196 continue 

2197 

2198 elif idist == 1: 

2199 if not virtual: 

2200 a.ydata = num.concatenate((a.ydata, b.ydata)) 

2201 a.tmax = b.tmax 

2202 if a.mtime and b.mtime: 

2203 a.mtime = max(a.mtime, b.mtime) 

2204 continue 

2205 

2206 elif idist <= 0 and (maxlap is None or -maxlap < idist): 

2207 if b.tmax > a.tmax: 

2208 if not virtual: 

2209 na = a.ydata.size 

2210 n = -idist+1 

2211 if deoverlap == 'use_second': 

2212 a.ydata = num.concatenate( 

2213 (a.ydata[:-n], b.ydata)) 

2214 elif deoverlap in ('use_first', 'crossfade_cos'): 

2215 a.ydata = num.concatenate( 

2216 (a.ydata, b.ydata[n:])) 

2217 elif deoverlap == 'add': 

2218 a.ydata[-n:] += b.ydata[:n] 

2219 a.ydata = num.concatenate( 

2220 (a.ydata, b.ydata[n:])) 

2221 else: 

2222 assert False, 'unknown deoverlap method' 

2223 

2224 if deoverlap == 'crossfade_cos': 

2225 n = -idist+1 

2226 taper = 0.5-0.5*num.cos( 

2227 (1.+num.arange(n))/(1.+n)*num.pi) 

2228 a.ydata[na-n:na] *= 1.-taper 

2229 a.ydata[na-n:na] += b.ydata[:n] * taper 

2230 

2231 a.tmax = b.tmax 

2232 if a.mtime and b.mtime: 

2233 a.mtime = max(a.mtime, b.mtime) 

2234 continue 

2235 else: 

2236 # make short second trace vanish 

2237 continue 

2238 

2239 if b.data_len() >= 1: 

2240 out_traces.append(b) 

2241 

2242 for tr in out_traces: 

2243 tr._update_ids() 

2244 

2245 return out_traces 

2246 

2247 

2248def rotate(traces, azimuth, in_channels, out_channels): 

2249 ''' 

2250 2D rotation of traces. 

2251 

2252 :param traces: list of input traces 

2253 :param azimuth: difference of the azimuths of the component directions 

2254 (azimuth of out_channels[0]) - (azimuth of in_channels[0]) 

2255 :param in_channels: names of the input channels (e.g. 'N', 'E') 

2256 :param out_channels: names of the output channels (e.g. 'R', 'T') 

2257 :returns: list of rotated traces 
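
For example, to rotate horizontal components into the radial-transversal
system for a given back-azimuth ``baz`` in degrees (sketch; see
:py:func:`rotate_to_rt` for the variant which computes the angle from
source and receiver positions)::

    rotated = rotate(
        traces, azimuth=baz+180.,
        in_channels=('N', 'E'), out_channels=('R', 'T'))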

2258 ''' 

2259 

2260 phi = azimuth/180.*math.pi 

2261 cphi = math.cos(phi) 

2262 sphi = math.sin(phi) 

2263 rotated = [] 

2264 in_channels = tuple(_channels_to_names(in_channels)) 

2265 out_channels = tuple(_channels_to_names(out_channels)) 

2266 for a in traces: 

2267 for b in traces: 

2268 if ((a.channel, b.channel) == in_channels and 

2269 a.nslc_id[:3] == b.nslc_id[:3] and 

2270 abs(a.deltat-b.deltat) < a.deltat*0.001): 

2271 tmin = max(a.tmin, b.tmin) 

2272 tmax = min(a.tmax, b.tmax) 

2273 

2274 if tmin < tmax: 

2275 ac = a.chop(tmin, tmax, inplace=False, include_last=True) 

2276 bc = b.chop(tmin, tmax, inplace=False, include_last=True) 

2277 if abs(ac.tmin - bc.tmin) > ac.deltat*0.01: 

2278 logger.warning( 

2279 'Cannot rotate traces with displaced sampling ' 

2280 '(%s, %s, %s, %s)' % a.nslc_id) 

2281 continue 

2282 

2283 acydata = ac.get_ydata()*cphi+bc.get_ydata()*sphi 

2284 bcydata = -ac.get_ydata()*sphi+bc.get_ydata()*cphi 

2285 ac.set_ydata(acydata) 

2286 bc.set_ydata(bcydata) 

2287 

2288 ac.set_codes(channel=out_channels[0]) 

2289 bc.set_codes(channel=out_channels[1]) 

2290 rotated.append(ac) 

2291 rotated.append(bc) 

2292 

2293 return rotated 

2294 

2295 

2296def rotate_to_rt(n, e, source, receiver, out_channels=('R', 'T')): 

2297 ''' 

2298 Rotate traces from NE to RT system. 

2299 

2300 :param n: 

2301 North trace. 

2302 :type n: 

2303 :py:class:`~pyrocko.trace.Trace` 

2304 

2305 :param e: 

2306 East trace. 

2307 :type e: 

2308 :py:class:`~pyrocko.trace.Trace` 

2309 

2310 :param source: 

2311 Source of the recorded signal. 

2312 :type source: 

2313 :py:class:`pyrocko.gf.seismosizer.Source` 

2314 

2315 :param receiver: 

2316 Receiver of the recorded signal. 

2317 :type receiver: 

2318 :py:class:`pyrocko.model.location.Location` 

2319 

2320 :param out_channels:
2321 Channel codes of the output channels (radial, transversal).
2322 Default is ``('R', 'T')``.
2324 :type out_channels:
2325 optional, tuple[str, str]

2326 

2327 :returns: 

2328 Rotated traces (radial, transversal). 

2329 :rtype: 

2330 tuple[ 

2331 :py:class:`~pyrocko.trace.Trace`, 

2332 :py:class:`~pyrocko.trace.Trace`] 

2333 ''' 

2334 azimuth = orthodrome.azimuth(receiver, source) + 180. 

2335 in_channels = n.channel, e.channel 

2336 out = rotate( 

2337 [n, e], azimuth, 

2338 in_channels=in_channels, 

2339 out_channels=out_channels) 

2340 

2341 assert len(out) == 2 

2342 for tr in out: 

2343 if tr.channel == out_channels[0]: 

2344 r = tr 

2345 elif tr.channel == out_channels[1]: 

2346 t = tr 

2347 else: 

2348 assert False 

2349 

2350 return r, t 

2351 

2352 

2353def rotate_to_lqt(traces, backazimuth, incidence, in_channels, 

2354 out_channels=('L', 'Q', 'T')): 

2355 ''' 

2356 Rotate traces from ZNE to LQT system. 

2357 

2358 :param traces: list of traces in arbitrary order 

2359 :param backazimuth: backazimuth in degrees clockwise from north 

2360 :param incidence: incidence angle in degrees from vertical 

2361 :param in_channels: input channel names 

2362 :param out_channels: output channel names (default: ('L', 'Q', 'T')) 

2363 :returns: list of transformed traces 
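
Usage sketch (angle values are illustrative)::

    lqt = rotate_to_lqt(
        traces, backazimuth=135., incidence=20.,
        in_channels=('Z', 'N', 'E'))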

2364 ''' 

2365 i = incidence/180.*num.pi 

2366 b = backazimuth/180.*num.pi 

2367 

2368 ci = num.cos(i) 

2369 cb = num.cos(b) 

2370 si = num.sin(i) 

2371 sb = num.sin(b) 

2372 

2373 rotmat = num.array( 

2374 [[ci, -cb*si, -sb*si], [si, cb*ci, sb*ci], [0., sb, -cb]]) 

2375 return project(traces, rotmat, in_channels, out_channels) 

2376 

2377 

2378def _decompose(a): 

2379 ''' 

2380 Decompose matrix into independent submatrices. 

2381 ''' 

2382 

2383 def depends(iout, a): 

2384 row = a[iout, :] 

2385 return set(num.arange(row.size).compress(row != 0.0)) 

2386 

2387 def provides(iin, a): 

2388 col = a[:, iin] 

2389 return set(num.arange(col.size).compress(col != 0.0)) 

2390 

2391 a = num.asarray(a) 

2392 outs = set(range(a.shape[0])) 

2393 systems = [] 

2394 while outs: 

2395 iout = outs.pop() 

2396 

2397 gout = set() 

2398 for iin in depends(iout, a): 

2399 gout.update(provides(iin, a)) 

2400 

2401 if not gout: 

2402 continue 

2403 

2404 gin = set() 

2405 for iout2 in gout: 

2406 gin.update(depends(iout2, a)) 

2407 

2408 if not gin: 

2409 continue 

2410 

2411 for iout2 in gout: 

2412 if iout2 in outs: 

2413 outs.remove(iout2) 

2414 

2415 gin = list(gin) 

2416 gin.sort() 

2417 gout = list(gout) 

2418 gout.sort() 

2419 

2420 systems.append((gin, gout, a[gout, :][:, gin])) 

2421 

2422 return systems 

2423 

2424 

2425def _channels_to_names(channels): 

2426 from pyrocko import squirrel 

2427 names = [] 

2428 for ch in channels: 

2429 if isinstance(ch, model.Channel): 

2430 names.append(ch.name) 

2431 elif isinstance(ch, squirrel.Channel): 

2432 names.append(ch.codes.channel) 

2433 else: 

2434 names.append(ch) 

2435 

2436 return names 

2437 

2438 

2439def project(traces, matrix, in_channels, out_channels): 

2440 ''' 

2441 Affine transform of three-component traces. 

2442 

2443 Compute the matrix-vector product of three-component traces, e.g. to
2444 rotate traces into a different basis. The traces are distinguished and
2445 ordered by their channel attribute. The transform is applied to
2446 overlapping parts of any appropriate combinations of the input traces,
2447 which makes this function robust in the presence of data gaps. It also
2448 tries to apply the transformation to subsets of the channels where
2449 possible, so that, if for example a vertical component is missing,
2450 horizontal components can still be rotated.

2451 

2452 :param traces: list of traces in arbitrary order 

2453 :param matrix: transformation matrix

2454 :param in_channels: input channel names 

2455 :param out_channels: output channel names 

2456 :returns: list of transformed traces 
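
For example, a horizontal rotation embedded in a 3x3 matrix (sketch;
the angle is illustrative). This matrix decomposes into a 1x1 system
for the vertical and a 2x2 system for the horizontals, so the vertical
component is passed through even if a horizontal trace is missing::

    phi = 30. / 180. * math.pi
    matrix = num.array([
        [1., 0., 0.],
        [0., math.cos(phi), math.sin(phi)],
        [0., -math.sin(phi), math.cos(phi)]])

    out = project(traces, matrix, ('Z', 'N', 'E'), ('Z', 'R', 'T'))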

2457 ''' 

2458 

2459 in_channels = tuple(_channels_to_names(in_channels)) 

2460 out_channels = tuple(_channels_to_names(out_channels)) 

2461 systems = _decompose(matrix) 

2462 

2463 # fallback to full matrix if some are not quadratic 

2464 for iins, iouts, submatrix in systems: 

2465 if submatrix.shape[0] != submatrix.shape[1]: 

2466 if len(in_channels) != 3 or len(out_channels) != 3: 

2467 return [] 

2468 else: 

2469 return _project3(traces, matrix, in_channels, out_channels) 

2470 

2471 projected = [] 

2472 for iins, iouts, submatrix in systems: 

2473 in_cha = tuple([in_channels[iin] for iin in iins]) 

2474 out_cha = tuple([out_channels[iout] for iout in iouts]) 

2475 if submatrix.shape[0] == 1: 

2476 projected.extend(_project1(traces, submatrix, in_cha, out_cha)) 

2477 elif submatrix.shape[1] == 2: 

2478 projected.extend(_project2(traces, submatrix, in_cha, out_cha)) 

2479 else: 

2480 projected.extend(_project3(traces, submatrix, in_cha, out_cha)) 

2481 

2482 return projected 

2483 

2484 

2485def project_dependencies(matrix, in_channels, out_channels): 

2486 ''' 

2487 Figure out what dependencies project() would produce. 

2488 ''' 

2489 

2490 in_channels = tuple(_channels_to_names(in_channels)) 

2491 out_channels = tuple(_channels_to_names(out_channels)) 

2492 systems = _decompose(matrix) 

2493 

2494 subpro = [] 

2495 for iins, iouts, submatrix in systems: 

2496 if submatrix.shape[0] != submatrix.shape[1]: 

2497 subpro.append((matrix, in_channels, out_channels)) 

2498 

2499 if not subpro: 

2500 for iins, iouts, submatrix in systems: 

2501 in_cha = tuple([in_channels[iin] for iin in iins]) 

2502 out_cha = tuple([out_channels[iout] for iout in iouts]) 

2503 subpro.append((submatrix, in_cha, out_cha)) 

2504 

2505 deps = {} 

2506 for mat, in_cha, out_cha in subpro: 

2507 for oc in out_cha: 

2508 if oc not in deps: 

2509 deps[oc] = [] 

2510 

2511 for ic in in_cha: 

2512 deps[oc].append(ic) 

2513 

2514 return deps 

2515 

2516 

2517def _project1(traces, matrix, in_channels, out_channels): 

2518 assert len(in_channels) == 1 

2519 assert len(out_channels) == 1 

2520 assert matrix.shape == (1, 1) 

2521 

2522 projected = [] 

2523 for a in traces: 

2524 if not (a.channel,) == in_channels: 

2525 continue 

2526 

2527 ac = a.copy() 

2528 ac.set_ydata(matrix[0, 0]*a.get_ydata()) 

2529 ac.set_codes(channel=out_channels[0]) 

2530 projected.append(ac) 

2531 

2532 return projected 

2533 

2534 

2535def _project2(traces, matrix, in_channels, out_channels): 

2536 assert len(in_channels) == 2 

2537 assert len(out_channels) == 2 

2538 assert matrix.shape == (2, 2) 

2539 projected = [] 

2540 for a in traces: 

2541 for b in traces: 

2542 if not ((a.channel, b.channel) == in_channels and 

2543 a.nslc_id[:3] == b.nslc_id[:3] and 

2544 abs(a.deltat-b.deltat) < a.deltat*0.001): 

2545 continue 

2546 

2547 tmin = max(a.tmin, b.tmin) 

2548 tmax = min(a.tmax, b.tmax) 

2549 

2550 if tmin > tmax: 

2551 continue 

2552 

2553 ac = a.chop(tmin, tmax, inplace=False, include_last=True) 

2554 bc = b.chop(tmin, tmax, inplace=False, include_last=True) 

2555 if abs(ac.tmin - bc.tmin) > ac.deltat*0.01: 

2556 logger.warning( 

2557 'Cannot project traces with displaced sampling ' 

2558 '(%s, %s, %s, %s)' % a.nslc_id) 

2559 continue 

2560 

2561 acydata = num.dot(matrix[0], (ac.get_ydata(), bc.get_ydata())) 

2562 bcydata = num.dot(matrix[1], (ac.get_ydata(), bc.get_ydata())) 

2563 

2564 ac.set_ydata(acydata) 

2565 bc.set_ydata(bcydata) 

2566 

2567 ac.set_codes(channel=out_channels[0]) 

2568 bc.set_codes(channel=out_channels[1]) 

2569 

2570 projected.append(ac) 

2571 projected.append(bc) 

2572 

2573 return projected 

2574 

2575 

2576def _project3(traces, matrix, in_channels, out_channels): 

2577 assert len(in_channels) == 3 

2578 assert len(out_channels) == 3 

2579 assert matrix.shape == (3, 3) 

2580 projected = [] 

2581 for a in traces: 

2582 for b in traces: 

2583 for c in traces: 

2584 if not ((a.channel, b.channel, c.channel) == in_channels 

2585 and a.nslc_id[:3] == b.nslc_id[:3] 

2586 and b.nslc_id[:3] == c.nslc_id[:3] 

2587 and abs(a.deltat-b.deltat) < a.deltat*0.001 

2588 and abs(b.deltat-c.deltat) < b.deltat*0.001): 

2589 

2590 continue 

2591 

2592 tmin = max(a.tmin, b.tmin, c.tmin) 

2593 tmax = min(a.tmax, b.tmax, c.tmax) 

2594 

2595 if tmin >= tmax: 

2596 continue 

2597 

2598 ac = a.chop(tmin, tmax, inplace=False, include_last=True) 

2599 bc = b.chop(tmin, tmax, inplace=False, include_last=True) 

2600 cc = c.chop(tmin, tmax, inplace=False, include_last=True) 

2601 if (abs(ac.tmin - bc.tmin) > ac.deltat*0.01 

2602 or abs(bc.tmin - cc.tmin) > bc.deltat*0.01): 

2603 

2604 logger.warning( 

2605 'Cannot project traces with displaced sampling ' 

2606 '(%s, %s, %s, %s)' % a.nslc_id) 

2607 continue 

2608 

2609 acydata = num.dot( 

2610 matrix[0], 

2611 (ac.get_ydata(), bc.get_ydata(), cc.get_ydata())) 

2612 bcydata = num.dot( 

2613 matrix[1], 

2614 (ac.get_ydata(), bc.get_ydata(), cc.get_ydata())) 

2615 ccydata = num.dot( 

2616 matrix[2], 

2617 (ac.get_ydata(), bc.get_ydata(), cc.get_ydata())) 

2618 

2619 ac.set_ydata(acydata) 

2620 bc.set_ydata(bcydata) 

2621 cc.set_ydata(ccydata) 

2622 

2623 ac.set_codes(channel=out_channels[0]) 

2624 bc.set_codes(channel=out_channels[1]) 

2625 cc.set_codes(channel=out_channels[2]) 

2626 

2627 projected.append(ac) 

2628 projected.append(bc) 

2629 projected.append(cc) 

2630 

2631 return projected 

2632 

2633 

2634def correlate(a, b, mode='valid', normalization=None, use_fft=False): 

2635 ''' 

2636 Cross correlation of two traces. 

2637 

2638 :param a,b: input traces 

2639 :param mode: ``'valid'``, ``'full'``, or ``'same'`` 

2640 :param normalization: ``'normal'``, ``'gliding'``, or ``None`` 

2641 :param use_fft: bool, whether to do cross correlation in spectral domain 

2642 

2643 :returns: trace containing cross correlation coefficients 

2644 

2645 This function computes the cross correlation between two traces. It 

2646 evaluates the discrete equivalent of 

2647 

2648 .. math:: 

2649 

2650 c(t) = \\int_{-\\infty}^{\\infty} a^{\\ast}(\\tau) b(t+\\tau) d\\tau 

2651 

2652 where the star denotes the complex conjugate. Note that the arguments are
2653 swapped when compared with the :py:func:`numpy.correlate` function,
2654 which is called internally. This function should be safe even with older
2655 versions of NumPy, where the correlate function has some problems.

2656 

2657 A trace containing the cross correlation coefficients is returned. The time 

2658 information of the output trace is set so that the returned cross 

2659 correlation can be viewed directly as a function of time lag. 

2660 

2661 Example:: 

2662 

2663 # align two traces a and b containing a time shifted similar signal: 

2664 c = pyrocko.trace.correlate(a, b)

2665 t, coef = c.max() # get time and value of maximum 

2666 b.shift(-t) # align b with a 

2667 

2668 ''' 

2669 

2670 assert_same_sampling_rate(a, b) 

2671 

2672 ya, yb = a.ydata, b.ydata 

2673 

2674 # need reversed order here: 

2675 yc = numpy_correlate_fixed(yb, ya, mode=mode, use_fft=use_fft) 

2676 kmin, kmax = numpy_correlate_lag_range(yb, ya, mode=mode, use_fft=use_fft) 

2677 

2678 if normalization == 'normal': 

2679 normfac = num.sqrt(num.sum(ya**2))*num.sqrt(num.sum(yb**2)) 

2680 yc = yc/normfac 

2681 

2682 elif normalization == 'gliding': 

2683 if mode != 'valid': 

2684 assert False, 'gliding normalization currently only available ' \ 

2685 'with "valid" mode.' 

2686 

2687 if ya.size < yb.size: 

2688 yshort, ylong = ya, yb 

2689 else: 

2690 yshort, ylong = yb, ya 

2691 

2692 epsilon = 0.00001 

2693 normfac_short = num.sqrt(num.sum(yshort**2)) 

2694 normfac = normfac_short * num.sqrt( 

2695 moving_sum(ylong**2, yshort.size, mode='valid')) \ 

2696 + normfac_short*epsilon 

2697 

2698 if yb.size <= ya.size: 

2699 normfac = normfac[::-1] 

2700 

2701 yc /= normfac 

2702 

2703 c = a.copy() 

2704 c.set_ydata(yc) 

2705 c.set_codes(*merge_codes(a, b, '~')) 

2706 c.shift(-c.tmin + b.tmin-a.tmin + kmin * c.deltat) 

2707 

2708 return c 

2709 

2710 

2711def deconvolve( 

2712 a, b, waterlevel, 

2713 tshift=0., 

2714 pad=0.5, 

2715 fd_taper=None, 

2716 pad_to_pow2=True): 
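    # Water-level deconvolution of trace a by trace b: the spectral
    # division A(f)*conj(B(f)) / |B(f)|**2 is stabilized by clipping the
    # denominator at waterlevel * max(|B(f)|**2). An optional
    # frequency-domain taper (fd_taper) and time shift (tshift) can be
    # applied; the padding controlled by pad reduces wrap-around
    # artifacts of the circular (FFT-based) deconvolution.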

2717 

2718 assert_same_sampling_rate(a, b)

2719 assert abs(a.tmin - b.tmin) < a.deltat * 0.001 

2720 deltat = a.deltat 

2721 npad = int(round(a.data_len()*pad + tshift / deltat)) 

2722 

2723 ndata = max(a.data_len(), b.data_len()) 

2724 ndata_pad = ndata + npad 

2725 

2726 if pad_to_pow2: 

2727 ntrans = nextpow2(ndata_pad) 

2728 else: 

2729 ntrans = ndata 

2730 

2731 aspec = num.fft.rfft(a.ydata, ntrans) 

2732 bspec = num.fft.rfft(b.ydata, ntrans) 

2733 

2734 out = aspec * num.conj(bspec) 

2735 

2736 bautocorr = bspec*num.conj(bspec) 

2737 denom = num.maximum(bautocorr, waterlevel * bautocorr.max()) 

2738 

2739 out /= denom 

2740 df = 1/(ntrans*deltat) 

2741 

2742 if fd_taper is not None: 

2743 fd_taper(out, 0.0, df) 

2744 

2745 ydata = num.roll(num.fft.irfft(out), int(round(tshift/deltat))) 

2746 c = a.copy(data=False) 

2747 c.set_ydata(ydata[:ndata]) 

2748 c.set_codes(*merge_codes(a, b, '/')) 

2749 return c 

2750 

2751 

2752def assert_same_sampling_rate(a, b, eps=1.0e-6): 

2753 assert same_sampling_rate(a, b, eps), \ 

2754 'Sampling rates differ: %g != %g' % (a.deltat, b.deltat) 

2755 

2756 

2757def same_sampling_rate(a, b, eps=1.0e-6): 

2758 ''' 

2759 Check if two traces have the same sampling rate. 

2760 

2761 :param a,b: input traces 

2762 :param eps: relative tolerance 

2763 ''' 

2764 

2765 return abs(a.deltat - b.deltat) < (a.deltat + b.deltat)*eps 

2766 

2767 

2768def fix_deltat_rounding_errors(deltat): 

2769 ''' 

2770 Try to undo sampling rate rounding errors. 

2771 

2772 Fix rounding errors of sampling intervals when these are read from single 

2773 precision floating point values. 

2774 

2775 Assumes that the true sampling rate or sampling interval was an integer 

2776 value. No correction will be applied if this would change the sampling 

2777 rate by more than 0.001%. 
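
For example, a nominal 100 Hz sampling interval read from single
precision::

    fix_deltat_rounding_errors(0.009999999776)  # -> 0.01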

2778 ''' 

2779 

2780 if deltat <= 1.0: 

2781 deltat_new = 1.0 / round(1.0 / deltat) 

2782 else: 

2783 deltat_new = round(deltat) 

2784 

2785 if abs(deltat_new - deltat) / deltat > 1e-5: 

2786 deltat_new = deltat 

2787 

2788 return deltat_new 

2789 

2790 

2791def merge_codes(a, b, sep='-'): 

2792 ''' 

2793 Merge network-station-location-channel codes of a pair of traces. 

2794 ''' 

2795 

2796 o = [] 

2797 for xa, xb in zip(a.nslc_id, b.nslc_id): 

2798 if xa == xb: 

2799 o.append(xa) 

2800 else: 

2801 o.append(sep.join((xa, xb))) 

2802 return o 

2803 

2804 

2805class Taper(Object): 

2806 ''' 

2807 Base class for tapers. 

2808 

2809 Does nothing by default. 

2810 ''' 

2811 

2812 def __call__(self, y, x0, dx): 

2813 pass 

2814 

2815 

2816class CosTaper(Taper): 

2817 ''' 

2818 Cosine Taper. 

2819 

2820 :param a: start of fading in 

2821 :param b: end of fading in 

2822 :param c: start of fading out 

2823 :param d: end of fading out 

2824 ''' 

2825 

2826 a = Float.T() 

2827 b = Float.T() 

2828 c = Float.T() 

2829 d = Float.T() 

2830 

2831 def __init__(self, a, b, c, d): 

2832 Taper.__init__(self, a=a, b=b, c=c, d=d) 

2833 

2834 def __call__(self, y, x0, dx): 

2835 apply_costaper(self.a, self.b, self.c, self.d, y, x0, dx) 

2836 

2837 def span(self, y, x0, dx): 

2838 return span_costaper(self.a, self.b, self.c, self.d, y, x0, dx) 

2839 

2840 def time_span(self): 

2841 return self.a, self.d 

2842 

2843 

2844class CosFader(Taper): 

2845 ''' 

2846 Cosine Fader. 

2847 

2848 :param xfade: fade in and fade out time in seconds (optional) 

2849 :param xfrac: fade in and fade out as fraction between 0. and 1. (optional) 

2850 

2851 Only one argument can be set. The other must be ``None``.

2852 ''' 

2853 

2854 xfade = Float.T(optional=True) 

2855 xfrac = Float.T(optional=True) 

2856 

2857 def __init__(self, xfade=None, xfrac=None): 

2858 Taper.__init__(self, xfade=xfade, xfrac=xfrac) 

2859 assert (xfade is None) != (xfrac is None) 

2860 self._xfade = xfade 

2861 self._xfrac = xfrac 

2862 

2863 def __call__(self, y, x0, dx): 

2864 

2865 xfade = self._xfade 

2866 

2867 xlen = (y.size - 1)*dx 

2868 if xfade is None: 

2869 xfade = xlen * self._xfrac 

2870 

2871 a = x0 

2872 b = x0 + xfade 

2873 c = x0 + xlen - xfade 

2874 d = x0 + xlen 

2875 

2876 apply_costaper(a, b, c, d, y, x0, dx) 

2877 

2878 def span(self, y, x0, dx): 

2879 return 0, y.size 

2880 

2881 def time_span(self): 

2882 return None, None 

2883 

2884 

2885def none_min(li): 

2886 if None in li: 

2887 return None 

2888 else: 

2889 return min(x for x in li if x is not None) 

2890 

2891 

2892def none_max(li): 

2893 if None in li: 

2894 return None 

2895 else: 

2896 return max(x for x in li if x is not None) 

2897 

2898 

2899class MultiplyTaper(Taper): 

2900 ''' 

2901 Multiplication of several tapers. 

2902 ''' 

2903 

2904 tapers = List.T(Taper.T()) 

2905 

2906 def __init__(self, tapers=None): 

2907 if tapers is None: 

2908 tapers = [] 

2909 

2910 Taper.__init__(self, tapers=tapers) 

2911 

2912 def __call__(self, y, x0, dx): 

2913 for taper in self.tapers: 

2914 taper(y, x0, dx) 

2915 

2916 def span(self, y, x0, dx): 

2917 spans = [] 

2918 for taper in self.tapers: 

2919 spans.append(taper.span(y, x0, dx)) 

2920 

2921 mins, maxs = list(zip(*spans)) 

2922 return min(mins), max(maxs) 

2923 

2924 def time_span(self): 

2925 spans = [] 

2926 for taper in self.tapers: 

2927 spans.append(taper.time_span()) 

2928 

2929 mins, maxs = list(zip(*spans)) 

2930 return none_min(mins), none_max(maxs) 

2931 

2932 

2933class GaussTaper(Taper): 

2934 ''' 

2935 Frequency domain Gaussian filter. 

2936 ''' 

2937 

2938 alpha = Float.T() 

2939 

2940 def __init__(self, alpha): 

2941 Taper.__init__(self, alpha=alpha) 

2942 self._alpha = alpha 

2943 

2944 def __call__(self, y, x0, dx): 

2945 f = x0 + num.arange(y.size)*dx 

2946 y *= num.exp(-num.pi**2 / (self._alpha**2) * f**2) 

2947 

2948 

2949cached_coefficients = {} 

2950 

2951 

2952def _get_cached_filter_coefs(order, corners, btype): 

2953 ck = (order, tuple(corners), btype) 

2954 if ck not in cached_coefficients: 

2955 if len(corners) == 1:

2956 cached_coefficients[ck] = signal.butter( 

2957 order, corners[0], btype=btype) 

2958 else: 

2959 cached_coefficients[ck] = signal.butter( 

2960 order, corners, btype=btype) 

2961 

2962 return cached_coefficients[ck] 

2963 

2964 

2965class _globals(object): 

2966 _numpy_has_correlate_flip_bug = None 

2967 

2968 

2969def _default_key(tr): 

2970 return (tr.network, tr.station, tr.location, tr.channel) 

2971 

2972 

2973def numpy_has_correlate_flip_bug(): 

2974 ''' 

2975 Check if NumPy's correlate function reveals old behaviour. 

2976 ''' 

2977 

2978 if _globals._numpy_has_correlate_flip_bug is None: 

2979 a = num.array([0, 0, 1, 0, 0, 0, 0]) 

2980 b = num.array([0, 0, 0, 0, 1, 0, 0, 0]) 

2981 ab = num.correlate(a, b, mode='same') 

2982 ba = num.correlate(b, a, mode='same') 

2983 _globals._numpy_has_correlate_flip_bug = num.all(ab == ba) 

2984 

2985 return _globals._numpy_has_correlate_flip_bug 

2986 

2987 

2988def numpy_correlate_fixed(a, b, mode='valid', use_fft=False): 

2989 ''' 

2990 Call :py:func:`numpy.correlate` with fixes. 

2991 

2992 .. math::
2993     c[k] = \\sum_i a[i+k] \\, b^{\\ast}[i]

2994 Note that the result produced by newer :py:func:`numpy.correlate` is
2995 always flipped with respect to the formula given in its documentation
2996 (if ascending ``k`` is assumed for the output).

2997 ''' 

2998 

2999 if use_fft: 

3000 if a.size < b.size: 

3001 c = signal.fftconvolve(b[::-1], a, mode=mode) 

3002 else: 

3003 c = signal.fftconvolve(a, b[::-1], mode=mode) 

3004 return c 

3005 

3006 else: 

3007 buggy = numpy_has_correlate_flip_bug() 

3008 

3009 a = num.asarray(a) 

3010 b = num.asarray(b) 

3011 

3012 if buggy: 

3013 b = num.conj(b) 

3014 

3015 c = num.correlate(a, b, mode=mode) 

3016 

3017 if buggy and a.size < b.size: 

3018 return c[::-1] 

3019 else: 

3020 return c 

3021 

3022 

3023def numpy_correlate_emulate(a, b, mode='valid'): 

3024 ''' 

3025 Slow version of :py:func:`numpy.correlate` for comparison. 

3026 ''' 

3027 

3028 a = num.asarray(a) 

3029 b = num.asarray(b) 


3032 kmin, kmax = numpy_correlate_lag_range(a, b, mode=mode) 

3033 kmin = int(kmin) 

3034 kmax = int(kmax) 

3035 klen = kmax - kmin + 1 

3036 c = num.zeros(klen, dtype=num.find_common_type((b.dtype, a.dtype), ())) 

3037 for k in range(kmin, kmin+klen): 

3038 imin = max(0, -k) 

3039 ilen = min(b.size, a.size-k) - imin 

3040 c[k-kmin] = num.sum( 

3041 a[imin+k:imin+ilen+k] * num.conj(b[imin:imin+ilen])) 

3042 

3043 return c 

3044 

3045 

3046def numpy_correlate_lag_range(a, b, mode='valid', use_fft=False): 

3047 ''' 

3048 Get range of lags for which :py:func:`numpy.correlate` produces values. 

3049 ''' 

3050 

3051 a = num.asarray(a) 

3052 b = num.asarray(b) 

3053 

3054 kmin = -(b.size-1) 

3055 if mode == 'full': 

3056 klen = a.size-kmin 

3057 elif mode == 'same': 

3058 klen = max(a.size, b.size) 

3059 kmin += (a.size+b.size-1 - max(a.size, b.size)) // 2 + \ 

3060 int(not use_fft and a.size % 2 == 0 and b.size > a.size) 

3061 elif mode == 'valid': 

3062 klen = abs(a.size - b.size) + 1 

3063 kmin += min(a.size, b.size) - 1 

3064 

3065 return kmin, kmin + klen - 1 

3066 

3067 

3068def autocorr(x, nshifts): 

3069 ''' 

3070 Compute a biased estimate of the first ``nshifts`` autocorrelation coefficients.

3071 

3072 :param x: input array 

3073 :param nshifts: number of coefficients to calculate 
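
In the notation of the implementation, with :math:`\\bar{x}` the mean
and :math:`\\sigma` the standard deviation of ``x``:

.. math::

    r_k = \\frac{1}{(n - k) \\, \\sigma}
        \\sum_{i=0}^{n-k-1} (x_i - \\bar{x}) (x_{i+k} - \\bar{x})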

3074 ''' 

3075 

3076 mean = num.mean(x) 

3077 std = num.std(x) 

3078 n = x.size 

3079 xdm = x - mean 

3080 r = num.zeros(nshifts) 

3081 for k in range(nshifts): 

3082 r[k] = 1./((n-num.abs(k))*std) * num.sum(xdm[:n-k] * xdm[k:]) 

3083 

3084 return r 

3085 

3086 

3087def yulewalker(x, order): 

3088 ''' 

3089 Compute autoregression coefficients using Yule-Walker method. 

3090 

3091 :param x: input array 

3092 :param order: number of coefficients to produce 

3093 

3094 A biased estimate of the autocorrelation is used. The Yule-Walker equations 

3095 are solved by :py:func:`numpy.linalg.inv` instead of Levinson-Durbin 

3096 recursion which is normally used. 
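
In standard notation, with :math:`p` = ``order`` and :math:`\\gamma_k`
the autocorrelation sequence of ``x``, the linear system solved here is

.. math::

    \\sum_{j=1}^{p} a_j \\, \\gamma_{|i-j|} = -\\gamma_i,
    \\qquad i = 1, \\dots, p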

3097 ''' 

3098 

3099 gamma = autocorr(x, order+1) 

3100 d = gamma[1:1+order] 

3101 a = num.zeros((order, order)) 

3102 gamma2 = num.concatenate((gamma[::-1], gamma[1:order])) 

3103 for i in range(order): 

3104 ioff = order-i 

3105 a[i, :] = gamma2[ioff:ioff+order] 

3106 

3107 return num.dot(num.linalg.inv(a), -d) 

3108 

3109 

3110def moving_avg(x, n): 

3111 n = int(n) 

3112 cx = x.cumsum() 

3113 nn = len(x) 

3114 y = num.zeros(nn, dtype=cx.dtype) 

3115 y[n//2:n//2+(nn-n)] = (cx[n:]-cx[:-n])/n 

3116 y[:n//2] = y[n//2] 

3117 y[n//2+(nn-n):] = y[n//2+(nn-n)-1] 

3118 return y 

3119 

3120 

3121def moving_sum(x, n, mode='valid'): 

3122 n = int(n) 

3123 cx = x.cumsum() 

3124 nn = len(x) 

3125 

3126 if mode == 'valid': 

3127 if nn-n+1 <= 0: 

3128 return num.zeros(0, dtype=cx.dtype) 

3129 y = num.zeros(nn-n+1, dtype=cx.dtype) 

3130 y[0] = cx[n-1] 

3131 y[1:nn-n+1] = cx[n:nn]-cx[0:nn-n] 

3132 

3133 if mode == 'full': 

3134 y = num.zeros(nn+n-1, dtype=cx.dtype) 

3135 if n <= nn: 

3136 y[0:n] = cx[0:n] 

3137 y[n:nn] = cx[n:nn]-cx[0:nn-n] 

3138 y[nn:nn+n-1] = cx[-1]-cx[nn-n:nn-1] 

3139 else: 

3140 y[0:nn] = cx[0:nn] 

3141 y[nn:n] = cx[nn-1] 

3142 y[n:nn+n-1] = cx[nn-1] - cx[0:nn-1] 

3143 

3144 if mode == 'same': 

3145 n1 = (n-1)//2 

3146 y = num.zeros(nn, dtype=cx.dtype) 

3147 if n <= nn: 

3148 y[0:n-n1] = cx[n1:n] 

3149 y[n-n1:nn-n1] = cx[n:nn]-cx[0:nn-n] 

3150 y[nn-n1:nn] = cx[nn-1] - cx[nn-n:nn-n+n1] 

3151 else: 

3152 y[0:max(0, nn-n1)] = cx[min(n1, nn):nn] 

3153 y[max(nn-n1, 0):min(n-n1, nn)] = cx[nn-1] 

3154 y[min(n-n1, nn):nn] = cx[nn-1] - cx[0:max(0, nn-(n-n1))] 

3155 

3156 return y 

3157 

3158 

3159def nextpow2(i): 

3160 return 2**int(math.ceil(math.log(i)/math.log(2.))) 

3161 

3162 

3163def snapper_w_offset(nmax, offset, delta, snapfun=math.ceil): 

3164 def snap(x): 

3165 return max(0, min(int(snapfun((x-offset)/delta)), nmax)) 

3166 return snap 

3167 

3168 

3169def snapper(nmax, delta, snapfun=math.ceil): 

3170 def snap(x): 

3171 return max(0, min(int(snapfun(x/delta)), nmax)) 

3172 return snap 

3173 

3174 

3175def apply_costaper(a, b, c, d, y, x0, dx): 

3176 hi = snapper_w_offset(y.size, x0, dx) 

3177 y[:hi(a)] = 0. 

3178 y[hi(a):hi(b)] *= 0.5 \ 

3179 - 0.5*num.cos((dx*num.arange(hi(a), hi(b))-(a-x0))/(b-a)*num.pi) 

3180 y[hi(c):hi(d)] *= 0.5 \ 

3181 + 0.5*num.cos((dx*num.arange(hi(c), hi(d))-(c-x0))/(d-c)*num.pi) 

3182 y[hi(d):] = 0. 

3183 

3184 

3185def span_costaper(a, b, c, d, y, x0, dx): 

3186 hi = snapper_w_offset(y.size, x0, dx) 

3187 return hi(a), hi(d) - hi(a) 

3188 

3189 

3190def costaper(a, b, c, d, nfreqs, deltaf): 

3191 hi = snapper(nfreqs, deltaf) 

3192 tap = num.zeros(nfreqs) 

3193 tap[hi(a):hi(b)] = 0.5 \ 

3194 - 0.5*num.cos((deltaf*num.arange(hi(a), hi(b))-a)/(b-a)*num.pi) 

3195 tap[hi(b):hi(c)] = 1. 

3196 tap[hi(c):hi(d)] = 0.5 \ 

3197 + 0.5*num.cos((deltaf*num.arange(hi(c), hi(d))-c)/(d-c)*num.pi) 

3198 

3199 return tap 

3200 

3201 

3202def t2ind(t, tdelta, snap=round): 

3203 return int(snap(t/tdelta)) 

3204 

3205 

3206def hilbert(x, N=None): 

3207 ''' 

3208 Return the analytic signal of ``x`` of length ``N``, computed via the
3209 Hilbert transform.
3210 (Adapted from scipy.signal, but changed to use fft and ifft from numpy.fft.)

3211 ''' 

3212 

3213 x = num.asarray(x) 

3214 if N is None: 

3215 N = len(x) 

3216 if N <= 0: 

3217 raise ValueError("N must be positive.") 

3218 if num.iscomplexobj(x): 

3219 logger.warning('imaginary part of x ignored.') 

3220 x = num.real(x) 

3221 

3222 Xf = num.fft.fft(x, N, axis=0) 

3223 h = num.zeros(N) 

3224 if N % 2 == 0: 

3225 h[0] = h[N//2] = 1 

3226 h[1:N//2] = 2 

3227 else: 

3228 h[0] = 1 

3229 h[1:(N+1)//2] = 2 

3230 

3231 if len(x.shape) > 1: 

3232 h = h[:, num.newaxis] 

3233 x = num.fft.ifft(Xf*h) 

3234 return x 

3235 

3236 

3237def near(a, b, eps): 

3238 return abs(a-b) < eps 

3239 

3240 

3241def coroutine(func): 

3242 def wrapper(*args, **kwargs): 

3243 gen = func(*args, **kwargs) 

3244 next(gen) 

3245 return gen 

3246 

3247 wrapper.__name__ = func.__name__ 

3248 wrapper.__dict__ = func.__dict__ 

3249 wrapper.__doc__ = func.__doc__ 

3250 return wrapper 

3251 

3252 

3253class States(object): 

3254 ''' 

3255 Utility to store channel-specific state in coroutines. 

3256 ''' 

3257 

3258 def __init__(self): 

3259 self._states = {} 

3260 

3261 def get(self, tr): 

3262 k = tr.nslc_id 

3263 if k in self._states: 

3264 tmin, deltat, dtype, value = self._states[k] 

3265 if (near(tmin, tr.tmin, deltat/100.) 

3266 and near(deltat, tr.deltat, deltat/10000.) 

3267 and dtype == tr.ydata.dtype): 

3268 

3269 return value 

3270 

3271 return None 

3272 

3273 def set(self, tr, value): 

3274 k = tr.nslc_id 

3275 if k in self._states and self._states[k][-1] is not value: 

3276 self.free(self._states[k][-1]) 

3277 

3278 self._states[k] = (tr.tmax+tr.deltat, tr.deltat, tr.ydata.dtype, value) 

3279 

3280 def free(self, value): 

3281 pass 

3282 

3283 

3284@coroutine 

3285def co_list_append(list): 

3286 while True: 

3287 list.append((yield)) 

3288 

3289 

3290class ScipyBug(Exception): 

3291 pass 

3292 

3293 

3294@coroutine 

3295def co_lfilter(target, b, a): 

3296 ''' 

3297 Successively filter broken continuous trace data (coroutine). 

3298 

3299 Create coroutine which takes :py:class:`Trace` objects, filters their data 

3300 through :py:func:`scipy.signal.lfilter` and sends new :py:class:`Trace` 

3301 objects containing the filtered data to target. This is useful if one

3302 wants to filter a long continuous time series, which is split into many 

3303 successive traces without producing filter artifacts at trace boundaries. 

3304 

3305 Filter states are kept *per channel*, specifically, for each (network, 

3306 station, location, channel) combination occurring in the input traces, a

3307 separate state is created and maintained. This makes it possible to filter 

3308 multichannel or multistation data with only one :py:func:`co_lfilter` 

3309 instance. 

3310 

3311 Filter state is reset when gaps occur.

3312 

3313 Use it like this:: 

3314 

3315 from pyrocko.trace import co_lfilter, co_list_append 

3316 

3317 filtered_traces = [] 

3318 pipe = co_lfilter(co_list_append(filtered_traces), b, a)

3319 for trace in traces: 

3320 pipe.send(trace) 

3321 

3322 pipe.close() 

3323 

3324 ''' 

3325 

3326 try: 

3327 states = States() 

3328 output = None 

3329 while True: 

3330 input = (yield) 

3331 

3332 zi = states.get(input) 

3333 if zi is None: 

3334 zi = num.zeros(max(len(a), len(b))-1, dtype=float) 

3335 

3336 output = input.copy(data=False) 

3337 try: 

3338 ydata, zf = signal.lfilter(b, a, input.get_ydata(), zi=zi) 

3339 except ValueError: 

3340 raise ScipyBug( 

3341 'signal.lfilter failed: could be related to a bug ' 

3342 'in some older scipy versions, e.g. on opensuse42.1') 

3343 

3344 output.set_ydata(ydata) 

3345 states.set(input, zf) 

3346 target.send(output) 

3347 

3348 except GeneratorExit: 

3349 target.close() 

3350 

3351 

3352def co_antialias(target, q, n=None, ftype='fir'): 

3353 b, a, n = util.decimate_coeffs(q, n, ftype) 

3354 anti = co_lfilter(target, b, a) 

3355 return anti 

3356 

3357 

3358@coroutine 

3359def co_dropsamples(target, q, nfir): 

3360 try: 

3361 states = States() 

3362 while True: 

3363 tr = (yield) 

3364 newdeltat = q * tr.deltat 

3365 ioffset = states.get(tr) 

3366 if ioffset is None: 

3367 # for fir filters, the first nfir samples are polluted by
3368 # boundary effects; cut them off.
3369 # for iir this may be (much) more; we do not correct for that.
3370 # put sample instances at times which are multiples of the
3371 # new sampling interval.

3372 newtmin_want = math.ceil( 

3373 (tr.tmin+(nfir+1)*tr.deltat) / newdeltat) * newdeltat \ 

3374 - (nfir/2*tr.deltat) 

3375 ioffset = int(round((newtmin_want - tr.tmin)/tr.deltat)) 

3376 if ioffset < 0: 

3377 ioffset = ioffset % q 

3378 

3379 newtmin_have = tr.tmin + ioffset * tr.deltat 

3380 newtr = tr.copy(data=False) 

3381 newtr.deltat = newdeltat 

3382 # because the fir kernel shifts data by nfir/2 samples: 

3383 newtr.tmin = newtmin_have - (nfir/2*tr.deltat) 

3384 newtr.set_ydata(tr.get_ydata()[ioffset::q].copy()) 

3385 states.set(tr, (ioffset % q - tr.data_len() % q) % q) 

3386 target.send(newtr) 

3387 

3388 except GeneratorExit: 

3389 target.close() 

3390 

3391 

3392def co_downsample(target, q, n=None, ftype='fir'): 

3393 ''' 

3394 Successively downsample broken continuous trace data (coroutine). 

3395 

3396 Create coroutine which takes :py:class:`Trace` objects, downsamples their 

3397 data and sends new :py:class:`Trace` objects containing the downsampled 

3398 data to target. This is useful if one wants to downsample a long

3399 continuous time series, which is split into many successive traces without 

3400 producing filter artifacts and gaps at trace boundaries. 

3401 

3402 Filter states are kept *per channel*, specifically, for each (network, 

3403 station, location, channel) combination occurring in the input traces, a

3404 separate state is created and maintained. This makes it possible to filter 

3405 multichannel or multistation data with only one :py:func:`co_lfilter` 

3406 instance. 

3407 

3408 Filter state is reset when gaps occur. The sampling instances are chosen
3409 so that they occur at (or as close as possible to) even multiples of the
3410 sampling interval of the downsampled trace (based on system time).

3411 ''' 

3412 

3413 b, a, n = util.decimate_coeffs(q, n, ftype) 

3414 return co_antialias(co_dropsamples(target, q, n), q, n, ftype) 

3415 

3416 

3417@coroutine 

3418def co_downsample_to(target, deltat): 
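    '''
    Successively downsample broken continuous trace data to a given
    sampling interval (coroutine).

    Like :py:func:`co_downsample`, but with the target sampling interval
    given directly as ``deltat``. The required decimation sequence is
    looked up with :py:func:`pyrocko.util.decitab` and a separate
    downsampling pipeline is maintained for each decimation sequence
    encountered. Raises :py:exc:`pyrocko.util.UnavailableDecimation` if
    the ratio of the sampling intervals cannot be realized.
    '''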

3419 

3420 decimators = {} 

3421 try: 

3422 while True: 

3423 tr = (yield) 

3424 ratio = deltat / tr.deltat 

3425 rratio = round(ratio) 

3426 if abs(rratio - ratio)/ratio > 0.0001: 

3427 raise util.UnavailableDecimation('ratio = %g' % ratio) 

3428 

3429 deci_seq = tuple(x for x in util.decitab(int(rratio)) if x != 1) 

3430 if deci_seq not in decimators: 

3431 pipe = target 

3432 for q in deci_seq[::-1]: 

3433 pipe = co_downsample(pipe, q) 

3434 

3435 decimators[deci_seq] = pipe 

3436 

3437 decimators[deci_seq].send(tr) 

3438 

3439 except GeneratorExit: 

3440 for g in decimators.values(): 

3441 g.close() 

3442 

3443 

3444class DomainChoice(StringChoice): 

3445 choices = [ 

3446 'time_domain', 

3447 'frequency_domain', 

3448 'envelope', 

3449 'absolute', 

3450 'cc_max_norm'] 

3451 

3452 

3453class MisfitSetup(Object): 

3454 ''' 

3455 Contains the misfit setup to be used in :py:meth:`Trace.misfit`.

3456 

3457 :param description: Description of the setup 

3458 :param norm: L-norm classifier 

3459 :param taper: Object of :py:class:`Taper` 

3460 :param filter: Object of :py:class:`FrequencyResponse` 

3461 :param domain: ['time_domain', 'frequency_domain', 'envelope', 'absolute', 

3462 'cc_max_norm'] 

3463 

3464 Can be dumped to a YAML file.

3465 ''' 

3466 

3467 xmltagname = 'misfitsetup' 

3468 description = String.T(optional=True) 

3469 norm = Int.T(optional=False) 

3470 taper = Taper.T(optional=False) 

3471 filter = FrequencyResponse.T(optional=True) 

3472 domain = DomainChoice.T(default='time_domain') 

3473 

3474 

3475def equalize_sampling_rates(trace_1, trace_2): 

3476 ''' 

3477 Equalize sampling rates of two traces (reduce higher sampling rate to 

3478 lower). 

3479 

3480 :param trace_1: :py:class:`Trace` object 

3481 :param trace_2: :py:class:`Trace` object 

3482 

3483 :returns: tuple ``(trace_1, trace_2)``; the trace which had to be
resampled is returned as a downsampled copy, the other is passed
through unchanged.

3484 ''' 

3485 

3486 if same_sampling_rate(trace_1, trace_2): 

3487 return trace_1, trace_2 

3488 

3489 if trace_1.deltat < trace_2.deltat: 

3490 t1_out = trace_1.copy() 

3491 t1_out.downsample_to(deltat=trace_2.deltat, snap=True) 

3492 logger.debug('Trace downsampled (return copy of trace): %s' 

3493 % '.'.join(t1_out.nslc_id)) 

3494 return t1_out, trace_2 

3495 

3496 elif trace_1.deltat > trace_2.deltat: 

3497 t2_out = trace_2.copy() 

3498 t2_out.downsample_to(deltat=trace_1.deltat, snap=True) 

3499 logger.debug('Trace downsampled (return copy of trace): %s' 

3500 % '.'.join(t2_out.nslc_id)) 

3501 return trace_1, t2_out 

3502 

3503 

3504def Lx_norm(u, v, norm=2): 

3505 ''' 

3506 Calculate the misfit value *m* and the normalization divisor *n*
3507 according to the given norm.

3508 

3509 The normalization divisor *n* is calculated from ``v``. 

3510 

3511 :param u: :py:class:`numpy.ndarray`
3512 :param v: :py:class:`numpy.ndarray`

3513 :param norm: (default = 2) 

3514 

3515 ``u`` and ``v`` must be of same size. 
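
For the default ``norm=2`` this evaluates to

.. math::

    m = \\sqrt{\\sum_i (v_i - u_i)^2}, \\qquad
    n = \\sqrt{\\sum_i v_i^2}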

3516 ''' 

3517 

3518 if norm == 1: 

3519 return ( 

3520 num.sum(num.abs(v-u)), 

3521 num.sum(num.abs(v))) 

3522 

3523 elif norm == 2: 

3524 return ( 

3525 num.sqrt(num.sum((v-u)**2)), 

3526 num.sqrt(num.sum(v**2))) 

3527 

3528 else: 

3529 return ( 

3530 num.power(num.sum(num.abs(num.power(v - u, norm))), 1./norm), 

3531 num.power(num.sum(num.abs(num.power(v, norm))), 1./norm)) 

3532 

3533 

3534def do_downsample(tr, deltat): 

3535 if abs(tr.deltat - deltat) / tr.deltat > 1e-6: 

3536 tr = tr.copy() 

3537 tr.downsample_to(deltat, snap=True, demean=False) 

3538 else: 

3539 if tr.tmin/tr.deltat > 1e-6 or tr.tmax/tr.deltat > 1e-6: 

3540 tr = tr.copy() 

3541 tr.snap() 

3542 return tr 

3543 

3544 

3545def do_extend(tr, tmin, tmax): 

3546 if tmin < tr.tmin or tmax > tr.tmax: 

3547 tr = tr.copy() 

3548 tr.extend(tmin=tmin, tmax=tmax, fillmethod='repeat') 

3549 

3550 return tr 

3551 

3552 

3553def do_pre_taper(tr, taper): 

3554 return tr.taper(taper, inplace=False, chop=True) 

3555 

3556 

3557def do_fft(tr, filter): 

3558 if filter is None: 

3559 return tr 

3560 else: 

3561 ndata = tr.ydata.size 

3562 nfft = nextpow2(ndata) 

3563 padded = num.zeros(nfft, dtype=float) 

3564 padded[:ndata] = tr.ydata 

3565 spectrum = num.fft.rfft(padded) 

3566 df = 1.0 / (tr.deltat * nfft) 

3567 frequencies = num.arange(spectrum.size)*df 

3568 return [tr, frequencies, spectrum] 

3569 

3570 

3571def do_filter(inp, filter): 

3572 if filter is None: 

3573 return inp 

3574 else: 

3575 tr, frequencies, spectrum = inp 

3576 spectrum *= filter.evaluate(frequencies) 

3577 return [tr, frequencies, spectrum] 

3578 

3579 

3580def do_ifft(inp): 

3581 if isinstance(inp, Trace): 

3582 return inp 

3583 else: 

3584 tr, _, spectrum = inp 

3585 ndata = tr.ydata.size 

3586 tr = tr.copy(data=False) 

3587 tr.set_ydata(num.fft.irfft(spectrum)[:ndata]) 

3588 return tr 

3589 

3590 

3591def check_alignment(t1, t2): 

3592 if abs(t1.tmin-t2.tmin) > t1.deltat * 1e-4 or \ 

3593 abs(t1.tmax - t2.tmax) > t1.deltat * 1e-4 or \ 

3594 t1.ydata.shape != t2.ydata.shape: 

3595 raise MisalignedTraces( 

3596 'Cannot calculate misfit of %s and %s due to misaligned ' 

3597 'traces.' % ('.'.join(t1.nslc_id), '.'.join(t2.nslc_id)))