# http://pyrocko.org - GPLv3
#
# The Pyrocko Developers, 21st Century
# ---|P------/S----------~Lg----------
'''
This module provides basic signal processing for seismic traces.
'''

from __future__ import division, absolute_import

import time
import math
import copy
import logging
import hashlib
from collections import defaultdict

import numpy as num
from scipy import signal

from pyrocko import util, orthodrome, pchain, model
from pyrocko.util import reuse
from pyrocko.guts import Object, Float, Int, String, List, \
    StringChoice, Timestamp
from pyrocko.guts_array import Array
from pyrocko.model import squirrel_content

# backward compatibility
from pyrocko.util import UnavailableDecimation  # noqa
from pyrocko.response import (  # noqa
    FrequencyResponse, Evalresp, InverseEvalresp, PoleZeroResponse,
    ButterworthResponse, SampledResponse, IntegrationResponse,
    DifferentiationResponse, MultiplyResponse)


guts_prefix = 'pf'

logger = logging.getLogger('pyrocko.trace')


@squirrel_content
class Trace(Object):

    '''
    Create new trace object.

    A ``Trace`` object represents a single continuous strip of evenly sampled
    time series data. It is built from a 1D NumPy array containing the data
    samples and some attributes describing its beginning and ending time, its
    sampling rate and four string identifiers (its network, station, location
    and channel code).

    :param network: network code
    :param station: station code
    :param location: location code
    :param channel: channel code
    :param extra: extra code
    :param tmin: system time of first sample in [s]
    :param tmax: system time of last sample in [s] (if set to ``None`` it is
        computed from length of ``ydata``)
    :param deltat: sampling interval in [s]
    :param ydata: 1D numpy array with data samples (can be ``None`` when
        ``tmax`` is not ``None``)
    :param mtime: optional modification time
    :param meta: additional meta information (not used, but maintained by the
        library)

    The lengths of the network, station, location and channel codes are not
    restricted by this software, but data formats like SAC, Mini-SEED or GSE
    have different limits on the lengths of these codes. The codes set here
    are silently truncated when the trace is stored.
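
    Example (values are illustrative)::

        import numpy as num
        from pyrocko.trace import Trace

        tr = Trace(
            network='XX', station='TEST', channel='BHZ',
            tmin=0.0, deltat=0.01,
            ydata=num.random.normal(size=1000))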

    '''

    network = String.T(default='', help='Deployment/network code (1-8)')
    station = String.T(default='STA', help='Station code (1-5)')
    location = String.T(default='', help='Location code (0-2)')
    channel = String.T(default='', help='Channel code (3)')
    extra = String.T(default='', help='Extra/custom code')

    tmin = Timestamp.T(default=Timestamp.D('1970-01-01 00:00:00'))
    tmax = Timestamp.T()

    deltat = Float.T(default=1.0)
    ydata = Array.T(optional=True, shape=(None,), serialize_as='base64+meta')

    mtime = Timestamp.T(optional=True)

    cached_frequencies = {}

    def __init__(self, network='', station='STA', location='', channel='',
                 tmin=0., tmax=None, deltat=1., ydata=None, mtime=None,
                 meta=None, extra=''):

        Object.__init__(self, init_props=False)

        time_float = util.get_time_float()

        if not isinstance(tmin, time_float):
            tmin = Trace.tmin.regularize_extra(tmin)

        if tmax is not None and not isinstance(tmax, time_float):
            tmax = Trace.tmax.regularize_extra(tmax)

        if mtime is not None and not isinstance(mtime, time_float):
            mtime = Trace.mtime.regularize_extra(mtime)

        if ydata is not None and not isinstance(ydata, num.ndarray):
            ydata = Trace.ydata.regularize_extra(ydata)

        self._growbuffer = None

        tmin = time_float(tmin)
        if tmax is not None:
            tmax = time_float(tmax)

        if mtime is None:
            mtime = time_float(time.time())

        self.network, self.station, self.location, self.channel, \
            self.extra = [
                reuse(x) for x in (
                    network, station, location, channel, extra)]

        self.tmin = tmin
        self.deltat = deltat

        if tmax is None:
            if ydata is not None:
                self.tmax = self.tmin + (ydata.size-1)*self.deltat
            else:
                raise Exception(
                    'fixme: trace must be created with tmax or ydata')
        else:
            n = int(round((tmax - self.tmin) / self.deltat)) + 1
            self.tmax = self.tmin + (n - 1) * self.deltat

        self.meta = meta
        self.ydata = ydata
        self.mtime = mtime
        self._update_ids()
        self.file = None
        self._pchain = None

    def __str__(self):
        fmt = min(9, max(0, -int(math.floor(math.log10(self.deltat)))))
        s = 'Trace (%s, %s, %s, %s)\n' % self.nslc_id
        s += '  timerange: %s - %s\n' % (
            util.time_to_str(self.tmin, format=fmt),
            util.time_to_str(self.tmax, format=fmt))

        s += '  delta t: %g\n' % self.deltat
        if self.meta:
            for k in sorted(self.meta.keys()):
                s += '  %s: %s\n' % (k, self.meta[k])
        return s

    @property
    def codes(self):
        from pyrocko.squirrel import model
        return model.CodesNSLCE(
            self.network, self.station, self.location, self.channel,
            self.extra)

    @property
    def time_span(self):
        return self.tmin, self.tmax

    @property
    def summary(self):
        return '%s %-16s %s %g' % (
            self.__class__.__name__, self.str_codes, self.str_time_span,
            self.deltat)

    def __getstate__(self):
        return (self.network, self.station, self.location, self.channel,
                self.tmin, self.tmax, self.deltat, self.mtime,
                self.ydata, self.meta, self.extra)

    def __setstate__(self, state):
        if len(state) == 11:
            self.network, self.station, self.location, self.channel, \
                self.tmin, self.tmax, self.deltat, self.mtime, \
                self.ydata, self.meta, self.extra = state

        elif len(state) == 12:
            # backward compatibility 0
            self.network, self.station, self.location, self.channel, \
                self.tmin, self.tmax, self.deltat, self.mtime, \
                self.ydata, self.meta, _, self.extra = state

        elif len(state) == 10:
            # backward compatibility 1
            self.network, self.station, self.location, self.channel, \
                self.tmin, self.tmax, self.deltat, self.mtime, \
                self.ydata, self.meta = state

            self.extra = ''

        else:
            # backward compatibility 2
            self.network, self.station, self.location, self.channel, \
                self.tmin, self.tmax, self.deltat, self.mtime = state
            self.ydata = None
            self.meta = None
            self.extra = ''

        self._growbuffer = None
        self._update_ids()

    def hash(self, unsafe=False):
        sha1 = hashlib.sha1()
        for code in self.nslc_id:
            sha1.update(code.encode())
        sha1.update(self.tmin.hex().encode())
        sha1.update(self.tmax.hex().encode())
        sha1.update(self.deltat.hex().encode())

        if self.ydata is not None:
            if not unsafe:
                sha1.update(memoryview(self.ydata))
            else:
                sha1.update(memoryview(self.ydata[:32]))
                sha1.update(memoryview(self.ydata[-32:]))

        return sha1.hexdigest()

    def name(self):
        '''
        Get a short string description.
        '''

        s = '%s.%s.%s.%s, %s, %s' % (self.nslc_id + (
            util.time_to_str(self.tmin),
            util.time_to_str(self.tmax)))

        return s

    def __eq__(self, other):
        return (
            isinstance(other, Trace)
            and self.network == other.network
            and self.station == other.station
            and self.location == other.location
            and self.channel == other.channel
            and (abs(self.deltat - other.deltat)
                 < (self.deltat + other.deltat)*1e-6)
            and abs(self.tmin-other.tmin) < self.deltat*0.01
            and abs(self.tmax-other.tmax) < self.deltat*0.01
            and num.all(self.ydata == other.ydata))

    def almost_equal(self, other, rtol=1e-5, atol=0.0):
        return (
            self.network == other.network
            and self.station == other.station
            and self.location == other.location
            and self.channel == other.channel
            and (abs(self.deltat - other.deltat)
                 < (self.deltat + other.deltat)*1e-6)
            and abs(self.tmin-other.tmin) < self.deltat*0.01
            and abs(self.tmax-other.tmax) < self.deltat*0.01
            and num.allclose(self.ydata, other.ydata, rtol=rtol, atol=atol))

    def assert_almost_equal(self, other, rtol=1e-5, atol=0.0):

        assert self.network == other.network, \
            'network codes differ: %s, %s' % (self.network, other.network)
        assert self.station == other.station, \
            'station codes differ: %s, %s' % (self.station, other.station)
        assert self.location == other.location, \
            'location codes differ: %s, %s' % (self.location, other.location)
        assert self.channel == other.channel, 'channel codes differ'
        assert (abs(self.deltat - other.deltat)
                < (self.deltat + other.deltat)*1e-6), \
            'sampling intervals differ %g, %g' % (self.deltat, other.deltat)
        assert abs(self.tmin-other.tmin) < self.deltat*0.01, \
            'start times differ: %s, %s' % (
                util.time_to_str(self.tmin), util.time_to_str(other.tmin))
        assert abs(self.tmax-other.tmax) < self.deltat*0.01, \
            'end times differ: %s, %s' % (
                util.time_to_str(self.tmax), util.time_to_str(other.tmax))

        assert num.allclose(self.ydata, other.ydata, rtol=rtol, atol=atol), \
            'trace values differ'

    def __hash__(self):
        return id(self)

    def __call__(self, t, clip=False, snap=round):
        it = int(snap((t-self.tmin)/self.deltat))
        if clip:
            it = max(0, min(it, self.ydata.size-1))
        else:
            if it < 0 or self.ydata.size <= it:
                raise IndexError()

        return self.tmin+it*self.deltat, self.ydata[it]

    def interpolate(self, t, clip=False):
        '''
        Value of trace between supporting points through linear interpolation.

        :param t: time instant
        :param clip: whether to clip indices to trace ends
        '''

        t0, y0 = self(t, clip=clip, snap=math.floor)
        t1, y1 = self(t, clip=clip, snap=math.ceil)
        if t0 == t1:
            return y0
        else:
            return y0+(t-t0)/(t1-t0)*(y1-y0)

    def index_clip(self, i):
        '''
        Clip index to valid range.
        '''

        return min(max(0, i), self.ydata.size)

    def add(self, other, interpolate=True, left=0., right=0.):
        '''
        Add values of other trace (self += other).

        Add values of ``other`` trace to the values of ``self``, where it
        intersects with ``other``. This method does not change the extent of
        ``self``. If ``interpolate`` is ``True`` (the default), the values of
        ``other`` to be added are interpolated at sampling instants of
        ``self``. Linear interpolation is performed. In this case the sampling
        rate of ``other`` must be equal to or lower than that of ``self``. If
        ``interpolate`` is ``False``, the sampling rates of the two traces
        must match.
        '''

        if interpolate:
            assert self.deltat <= other.deltat \
                or same_sampling_rate(self, other)

            other_xdata = other.get_xdata()
            xdata = self.get_xdata()
            self.ydata += num.interp(
                xdata, other_xdata, other.ydata, left=left, right=right)
        else:
            assert self.deltat == other.deltat
            ioff = int(round((other.tmin-self.tmin)/self.deltat))
            ibeg = max(0, ioff)
            iend = min(self.data_len(), ioff+other.data_len())
            self.ydata[ibeg:iend] += other.ydata[ibeg-ioff:iend-ioff]

    def mult(self, other, interpolate=True):
        '''
        Multiply with values of other trace ``(self *= other)``.

        Multiply values of ``other`` trace with the values of ``self``, where
        it intersects with ``other``. This method does not change the extent
        of ``self``. If ``interpolate`` is ``True`` (the default), the values
        of ``other`` to be multiplied are interpolated at sampling instants
        of ``self``. Linear interpolation is performed. In this case the
        sampling rate of ``other`` must be equal to or lower than that of
        ``self``. If ``interpolate`` is ``False``, the sampling rates of the
        two traces must match.
        '''

        if interpolate:
            assert self.deltat <= other.deltat or \
                same_sampling_rate(self, other)

            other_xdata = other.get_xdata()
            xdata = self.get_xdata()
            self.ydata *= num.interp(
                xdata, other_xdata, other.ydata, left=0., right=0.)
        else:
            assert self.deltat == other.deltat
            ibeg1 = int(round((other.tmin-self.tmin)/self.deltat))
            ibeg2 = int(round((self.tmin-other.tmin)/self.deltat))
            iend1 = int(round((other.tmax-self.tmin)/self.deltat))+1
            iend2 = int(round((self.tmax-other.tmin)/self.deltat))+1

            ibeg1 = self.index_clip(ibeg1)
            iend1 = self.index_clip(iend1)
            ibeg2 = self.index_clip(ibeg2)
            iend2 = self.index_clip(iend2)

            self.ydata[ibeg1:iend1] *= other.ydata[ibeg2:iend2]

    def max(self):
        '''
        Get time and value of data maximum.
        '''

        i = num.argmax(self.ydata)
        return self.tmin + i*self.deltat, self.ydata[i]

    def min(self):
        '''
        Get time and value of data minimum.
        '''

        i = num.argmin(self.ydata)
        return self.tmin + i*self.deltat, self.ydata[i]

    def absmax(self):
        '''
        Get time and value of maximum of the absolute of data.
        '''

        tmi, mi = self.min()
        tma, ma = self.max()
        if abs(mi) > abs(ma):
            return tmi, abs(mi)
        else:
            return tma, abs(ma)

    def set_codes(
            self, network=None, station=None, location=None, channel=None,
            extra=None):

        '''
        Set network, station, location, and channel codes.
        '''

        if network is not None:
            self.network = network
        if station is not None:
            self.station = station
        if location is not None:
            self.location = location
        if channel is not None:
            self.channel = channel
        if extra is not None:
            self.extra = extra

        self._update_ids()

    def set_network(self, network):
        self.network = network
        self._update_ids()

    def set_station(self, station):
        self.station = station
        self._update_ids()

    def set_location(self, location):
        self.location = location
        self._update_ids()

    def set_channel(self, channel):
        self.channel = channel
        self._update_ids()

    def overlaps(self, tmin, tmax):
        '''
        Check if trace has overlap with a given time span.
        '''
        return not (tmax < self.tmin or self.tmax < tmin)

    def is_relevant(self, tmin, tmax, selector=None):
        '''
        Check if trace has overlap with a given time span and matches a
        condition callback. (internal use)
        '''

        return not (tmax <= self.tmin or self.tmax < tmin) \
            and (selector is None or selector(self))

    def _update_ids(self):
        '''
        Update dependent ids.
        '''

        self.full_id = (
            self.network, self.station, self.location, self.channel,
            self.tmin)
        self.nslc_id = reuse(
            (self.network, self.station, self.location, self.channel))

    def prune_from_reuse_cache(self):
        util.deuse(self.nslc_id)
        util.deuse(self.network)
        util.deuse(self.station)
        util.deuse(self.location)
        util.deuse(self.channel)

    def set_mtime(self, mtime):
        '''
        Set modification time of the trace.
        '''

        self.mtime = mtime

    def get_xdata(self):
        '''
        Create array for time axis.
        '''

        if self.ydata is None:
            raise NoData()

        return self.tmin \
            + num.arange(len(self.ydata), dtype=num.float64) * self.deltat

    def get_ydata(self):
        '''
        Get data array.
        '''

        if self.ydata is None:
            raise NoData()

        return self.ydata

    def set_ydata(self, new_ydata):
        '''
        Replace data array.
        '''

        self.drop_growbuffer()
        self.ydata = new_ydata
        self.tmax = self.tmin+(len(self.ydata)-1)*self.deltat

    def data_len(self):
        if self.ydata is not None:
            return self.ydata.size
        else:
            return int(round((self.tmax-self.tmin)/self.deltat)) + 1

    def drop_data(self):
        '''
        Forget data, make dataless trace.
        '''

        self.drop_growbuffer()
        self.ydata = None

    def drop_growbuffer(self):
        '''
        Detach the trace's grow buffer.
        '''

        self._growbuffer = None
        self._pchain = None

    def copy(self, data=True):
        '''
        Make a deep copy of the trace.
        '''

        tracecopy = copy.copy(self)
        tracecopy.drop_growbuffer()
        if data:
            tracecopy.ydata = self.ydata.copy()
        tracecopy.meta = copy.deepcopy(self.meta)
        return tracecopy

    def crop_zeros(self):
        '''
        Remove any zeros at beginning and end.
        '''

        indices = num.where(self.ydata != 0.0)[0]
        if indices.size == 0:
            raise NoData()

        ibeg = indices[0]
        iend = indices[-1]+1
        if ibeg == 0 and iend == self.ydata.size:
            return

        self.drop_growbuffer()
        self.ydata = self.ydata[ibeg:iend].copy()
        self.tmin = self.tmin+ibeg*self.deltat
        self.tmax = self.tmin+(len(self.ydata)-1)*self.deltat
        self._update_ids()

    def append(self, data):
        '''
        Append data to the end of the trace.

        To make this method efficient when very few samples, or even single
        samples, are appended successively, a larger grow buffer is allocated
        upon first invocation. The trace's data is then changed to be a view
        into the currently filled portion of the grow buffer array.
        '''

        assert self.ydata.dtype == data.dtype
        newlen = data.size + self.ydata.size
        if self._growbuffer is None or self._growbuffer.size < newlen:
            self._growbuffer = num.empty(newlen*2, dtype=self.ydata.dtype)
            self._growbuffer[:self.ydata.size] = self.ydata
        self._growbuffer[self.ydata.size:newlen] = data
        self.ydata = self._growbuffer[:newlen]
        self.tmax = self.tmin + (newlen-1)*self.deltat

    def chop(
            self, tmin, tmax, inplace=True, include_last=False,
            snap=(round, round), want_incomplete=True):

        '''
        Cut the trace to given time span.

        If the ``inplace`` argument is True (the default) the trace is cut in
        place, otherwise a new trace with the cut part is returned. By
        default, the indices where to start and end the trace data array are
        determined by rounding of ``tmin`` and ``tmax`` to sampling instants
        using Python's :py:func:`round` function. This behaviour can be
        changed with the ``snap`` argument, which takes a tuple of two
        functions (one for the lower and one for the upper end) to be used
        instead of :py:func:`round`. The last sample is by default not
        included unless ``include_last`` is set to True. If the given time
        span exceeds the available time span of the trace, the available
        part is returned, unless ``want_incomplete`` is set to False - in
        that case, a :py:exc:`NoData` exception is raised. This exception is
        always raised when the requested time span does not overlap with the
        trace's time span.
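
        Example, cutting a 10 s snippet without modifying ``self``
        (sketch)::

            snippet = tr.chop(tr.tmin + 5., tr.tmin + 15., inplace=False)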

        '''

        if want_incomplete:
            if tmax <= self.tmin-self.deltat or self.tmax+self.deltat < tmin:
                raise NoData()
        else:
            if tmin < self.tmin or self.tmax < tmax:
                raise NoData()

        ibeg = max(0, t2ind(tmin-self.tmin, self.deltat, snap[0]))
        iplus = 0
        if include_last:
            iplus = 1

        iend = min(
            self.data_len(),
            t2ind(tmax-self.tmin, self.deltat, snap[1])+iplus)

        if ibeg >= iend:
            raise NoData()

        obj = self
        if not inplace:
            obj = self.copy(data=False)

        self.drop_growbuffer()
        if self.ydata is not None:
            obj.ydata = self.ydata[ibeg:iend].copy()
        else:
            obj.ydata = None

        obj.tmin = obj.tmin+ibeg*obj.deltat
        obj.tmax = obj.tmin+((iend-ibeg)-1)*obj.deltat

        obj._update_ids()

        return obj

    def downsample(self, ndecimate, snap=False, initials=None, demean=False,
                   ftype='fir-remez'):
        '''
        Downsample trace by a given integer factor.

        Antialiasing filter details:

        * ``iir``: A Chebyshev type I digital filter of order 8 with maximum
          ripple of 0.05 dB in the passband.
        * ``fir``: A FIR filter using a Hamming window and 31 taps and a
          well-behaved phase response.
        * ``fir-remez``: A FIR filter based on the Remez exchange algorithm
          with ``45*ndecimate`` taps and a cutoff at 75% Nyquist frequency.

        Comparison of the digital filters:

        .. figure :: ../../static/downsampling-filter-comparison.png
            :width: 60%
            :alt: Comparison of the downsampling filters.

        :param ndecimate: decimation factor, avoid values larger than 8
        :param snap: whether to put the new sampling instants closest to
            multiples of the sampling interval.
        :param initials: ``None``, ``True``, or initial conditions for the
            anti-aliasing filter, obtained from a previous run. In the latter
            two cases the final state of the filter is returned instead of
            ``None``.
        :param demean: whether to demean the signal before filtering.
        :param ftype: which filter to use, choose from
            ``'iir'``, ``'fir'``, ``'fir-remez'``. Default is ``'fir-remez'``.
        '''
        newdeltat = self.deltat*ndecimate
        if snap:
            ilag = int(round(
                (math.ceil(self.tmin / newdeltat) * newdeltat - self.tmin)
                / self.deltat))
        else:
            ilag = 0

        if snap and ilag > 0 and ilag < self.ydata.size:
            data = self.ydata.astype(num.float64)
            self.tmin += ilag*self.deltat
        else:
            data = self.ydata.astype(num.float64)

        if demean:
            data -= num.mean(data)

        if data.size != 0:
            result = util.decimate(
                data, ndecimate, ftype=ftype, zi=initials, ioff=ilag)
        else:
            result = data

        if initials is None:
            self.ydata, finals = result, None
        else:
            self.ydata, finals = result

        self.deltat = reuse(self.deltat*ndecimate)
        self.tmax = self.tmin+(len(self.ydata)-1)*self.deltat
        self._update_ids()

        return finals

    def downsample_to(self, deltat, snap=False, allow_upsample_max=1,
                      initials=None, demean=False, ftype='fir-remez'):

        '''
        Downsample to given sampling interval.

        Tries to downsample the trace to a target sampling interval of
        ``deltat``. This runs :py:meth:`Trace.downsample` one or several
        times. If ``allow_upsample_max`` is set to a value larger than 1,
        intermediate upsampling steps are allowed, in order to increase the
        number of possible downsampling ratios.

        If the requested ratio is not supported, an exception of type
        :py:exc:`pyrocko.util.UnavailableDecimation` is raised.

        :param deltat: desired sampling interval in [s]
        :param allow_upsample_max: maximum upsampling of the signal to
            achieve the desired ``deltat``. Default is ``1``.
        :param snap: whether to put the new sampling instants closest to
            multiples of the sampling interval.
        :param initials: ``None``, ``True``, or initial conditions for the
            anti-aliasing filter, obtained from a previous run. In the latter
            two cases the final state of the filter is returned instead of
            ``None``.
        :param demean: whether to demean the signal before filtering.
        :param ftype: which filter to use, choose from
            ``'iir'``, ``'fir'``, ``'fir-remez'``. Default is ``'fir-remez'``.

        See also: :py:meth:`Trace.downsample`
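
        Example, downsampling from 100 Hz to 20 Hz sampling rate (sketch)::

            tr.downsample_to(0.05)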

        '''

        ratio = deltat/self.deltat
        rratio = round(ratio)

        ok = False
        for upsratio in range(1, allow_upsample_max+1):
            dratio = (upsratio/self.deltat) / (1./deltat)
            if abs(dratio - round(dratio)) / dratio < 0.0001 and \
                    util.decitab(int(round(dratio))):

                ok = True
                break

        if not ok:
            raise util.UnavailableDecimation('ratio = %g' % ratio)

        if upsratio > 1:
            self.drop_growbuffer()
            ydata = self.ydata
            self.ydata = num.zeros(
                ydata.size*upsratio-(upsratio-1), ydata.dtype)
            self.ydata[::upsratio] = ydata
            for i in range(1, upsratio):
                self.ydata[i::upsratio] = \
                    float(i)/upsratio * ydata[:-1] \
                    + float(upsratio-i)/upsratio * ydata[1:]
            self.deltat = self.deltat/upsratio

            ratio = deltat/self.deltat
            rratio = round(ratio)

        deci_seq = util.decitab(int(rratio))
        finals = []
        for i, ndecimate in enumerate(deci_seq):
            if ndecimate != 1:
                xinitials = None
                if initials is not None:
                    xinitials = initials[i]
                finals.append(self.downsample(
                    ndecimate, snap=snap, initials=xinitials, demean=demean,
                    ftype=ftype))

        if initials is not None:
            return finals

    def resample(self, deltat):
        '''
        Resample to given sampling interval ``deltat``.

        Resampling is performed in the frequency domain.
        '''

        ndata = self.ydata.size
        ntrans = nextpow2(ndata)
        fntrans2 = ntrans * self.deltat/deltat
        ntrans2 = int(round(fntrans2))
        deltat2 = self.deltat * float(ntrans)/float(ntrans2)
        ndata2 = int(round(ndata*self.deltat/deltat2))
        if abs(fntrans2 - ntrans2) > 1e-7:
            logger.warning(
                'resample: requested deltat %g could not be matched exactly: '
                '%g' % (deltat, deltat2))

        data = self.ydata
        data_pad = num.zeros(ntrans, dtype=float)
        data_pad[:ndata] = data
        fdata = num.fft.rfft(data_pad)
        fdata2 = num.zeros((ntrans2+1)//2, dtype=fdata.dtype)
        n = min(fdata.size, fdata2.size)
        fdata2[:n] = fdata[:n]
        data2 = num.fft.irfft(fdata2)
        data2 = data2[:ndata2]
        data2 *= float(ntrans2) / float(ntrans)
        self.deltat = deltat2
        self.set_ydata(data2)

    def resample_simple(self, deltat):
        tyear = 3600*24*365.

        if deltat == self.deltat:
            return

        if abs(self.deltat - deltat) * tyear/deltat < deltat:
            logger.warning(
                'resample_simple: less than one sample would have to be '
                'inserted/deleted per year. Doing nothing.')
            return

        ninterval = int(round(deltat / (self.deltat - deltat)))
        if abs(ninterval) < 20:
            logger.error(
                'resample_simple: sample insertion/deletion interval less '
                'than 20. results would be erroneous.')
            raise ResamplingFailed()

        delete = False
        if ninterval < 0:
            ninterval = - ninterval
            delete = True

        tyearbegin = util.year_start(self.tmin)

        nmin = int(round((self.tmin - tyearbegin)/deltat))

        ibegin = (((nmin-1)//ninterval)+1) * ninterval - nmin
        nindices = (len(self.ydata) - ibegin - 1) // ninterval + 1
        if nindices > 0:
            indices = ibegin + num.arange(nindices) * ninterval
            data_split = num.split(self.ydata, indices)
            data = []
            for ln, h in zip(data_split[:-1], data_split[1:]):
                if delete:
                    ln = ln[:-1]

                data.append(ln)
                if not delete:
                    if ln.size == 0:
                        v = h[0]
                    else:
                        v = 0.5*(ln[-1] + h[0])
                    data.append(num.array([v], dtype=ln.dtype))

            data.append(data_split[-1])

            ydata_new = num.concatenate(data)

            self.tmin = tyearbegin + nmin * deltat
            self.deltat = deltat
            self.set_ydata(ydata_new)

    def stretch(self, tmin_new, tmax_new):
        '''
        Stretch signal while preserving sample rate using sinc interpolation.

        :param tmin_new: new time of first sample
        :param tmax_new: new time of last sample

        This method can be used to correct for a small linear time drift or
        to introduce sub-sample time shifts. The amount of stretching is
        limited to 10% by the implementation; the approximations used assume
        that it is much smaller than that.
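
        Example, correcting an assumed 2 ms drift over the trace (sketch)::

            tr.stretch(tr.tmin, tr.tmax + 0.002)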

        '''

        from pyrocko import signal_ext

        i_control = num.array([0, self.ydata.size-1], dtype=num.int64)
        t_control = num.array([tmin_new, tmax_new], dtype=float)

        r = (tmax_new - tmin_new) / self.deltat + 1.0
        n_new = int(round(r))
        if abs(n_new - r) > 0.001:
            n_new = int(math.floor(r))

        assert n_new >= 2

        tmax_new = tmin_new + (n_new-1) * self.deltat

        ydata_new = num.empty(n_new, dtype=float)
        signal_ext.antidrift(i_control, t_control,
                             self.ydata.astype(float),
                             tmin_new, self.deltat, ydata_new)

        self.tmin = tmin_new
        self.set_ydata(ydata_new)
        self._update_ids()

    def nyquist_check(self, frequency, intro='Corner frequency', warn=True,
                      raise_exception=False):

        '''
        Check if a given frequency is above the Nyquist frequency of the
        trace.

        :param intro: string used to introduce the warning/error message
        :param warn: whether to emit a warning
        :param raise_exception: whether to raise an :py:exc:`AboveNyquist`
            exception.
        '''

        if frequency >= 0.5/self.deltat:
            message = '%s (%g Hz) is equal to or higher than Nyquist ' \
                'frequency (%g Hz). (Trace %s)' \
                % (intro, frequency, 0.5/self.deltat, self.name())
            if warn:
                logger.warning(message)
            if raise_exception:
                raise AboveNyquist(message)

932 

933 def lowpass(self, order, corner, nyquist_warn=True, 

934 nyquist_exception=False, demean=True): 

935 

936 ''' 

937 Apply Butterworth lowpass to the trace. 

938 

939 :param order: order of the filter 

940 :param corner: corner frequency of the filter 

941 

942 Mean is removed before filtering. 
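
        Example (sketch; 4th order filter with 2 Hz corner)::

            tr.lowpass(4, 2.0)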

        '''

        self.nyquist_check(
            corner, 'Corner frequency of lowpass', nyquist_warn,
            nyquist_exception)

        (b, a) = _get_cached_filter_coefs(
            order, [corner*2.0*self.deltat], btype='low')

        if len(a) != order+1 or len(b) != order+1:
            logger.warning(
                'Erroneous filter coefficients returned by '
                'scipy.signal.butter(). You may need to downsample the '
                'signal before filtering.')

        data = self.ydata.astype(num.float64)
        if demean:
            data -= num.mean(data)
        self.drop_growbuffer()
        self.ydata = signal.lfilter(b, a, data)

    def highpass(self, order, corner, nyquist_warn=True,
                 nyquist_exception=False, demean=True):

        '''
        Apply Butterworth highpass to the trace.

        :param order: order of the filter
        :param corner: corner frequency of the filter

        Mean is removed before filtering.
        '''

        self.nyquist_check(
            corner, 'Corner frequency of highpass', nyquist_warn,
            nyquist_exception)

        (b, a) = _get_cached_filter_coefs(
            order, [corner*2.0*self.deltat], btype='high')

        data = self.ydata.astype(num.float64)
        if len(a) != order+1 or len(b) != order+1:
            logger.warning(
                'Erroneous filter coefficients returned by '
                'scipy.signal.butter(). You may need to downsample the '
                'signal before filtering.')
        if demean:
            data -= num.mean(data)
        self.drop_growbuffer()
        self.ydata = signal.lfilter(b, a, data)

    def bandpass(self, order, corner_hp, corner_lp, demean=True):
        '''
        Apply Butterworth bandpass to the trace.

        :param order: order of the filter
        :param corner_hp: lower corner frequency of the filter
        :param corner_lp: upper corner frequency of the filter

        Mean is removed before filtering.
        '''

        self.nyquist_check(corner_hp, 'Lower corner frequency of bandpass')
        self.nyquist_check(corner_lp, 'Higher corner frequency of bandpass')
        (b, a) = _get_cached_filter_coefs(
            order,
            [corner*2.0*self.deltat for corner in (corner_hp, corner_lp)],
            btype='band')
        data = self.ydata.astype(num.float64)
        if demean:
            data -= num.mean(data)
        self.drop_growbuffer()
        self.ydata = signal.lfilter(b, a, data)

    def bandstop(self, order, corner_hp, corner_lp, demean=True):
        '''
        Apply bandstop (attenuates frequencies in band) to the trace.

        :param order: order of the filter
        :param corner_hp: lower corner frequency of the filter
        :param corner_lp: upper corner frequency of the filter

        Mean is removed before filtering.
        '''

        self.nyquist_check(corner_hp, 'Lower corner frequency of bandstop')
        self.nyquist_check(corner_lp, 'Higher corner frequency of bandstop')
        (b, a) = _get_cached_filter_coefs(
            order,
            [corner*2.0*self.deltat for corner in (corner_hp, corner_lp)],
            btype='bandstop')
        data = self.ydata.astype(num.float64)
        if demean:
            data -= num.mean(data)
        self.drop_growbuffer()
        self.ydata = signal.lfilter(b, a, data)

    def envelope(self, inplace=True):
        '''
        Calculate the envelope of the trace.

        :param inplace: calculate envelope in place

        The calculation follows:

        .. math::

            Y' = \\sqrt{Y^2+H(Y)^2}

        where H is the Hilbert-Transform of the signal Y.
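
        Example (sketch)::

            env_tr = tr.envelope(inplace=False)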

        '''

        y = self.ydata.astype(float)
        env = num.abs(hilbert(y))
        if inplace:
            self.drop_growbuffer()
            self.ydata = env
        else:
            tr = self.copy(data=False)
            tr.ydata = env
            return tr

    def taper(self, taperer, inplace=True, chop=False):
        '''
        Apply a :py:class:`Taper` to the trace.

        :param taperer: instance of :py:class:`Taper` subclass
        :param inplace: apply taper inplace
        :param chop: if ``True``: exclude tapered parts from the resulting
            trace
        '''

        if not inplace:
            tr = self.copy()
        else:
            tr = self

        if chop:
            i, n = taperer.span(tr.ydata, tr.tmin, tr.deltat)
            tr.shift(i*tr.deltat)
            tr.set_ydata(tr.ydata[i:i+n])

        taperer(tr.ydata, tr.tmin, tr.deltat)

        if not inplace:
            return tr

    def whiten(self, order=6):
        '''
        Whiten signal in time domain using autoregression and recursive
        filter.

        :param order: order of the autoregression process
        '''

        b, a = self.whitening_coefficients(order)
        self.drop_growbuffer()
        self.ydata = signal.lfilter(b, a, self.ydata)

    def whitening_coefficients(self, order=6):
        ar = yulewalker(self.ydata, order)
        b, a = [1.] + ar.tolist(), [1.]
        return b, a

    def ampspec_whiten(
            self,
            width,
            td_taper='auto',
            fd_taper='auto',
            pad_to_pow2=True,
            demean=True):

        '''
        Whiten signal via frequency domain using moving average on amplitude
        spectra.

        :param width: width of smoothing kernel [Hz]
        :param td_taper: time domain taper, object of type :py:class:`Taper`
            or ``None`` or ``'auto'``.
        :param fd_taper: frequency domain taper, object of type
            :py:class:`Taper` or ``None`` or ``'auto'``.
        :param pad_to_pow2: whether to pad the signal with zeros up to a
            length of 2^n
        :param demean: whether to demean the signal before tapering

        The signal is first demeaned and then tapered using ``td_taper``.
        Then, the spectrum is calculated and inversely weighted with a
        smoothed version of its amplitude spectrum. A moving average is used
        for the smoothing. The smoothed spectrum is then tapered using
        ``fd_taper``. Finally, the smoothed and tapered spectrum is
        back-transformed into the time domain.

        If ``td_taper`` is set to ``'auto'``, ``CosFader(1.0/width)`` is
        used. If ``fd_taper`` is set to ``'auto'``, ``CosFader(width)`` is
        used.
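
        Example (sketch; 0.1 Hz smoothing width)::

            tr.ampspec_whiten(0.1)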

        '''

        ndata = self.data_len()

        if pad_to_pow2:
            ntrans = nextpow2(ndata)
        else:
            ntrans = ndata

        df = 1./(ntrans*self.deltat)
        nw = int(round(width/df))
        if ndata//2+1 <= nw:
            raise TraceTooShort(
                'Samples in trace: %s, samples needed: %s' % (ndata, nw))

        if td_taper == 'auto':
            td_taper = CosFader(1./width)

        if fd_taper == 'auto':
            fd_taper = CosFader(width)

        if td_taper:
            self.taper(td_taper)

        ydata = self.get_ydata().astype(float)
        if demean:
            ydata -= ydata.mean()

        spec = num.fft.rfft(ydata, ntrans)

        amp = num.abs(spec)
        nspec = amp.size
        csamp = num.cumsum(amp)
        amp_smoothed = num.empty(nspec, dtype=csamp.dtype)
        n1, n2 = nw//2, nw//2 + nspec - nw
        amp_smoothed[n1:n2] = (csamp[nw:] - csamp[:-nw]) / nw
        amp_smoothed[:n1] = amp_smoothed[n1]
        amp_smoothed[n2:] = amp_smoothed[n2-1]

        denom = amp_smoothed * amp
        numer = amp
        eps = num.mean(denom) * 1e-9
        if eps == 0.0:
            eps = 1e-9

        numer += eps
        denom += eps
        spec *= numer/denom

        if fd_taper:
            fd_taper(spec, 0., df)

        ydata = num.fft.irfft(spec)
        self.set_ydata(ydata[:ndata])

1190 

1191 def _get_cached_freqs(self, nf, deltaf): 

1192 ck = (nf, deltaf) 

1193 if ck not in Trace.cached_frequencies: 

1194 Trace.cached_frequencies[ck] = deltaf * num.arange( 

1195 nf, dtype=float) 

1196 

1197 return Trace.cached_frequencies[ck] 

1198 

1199 def bandpass_fft(self, corner_hp, corner_lp): 

1200 ''' 

1201 Apply boxcar bandbpass to trace (in spectral domain). 

1202 ''' 

1203 

1204 n = len(self.ydata) 

1205 n2 = nextpow2(n) 

1206 data = num.zeros(n2, dtype=num.float64) 

1207 data[:n] = self.ydata 

1208 fdata = num.fft.rfft(data) 

1209 freqs = self._get_cached_freqs(len(fdata), 1./(self.deltat*n2)) 

1210 fdata[0] = 0.0 

1211 fdata *= num.logical_and(corner_hp < freqs, freqs < corner_lp) 

1212 data = num.fft.irfft(fdata) 

1213 self.drop_growbuffer() 

1214 self.ydata = data[:n] 

1215 

1216 def shift(self, tshift): 

1217 ''' 

1218 Time shift the trace. 

1219 ''' 

1220 

1221 self.tmin += tshift 

1222 self.tmax += tshift 

1223 self._update_ids() 

    def snap(self, inplace=True, interpolate=False):
        '''
        Shift trace samples to the nearest multiples of the sampling
        interval.

        :param inplace: (boolean) snap traces inplace

        If ``inplace`` is ``False`` and the difference in ``tmin`` and
        ``tmax`` between the snapped and the original trace is smaller than
        0.01 x deltat, :py:func:`snap` returns the unsnapped instance of the
        original trace.
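
        Example (sketch)::

            tr_snapped = tr.snap(inplace=False, interpolate=True)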

        '''

        tmin = round(self.tmin/self.deltat)*self.deltat
        tmax = tmin + (self.ydata.size-1)*self.deltat

        if inplace:
            xself = self
        else:
            if abs(self.tmin - tmin) < 1e-2*self.deltat and \
                    abs(self.tmax - tmax) < 1e-2*self.deltat:
                return self

            xself = self.copy()

        if interpolate:
            from pyrocko import signal_ext
            n = xself.data_len()
            ydata_new = num.empty(n, dtype=float)
            i_control = num.array([0, n-1], dtype=num.int64)
            tref = tmin
            t_control = num.array(
                [float(xself.tmin-tref), float(xself.tmax-tref)],
                dtype=float)

            signal_ext.antidrift(i_control, t_control,
                                 xself.ydata.astype(float),
                                 float(tmin-tref), xself.deltat, ydata_new)

            xself.ydata = ydata_new

        xself.tmin = tmin
        xself.tmax = tmax
        xself._update_ids()

        return xself

    def fix_deltat_rounding_errors(self):
        '''
        Try to undo sampling rate rounding errors.

        See :py:func:`fix_deltat_rounding_errors`.
        '''

        self.deltat = fix_deltat_rounding_errors(self.deltat)
        self.tmax = self.tmin + (self.data_len() - 1) * self.deltat

    def sta_lta_centered(self, tshort, tlong, quad=True, scalingmethod=1):
        '''
        Run special STA/LTA filter where the short time window is centered
        on the long time window.

        :param tshort: length of short time window in [s]
        :param tlong: length of long time window in [s]
        :param quad: whether to square the data prior to applying the
            STA/LTA filter
        :param scalingmethod: integer key to select how output values are
            scaled / normalized (``1``, ``2``, or ``3``)

        ============= ====================================== ===========
        Scalingmethod Implementation                         Range
        ============= ====================================== ===========
        ``1``         As/Al * Ts/Tl                          [0,1]
        ``2``         (As/Al - 1) / (Tl/Ts - 1)              [-Ts/Tl,1]
        ``3``         Like ``2`` but clipping range at zero  [0,1]
        ============= ====================================== ===========
        '''

        nshort = int(round(tshort/self.deltat))
        nlong = int(round(tlong/self.deltat))

        assert nshort < nlong
        if nlong > len(self.ydata):
            raise TraceTooShort(
                'Samples in trace: %s, samples needed: %s'
                % (len(self.ydata), nlong))

        if quad:
            sqrdata = self.ydata**2
        else:
            sqrdata = self.ydata

        mavg_short = moving_avg(sqrdata, nshort)
        mavg_long = moving_avg(sqrdata, nlong)

        self.drop_growbuffer()

        if scalingmethod not in (1, 2, 3):
            raise Exception('Invalid value for scalingmethod argument.')

        if scalingmethod == 1:
            self.ydata = mavg_short/mavg_long * float(nshort)/float(nlong)
        elif scalingmethod in (2, 3):
            self.ydata = (mavg_short/mavg_long - 1.) \
                / ((float(nlong)/float(nshort)) - 1)

        if scalingmethod == 3:
            self.ydata = num.maximum(self.ydata, 0.)

    def sta_lta_right(self, tshort, tlong, quad=True, scalingmethod=1):
        '''
        Run special STA/LTA filter where the short time window is
        overlapping with the last part of the long time window.

        :param tshort: length of short time window in [s]
        :param tlong: length of long time window in [s]
        :param quad: whether to square the data prior to applying the
            STA/LTA filter
        :param scalingmethod: integer key to select how output values are
            scaled / normalized (``1``, ``2``, or ``3``)

        ============= ====================================== ===========
        Scalingmethod Implementation                         Range
        ============= ====================================== ===========
        ``1``         As/Al * Ts/Tl                          [0,1]
        ``2``         (As/Al - 1) / (Tl/Ts - 1)              [-Ts/Tl,1]
        ``3``         Like ``2`` but clipping range at zero  [0,1]
        ============= ====================================== ===========

        With ``scalingmethod=1``, the values produced by this variant of the
        STA/LTA are equivalent to

        .. math::
            s_i = \\frac{s}{l} \\frac{\\frac{1}{s}\\sum_{j=i}^{i+s-1} f_j}
                                     {\\frac{1}{l}\\sum_{j=i+s-l}^{i+s-1} f_j}

        where :math:`f_j` are the input samples, :math:`s` is the number of
        samples in the short time window and :math:`l` is the number of
        samples in the long time window.
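
        Example (sketch; 0.5 s short and 10 s long window)::

            tr.sta_lta_right(0.5, 10.)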

        '''

        n = self.data_len()
        tmin = self.tmin

        nshort = max(1, int(round(tshort/self.deltat)))
        nlong = max(1, int(round(tlong/self.deltat)))

        assert nshort < nlong

        if nlong > len(self.ydata):
            raise TraceTooShort(
                'Samples in trace: %s, samples needed: %s'
                % (len(self.ydata), nlong))

        if quad:
            sqrdata = self.ydata**2
        else:
            sqrdata = self.ydata

        nshift = int(math.floor(0.5 * (nlong - nshort)))
        if nlong % 2 != 0 and nshort % 2 == 0:
            nshift += 1

        mavg_short = moving_avg(sqrdata, nshort)[nshift:]
        mavg_long = moving_avg(sqrdata, nlong)[:sqrdata.size-nshift]

        self.drop_growbuffer()

        if scalingmethod not in (1, 2, 3):
            raise Exception('Invalid value for scalingmethod argument.')

        if scalingmethod == 1:
            ydata = mavg_short/mavg_long * nshort/nlong
        elif scalingmethod in (2, 3):
            ydata = (mavg_short/mavg_long - 1.) \
                / ((float(nlong)/float(nshort)) - 1)

        if scalingmethod == 3:
            ydata = num.maximum(ydata, 0.)

        self.set_ydata(ydata)

        self.shift((math.ceil(0.5*nlong) - nshort + 1) * self.deltat)

        self.chop(
            tmin + (nlong - nshort) * self.deltat,
            tmin + (n - nshort) * self.deltat)

    def peaks(self, threshold, tsearch,
              deadtime=False,
              nblock_duration_detection=100):

        '''
        Detect peaks above a given threshold (method 1).

        From every instant where the signal rises above ``threshold``, a
        time length of ``tsearch`` seconds is searched for a maximum. A list
        with tuples (time, value) for each detected peak is returned. The
        ``deadtime`` argument turns on a special deadtime duration detection
        algorithm useful in combination with recursive STA/LTA filters.
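
        Example (sketch; threshold value is illustrative)::

            tpeaks, apeaks = tr.peaks(threshold=1.0, tsearch=5.)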

        '''

        y = self.ydata
        above = num.where(y > threshold, 1, 0)
        deriv = num.zeros(y.size, dtype=num.int8)
        deriv[1:] = above[1:]-above[:-1]
        itrig_positions = num.nonzero(deriv > 0)[0]
        tpeaks = []
        apeaks = []
        tzeros = []
        tzero = self.tmin

        for itrig_pos in itrig_positions:
            ibeg = itrig_pos
            iend = min(
                len(self.ydata),
                itrig_pos + int(math.ceil(tsearch/self.deltat)))
            ipeak = num.argmax(y[ibeg:iend])
            tpeak = self.tmin + (ipeak+ibeg)*self.deltat
            apeak = y[ibeg+ipeak]

            if tpeak < tzero:
                continue

            if deadtime:
                ibeg = itrig_pos
                iblock = 0
                nblock = nblock_duration_detection
                totalsum = 0.
                while True:
                    if ibeg+iblock*nblock >= len(y):
                        tzero = self.tmin + (len(y)-1) * self.deltat
                        break

                    logy = num.log(
                        y[ibeg+iblock*nblock:ibeg+(iblock+1)*nblock])
                    logy[0] += totalsum
                    ysum = num.cumsum(logy)
                    totalsum = ysum[-1]
                    below = num.where(ysum <= 0., 1, 0)
                    deriv = num.zeros(ysum.size, dtype=num.int8)
                    deriv[1:] = below[1:]-below[:-1]
                    izero_positions = num.nonzero(deriv > 0)[0] + iblock*nblock
                    if len(izero_positions) > 0:
                        tzero = self.tmin + self.deltat * (
                            ibeg + izero_positions[0])
                        break
                    iblock += 1
            else:
                tzero = ibeg*self.deltat + self.tmin + tsearch

            tpeaks.append(tpeak)
            apeaks.append(apeak)
            tzeros.append(tzero)

        if deadtime:
            return tpeaks, apeaks, tzeros
        else:
            return tpeaks, apeaks

    def peaks2(self, threshold, tsearch):

        '''
        Detect peaks above a given threshold (method 2).

        This variant of peak detection is a bit more robust (and slower)
        than the one implemented in :py:meth:`Trace.peaks`. First all
        samples with ``a[i-1] < a[i] > a[i+1]`` are masked as potential
        peaks. From these, iteratively the one with the maximum amplitude
        ``a[j]`` and time ``t[j]`` is chosen and potential peaks within
        ``t[j] - tsearch, t[j] + tsearch``
        are discarded. The algorithm stops when ``a[j] < threshold`` or when
        no more potential peaks are left.
        '''

        a = num.copy(self.ydata)

        amin = num.min(a)

        a[0] = amin
        a[1: -1][num.diff(a, 2) <= 0.] = amin
        a[-1] = amin

        data = []
        while True:
            imax = num.argmax(a)
            amax = a[imax]

            if amax < threshold or amax == amin:
                break

            data.append((self.tmin + imax * self.deltat, amax))

            ntsearch = int(round(tsearch / self.deltat))
            a[max(imax-ntsearch//2, 0):min(imax+ntsearch//2, a.size)] = amin

        if data:
            data.sort()
            tpeaks, apeaks = list(zip(*data))
        else:
            tpeaks, apeaks = [], []

        return tpeaks, apeaks

    def extend(self, tmin=None, tmax=None, fillmethod='zeros'):
        '''
        Extend trace to given span.

        :param tmin: begin time of new span
        :param tmax: end time of new span
        :param fillmethod: ``'zeros'``, ``'repeat'``, ``'mean'``, or
            ``'median'``
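
        Example, zero-padding 10 s on both sides (sketch)::

            tr.extend(tr.tmin - 10., tr.tmax + 10.)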

        '''

        nold = self.ydata.size

        if tmin is not None:
            nl = min(0, int(round((tmin-self.tmin)/self.deltat)))
        else:
            nl = 0

        if tmax is not None:
            nh = max(nold - 1, int(round((tmax-self.tmin)/self.deltat)))
        else:
            nh = nold - 1

        n = nh - nl + 1
        data = num.zeros(n, dtype=self.ydata.dtype)
        data[-nl:-nl + nold] = self.ydata
        if self.ydata.size >= 1:
            if fillmethod == 'repeat':
                data[:-nl] = self.ydata[0]
                data[-nl + nold:] = self.ydata[-1]
            elif fillmethod == 'median':
                v = num.median(self.ydata)
                data[:-nl] = v
                data[-nl + nold:] = v
            elif fillmethod == 'mean':
                v = num.mean(self.ydata)
                data[:-nl] = v
                data[-nl + nold:] = v

        self.drop_growbuffer()
        self.ydata = data

        self.tmin += nl * self.deltat
        self.tmax = self.tmin + (self.ydata.size - 1) * self.deltat

        self._update_ids()

    def transfer(self,
                 tfade=0.,
                 freqlimits=None,
                 transfer_function=None,
                 cut_off_fading=True,
                 demean=True,
                 invert=False):

        '''
        Return new trace with transfer function applied (convolution).

        :param tfade: rise/fall time in seconds of taper applied in
            timedomain at both ends of trace.
        :param freqlimits: 4-tuple with corner frequencies in Hz.
        :param transfer_function: FrequencyResponse object; must provide a
            method 'evaluate(freqs)', which returns the transfer function
            coefficients at the frequencies 'freqs'.
        :param cut_off_fading: whether to cut off rise/fall interval in
            output trace.
        :param demean: remove mean before applying transfer function
        :param invert: set to True to do a deconvolution
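
        Example, deconvolution sketch (``resp`` is assumed to be a response
        object providing ``evaluate(freqs)``)::

            restituted = tr.transfer(
                tfade=10.,
                freqlimits=(0.01, 0.02, 5., 10.),
                transfer_function=resp,
                invert=True)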

        '''

        if transfer_function is None:
            transfer_function = FrequencyResponse()

        if self.tmax - self.tmin <= tfade*2.:
            raise TraceTooShort(
                'Trace %s.%s.%s.%s too short for fading length setting. '
                'trace length = %g, fading length = %g'
                % (self.nslc_id + (self.tmax-self.tmin, tfade)))

        if freqlimits is None and (
                transfer_function is None or transfer_function.is_scalar()):

            # special case for flat responses

            output = self.copy()
            data = self.ydata
            ndata = data.size

            if transfer_function is not None:
                c = num.abs(transfer_function.evaluate(num.ones(1))[0])

                if invert:
                    c = 1.0/c

                data *= c

            if tfade != 0.0:
                data *= costaper(
                    0., tfade, self.deltat*(ndata-1)-tfade, self.deltat*ndata,
                    ndata, self.deltat)

            output.ydata = data

        else:
            ndata = self.ydata.size
            ntrans = nextpow2(ndata*1.2)
            coefs = self._get_tapered_coefs(
                ntrans, freqlimits, transfer_function, invert=invert)

            data = self.ydata

            data_pad = num.zeros(ntrans, dtype=float)
            data_pad[:ndata] = data
            if demean:
                data_pad[:ndata] -= data.mean()

            if tfade != 0.0:
                data_pad[:ndata] *= costaper(
                    0., tfade, self.deltat*(ndata-1)-tfade, self.deltat*ndata,
                    ndata, self.deltat)

            fdata = num.fft.rfft(data_pad)
            fdata *= coefs
            ddata = num.fft.irfft(fdata)
            output = self.copy()
            output.ydata = ddata[:ndata]

        if cut_off_fading and tfade != 0.0:
            try:
                output.chop(output.tmin+tfade, output.tmax-tfade, inplace=True)
            except NoData:
                raise TraceTooShort(
                    'Trace %s.%s.%s.%s too short for fading length setting. '
                    'trace length = %g, fading length = %g'
                    % (self.nslc_id + (self.tmax-self.tmin, tfade)))
        else:
            output.ydata = output.ydata.copy()

        return output

    def differentiate(self, n=1, order=4, inplace=True):
        '''
        Approximate first or second derivative of the trace.

        :param n: 1 for first derivative, 2 for second
        :param order: order of the approximation, 2 and 4 are supported
        :param inplace: if ``True`` the trace is differentiated in place,
            otherwise a new trace object with the derivative is returned.

        Raises :py:exc:`ValueError` for unsupported ``n`` or ``order``.

        See :py:func:`~pyrocko.util.diff_fd` for implementation details.
        '''

        ddata = util.diff_fd(n, order, self.deltat, self.ydata)

        if inplace:
            self.ydata = ddata
        else:
            output = self.copy(data=False)
            output.set_ydata(ddata)
            return output

    def drop_chain_cache(self):
        if self._pchain:
            self._pchain.clear()

    def init_chain(self):
        self._pchain = pchain.Chain(
            do_downsample,
            do_extend,
            do_pre_taper,
            do_fft,
            do_filter,
            do_ifft)

    def run_chain(self, tmin, tmax, deltat, setup, nocache):
        if setup.domain == 'frequency_domain':
            _, _, data = self._pchain(
                (self, deltat),
                (tmin, tmax),
                (setup.taper,),
                (setup.filter,),
                (setup.filter,),
                nocache=nocache)

            return num.abs(data), num.abs(data)

        else:
            processed = self._pchain(
                (self, deltat),
                (tmin, tmax),
                (setup.taper,),
                (setup.filter,),
                (setup.filter,),
                (),
                nocache=nocache)

            if setup.domain == 'time_domain':
                data = processed.get_ydata()

            elif setup.domain == 'envelope':
                processed = processed.envelope(inplace=False)

            elif setup.domain == 'absolute':
                processed.set_ydata(num.abs(processed.get_ydata()))

            return processed.get_ydata(), processed

    def misfit(self, candidate, setup, nocache=False, debug=False):
        '''
        Calculate misfit and normalization factor against candidate trace.

        :param candidate: :py:class:`Trace` object
        :param setup: :py:class:`MisfitSetup` object
        :returns: tuple ``(m, n)``, where m is the misfit value and n is the
            normalization divisor

        If the sampling rates of ``self`` and ``candidate`` differ, the
        trace with the higher sampling rate will be downsampled.
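
        Example (sketch; assumes ``setup`` is a configured
        :py:class:`MisfitSetup` instance)::

            m, n = observed.misfit(candidate=synthetic, setup=setup)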

        '''

        a = self
        b = candidate

        for tr in (a, b):
            if not tr._pchain:
                tr.init_chain()

        deltat = max(a.deltat, b.deltat)
        tmin = min(a.tmin, b.tmin) - deltat
        tmax = max(a.tmax, b.tmax) + deltat

        adata, aproc = a.run_chain(tmin, tmax, deltat, setup, nocache)
        bdata, bproc = b.run_chain(tmin, tmax, deltat, setup, nocache)

        if setup.domain != 'cc_max_norm':
            m, n = Lx_norm(bdata, adata, norm=setup.norm)
        else:
            ctr = correlate(aproc, bproc, mode='full', normalization='normal')
            ccmax = ctr.max()[1]
            m = 0.5 - 0.5 * ccmax
            n = 0.5

        if debug:
            return m, n, aproc, bproc
        else:
            return m, n

1775 

    def spectrum(self, pad_to_pow2=False, tfade=None):
        '''
        Get FFT spectrum of trace.

        :param pad_to_pow2: whether to zero-pad the data to next larger
            power-of-two length
        :param tfade: ``None`` or a time length in seconds, to apply cosine
            shaped tapers to both ends of the trace

        :returns: a tuple with (frequencies, values)
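
        Example (a sketch; plots the amplitude spectrum of a trace ``tr``,
        assuming Matplotlib is installed)::

            import numpy as num
            import matplotlib.pyplot as plt

            freqs, spec = tr.spectrum(pad_to_pow2=True, tfade=0.5)
            plt.loglog(freqs[1:], num.abs(spec[1:]))
            plt.show()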

        '''

        ndata = self.ydata.size

        if pad_to_pow2:
            ntrans = nextpow2(ndata)
        else:
            ntrans = ndata

        if tfade is None:
            ydata = self.ydata
        else:
            ydata = self.ydata * costaper(
                0., tfade, self.deltat*(ndata-1)-tfade, self.deltat*ndata,
                ndata, self.deltat)

        fydata = num.fft.rfft(ydata, ntrans)
        df = 1./(ntrans*self.deltat)
        fxdata = num.arange(len(fydata))*df
        return fxdata, fydata

    def multi_filter(self, filter_freqs, bandwidth):

        class Gauss(FrequencyResponse):
            f0 = Float.T()
            a = Float.T(default=1.0)

            def __init__(self, f0, a=1.0, **kwargs):
                FrequencyResponse.__init__(self, f0=f0, a=a, **kwargs)

            def evaluate(self, freqs):
                omega0 = 2.*math.pi*self.f0
                omega = 2.*math.pi*freqs
                return num.exp(-((omega-omega0)
                                 / (self.a*omega0))**2)

        freqs, coefs = self.spectrum()
        n = self.data_len()
        nfilt = len(filter_freqs)
        signal_tf = num.zeros((nfilt, n))
        centroid_freqs = num.zeros(nfilt)
        for ifilt, f0 in enumerate(filter_freqs):
            taper = Gauss(f0, a=bandwidth)
            weights = taper.evaluate(freqs)
            nhalf = freqs.size
            analytic_spec = num.zeros(n, dtype=complex)
            analytic_spec[:nhalf] = coefs*weights

            if n % 2 == 0:
                analytic_spec[1:nhalf-1] *= 2.
            else:
                analytic_spec[1:nhalf] *= 2.

            analytic = num.fft.ifft(analytic_spec)
            signal_tf[ifilt, :] = num.abs(analytic)

            enorm = num.abs(analytic_spec[:nhalf])**2
            enorm /= num.sum(enorm)
            centroid_freqs[ifilt] = num.sum(freqs*enorm)

        return centroid_freqs, signal_tf

    def _get_tapered_coefs(
            self, ntrans, freqlimits, transfer_function, invert=False):

        deltaf = 1./(self.deltat*ntrans)
        nfreqs = ntrans//2 + 1
        transfer = num.ones(nfreqs, dtype=complex)
        hi = snapper(nfreqs, deltaf)
        if freqlimits is not None:
            a, b, c, d = freqlimits
            freqs = num.arange(hi(d)-hi(a), dtype=float)*deltaf \
                + hi(a)*deltaf

            if invert:
                coeffs = transfer_function.evaluate(freqs)
                if num.any(coeffs == 0.0):
                    raise InfiniteResponse('%s.%s.%s.%s' % self.nslc_id)

                transfer[hi(a):hi(d)] = 1.0 / coeffs
            else:
                transfer[hi(a):hi(d)] = transfer_function.evaluate(freqs)

            tapered_transfer = costaper(a, b, c, d, nfreqs, deltaf)*transfer
        else:
            if invert:
                raise Exception(
                    'transfer: `freqlimits` must be given when `invert` is '
                    'set to `True`')

            freqs = num.arange(nfreqs) * deltaf
            tapered_transfer = transfer_function.evaluate(freqs)

        tapered_transfer[0] = 0.0  # don't introduce static offsets
        return tapered_transfer

    def fill_template(self, template, **additional):
        '''
        Fill string template with trace metadata.

        Uses normal Python '%(placeholder)s' string templates. The following
        placeholders are considered: ``network``, ``station``, ``location``,
        ``channel``, ``tmin`` (time of first sample), ``tmax`` (time of last
        sample), ``tmin_ms``, ``tmax_ms``, ``tmin_us``, ``tmax_us``,
        ``tmin_year``, ``tmax_year``, ``tmin_month``, ``tmax_month``,
        ``tmin_day``, ``tmax_day``, ``julianday``. The variants ending with
        ``'_ms'`` include milliseconds, those with ``'_us'`` include
        microseconds, those with ``'_year'`` contain only the year. The
        shortcuts ``%n``, ``%s``, ``%l``, ``%c``, ``%b``, ``%e`` and ``%j``
        expand to the network, station, location, channel, tmin, tmax and
        julianday placeholders, respectively.
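
        Example (a sketch; the file name pattern is hypothetical)::

            fn = tr.fill_template(
                '%(network)s.%(station)s.%(channel)s_%(tmin)s.mseed')

            # equivalent, using the shortcut placeholders:
            fn = tr.fill_template('%n.%s.%c_%b.mseed')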

        '''

        template = template.replace('%n', '%(network)s')\
            .replace('%s', '%(station)s')\
            .replace('%l', '%(location)s')\
            .replace('%c', '%(channel)s')\
            .replace('%b', '%(tmin)s')\
            .replace('%e', '%(tmax)s')\
            .replace('%j', '%(julianday)s')

        params = dict(
            zip(('network', 'station', 'location', 'channel'), self.nslc_id))
        params['tmin'] = util.time_to_str(
            self.tmin, format='%Y-%m-%d_%H-%M-%S')
        params['tmax'] = util.time_to_str(
            self.tmax, format='%Y-%m-%d_%H-%M-%S')
        params['tmin_ms'] = util.time_to_str(
            self.tmin, format='%Y-%m-%d_%H-%M-%S.3FRAC')
        params['tmax_ms'] = util.time_to_str(
            self.tmax, format='%Y-%m-%d_%H-%M-%S.3FRAC')
        params['tmin_us'] = util.time_to_str(
            self.tmin, format='%Y-%m-%d_%H-%M-%S.6FRAC')
        params['tmax_us'] = util.time_to_str(
            self.tmax, format='%Y-%m-%d_%H-%M-%S.6FRAC')
        params['tmin_year'], params['tmin_month'], params['tmin_day'] \
            = util.time_to_str(self.tmin, format='%Y-%m-%d').split('-')
        params['tmax_year'], params['tmax_month'], params['tmax_day'] \
            = util.time_to_str(self.tmax, format='%Y-%m-%d').split('-')
        params['julianday'] = util.julian_day_of_year(self.tmin)
        params.update(additional)
        return template % params

    def plot(self):
        '''
        Show trace with matplotlib.

        See also: :py:meth:`Trace.snuffle`.
        '''

        import pylab
        pylab.plot(self.get_xdata(), self.get_ydata())
        name = '%s %s %s - %s' % (
            self.channel,
            self.station,
            time.strftime("%d-%m-%y %H:%M:%S", time.gmtime(self.tmin)),
            time.strftime("%d-%m-%y %H:%M:%S", time.gmtime(self.tmax)))

        pylab.title(name)
        pylab.show()

    def snuffle(self, **kwargs):
        '''
        Show trace in a snuffler window.

        :param stations: list of `pyrocko.model.Station` objects or ``None``
        :param events: list of `pyrocko.model.Event` objects or ``None``
        :param markers: list of `pyrocko.gui.util.Marker` objects or ``None``
        :param ntracks: int, number of tracks to be shown initially (default:
            12)
        :param follow: time interval (in seconds) for real time follow mode or
            ``None``
        :param controls: bool, whether to show the main controls (default:
            ``True``)
        :param opengl: bool, whether to use opengl (default: ``False``)
        '''

        return snuffle([self], **kwargs)


def snuffle(traces, **kwargs):
    '''
    Show traces in a snuffler window.

    :param stations: list of `pyrocko.model.Station` objects or ``None``
    :param events: list of `pyrocko.model.Event` objects or ``None``
    :param markers: list of `pyrocko.gui.util.Marker` objects or ``None``
    :param ntracks: int, number of tracks to be shown initially (default: 12)
    :param follow: time interval (in seconds) for real time follow mode or
        ``None``
    :param controls: bool, whether to show the main controls (default:
        ``True``)
    :param opengl: bool, whether to use opengl (default: ``False``)
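
    Example (a sketch; the file name is hypothetical)::

        from pyrocko import io, trace

        traces = io.load('data.mseed')
        trace.snuffle(traces)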

    '''

    from pyrocko import pile
    from pyrocko.gui import snuffler
    p = pile.Pile()
    if traces:
        trf = pile.MemTracesFile(None, traces)
        p.add_file(trf)
    return snuffler.snuffle(p, **kwargs)


class InfiniteResponse(Exception):
    '''
    This exception is raised by :py:class:`Trace` operations when
    deconvolution of a frequency response (instrument response transfer
    function) would result in a division by zero.
    '''


class MisalignedTraces(Exception):
    '''
    This exception is raised by some :py:class:`Trace` operations when tmin,
    tmax or number of samples do not match.
    '''

    pass


class NoData(Exception):
    '''
    This exception is raised by some :py:class:`Trace` operations when no or
    not enough data is available.
    '''

    pass


class AboveNyquist(Exception):
    '''
    This exception is raised by some :py:class:`Trace` operations when given
    frequencies are above the Nyquist frequency.
    '''

    pass


class TraceTooShort(Exception):
    '''
    This exception is raised by some :py:class:`Trace` operations when the
    trace is too short.
    '''

    pass


class ResamplingFailed(Exception):
    pass


def minmax(traces, key=None, mode='minmax', outer_mode='minmax'):

    '''
    Get data range given traces grouped by selected pattern.

    :param key: a callable which takes as single argument a trace and returns
        a key for the grouping of the results. If this is ``None``, the
        default, ``lambda tr: (tr.network, tr.station, tr.location,
        tr.channel)`` is used.
    :param mode: ``'minmax'`` or floating point number. If this is
        ``'minmax'``, minimum and maximum of the traces are used, if it is a
        number, mean +- standard deviation times ``mode`` is used.
    :param outer_mode: ``'minmax'`` to use minimum and maximum of the
        single-trace ranges, or ``'robust'`` to use the 10th and 90th
        percentiles of the single-trace ranges, discarding extreme values
        on either end.

    :returns: a dict with the combined data ranges.

    Examples::

        ranges = minmax(traces, lambda tr: tr.channel)
        print(ranges['N'])  # min & max of all traces with channel == 'N'
        print(ranges['E'])  # min & max of all traces with channel == 'E'

        ranges = minmax(traces, lambda tr: (tr.network, tr.station))
        print(ranges['GR', 'HAM3'])  # min & max of all traces with
                                     # network == 'GR' and station == 'HAM3'

        ranges = minmax(traces, lambda tr: None)
        print(ranges[None])  # min & max of all traces
    '''

    if key is None:
        key = _default_key

    ranges = defaultdict(list)
    for trace in traces:
        if isinstance(mode, str) and mode == 'minmax':
            mi, ma = num.nanmin(trace.ydata), num.nanmax(trace.ydata)
        else:
            mean = trace.ydata.mean()
            std = trace.ydata.std()
            mi, ma = mean-std*mode, mean+std*mode

        k = key(trace)
        ranges[k].append((mi, ma))

    for k in ranges:
        mins, maxs = num.array(ranges[k]).T
        if outer_mode == 'minmax':
            ranges[k] = num.nanmin(mins), num.nanmax(maxs)
        elif outer_mode == 'robust':
            ranges[k] = num.percentile(mins, 10.), num.percentile(maxs, 90.)

    return ranges


def minmaxtime(traces, key=None):

    '''
    Get time range given traces grouped by selected pattern.

    :param key: a callable which takes as single argument a trace and returns
        a key for the grouping of the results. If this is ``None``, the
        default, ``lambda tr: (tr.network, tr.station, tr.location,
        tr.channel)`` is used.

    :returns: a dict with the combined time ranges.
    '''

    if key is None:
        key = _default_key

    ranges = {}
    for trace in traces:
        mi, ma = trace.tmin, trace.tmax
        k = key(trace)
        if k not in ranges:
            ranges[k] = mi, ma
        else:
            tmi, tma = ranges[k]
            ranges[k] = min(tmi, mi), max(tma, ma)

    return ranges


def degapper(
        traces,
        maxgap=5,
        fillmethod='interpolate',
        deoverlap='use_second',
        maxlap=None):

    '''
    Try to connect traces and remove gaps.

    This method will combine adjacent traces, which match in their network,
    station, location and channel attributes. Overlapping parts are handled
    according to the ``deoverlap`` argument.

    :param traces: input traces, must be sorted by their full_id attribute.
    :param maxgap: maximum number of samples to interpolate.
    :param fillmethod: what to put into the gaps: 'interpolate' or 'zeros'.
    :param deoverlap: how to handle overlaps: 'use_second' to use data from
        second trace (default), 'use_first' to use data from first trace,
        'crossfade_cos' to crossfade with cosine taper, 'add' to add amplitude
        values.
    :param maxlap: maximum number of samples of overlap which are removed

    :returns: list of traces
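
    Example (a sketch, assuming ``traces`` holds :py:class:`Trace` objects
    read from a gappy recording)::

        traces.sort(key=lambda tr: tr.full_id)
        continuous = degapper(traces, maxgap=5, fillmethod='interpolate')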

    '''

    in_traces = traces
    out_traces = []
    if not in_traces:
        return out_traces
    out_traces.append(in_traces.pop(0))
    while in_traces:

        a = out_traces[-1]
        b = in_traces.pop(0)

        avirt, bvirt = a.ydata is None, b.ydata is None
        assert avirt == bvirt, \
            'traces given to degapper() must either all have data or all ' \
            'have no data.'

        virtual = avirt and bvirt

        if (a.nslc_id == b.nslc_id and a.deltat == b.deltat
                and a.data_len() >= 1 and b.data_len() >= 1
                and (virtual or a.ydata.dtype == b.ydata.dtype)):

            dist = (b.tmin-(a.tmin+(a.data_len()-1)*a.deltat))/a.deltat
            idist = int(round(dist))
            if abs(dist - idist) > 0.05 and idist <= maxgap:
                # logger.warning('Cannot degap traces with displaced '
                #                'sampling (%s, %s, %s, %s)' % a.nslc_id)
                pass
            else:
                if 1 < idist <= maxgap:
                    if not virtual:
                        if fillmethod == 'interpolate':
                            filler = a.ydata[-1] + (
                                ((1.0 + num.arange(idist-1, dtype=float))
                                 / idist) * (b.ydata[0]-a.ydata[-1])
                            ).astype(a.ydata.dtype)
                        elif fillmethod == 'zeros':
                            filler = num.zeros(idist-1, dtype=a.ydata.dtype)
                        a.ydata = num.concatenate((a.ydata, filler, b.ydata))
                    a.tmax = b.tmax
                    if a.mtime and b.mtime:
                        a.mtime = max(a.mtime, b.mtime)
                    continue

                elif idist == 1:
                    if not virtual:
                        a.ydata = num.concatenate((a.ydata, b.ydata))
                    a.tmax = b.tmax
                    if a.mtime and b.mtime:
                        a.mtime = max(a.mtime, b.mtime)
                    continue

                elif idist <= 0 and (maxlap is None or -maxlap < idist):
                    if b.tmax > a.tmax:
                        if not virtual:
                            na = a.ydata.size
                            n = -idist+1
                            if deoverlap == 'use_second':
                                a.ydata = num.concatenate(
                                    (a.ydata[:-n], b.ydata))
                            elif deoverlap in ('use_first', 'crossfade_cos'):
                                a.ydata = num.concatenate(
                                    (a.ydata, b.ydata[n:]))
                            elif deoverlap == 'add':
                                a.ydata[-n:] += b.ydata[:n]
                                a.ydata = num.concatenate(
                                    (a.ydata, b.ydata[n:]))
                            else:
                                assert False, 'unknown deoverlap method'

                            if deoverlap == 'crossfade_cos':
                                taper = 0.5-0.5*num.cos(
                                    (1.+num.arange(n))/(1.+n)*num.pi)
                                a.ydata[na-n:na] *= 1.-taper
                                a.ydata[na-n:na] += b.ydata[:n] * taper

                        a.tmax = b.tmax
                        if a.mtime and b.mtime:
                            a.mtime = max(a.mtime, b.mtime)
                        continue
                    else:
                        # make short second trace vanish
                        continue

        if b.data_len() >= 1:
            out_traces.append(b)

    for tr in out_traces:
        tr._update_ids()

    return out_traces


def rotate(traces, azimuth, in_channels, out_channels):
    '''
    2D rotation of traces.

    :param traces: list of input traces
    :param azimuth: difference of the azimuths of the component directions
        (azimuth of out_channels[0]) - (azimuth of in_channels[0])
    :param in_channels: names of the input channels (e.g. 'N', 'E')
    :param out_channels: names of the output channels (e.g. 'R', 'T')
    :returns: list of rotated traces
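
    Example (a sketch; the 30 degree rotation angle is arbitrary)::

        rotated = rotate(
            [tr_n, tr_e], azimuth=30.,
            in_channels=('N', 'E'), out_channels=('R', 'T'))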

    '''

    phi = azimuth/180.*math.pi
    cphi = math.cos(phi)
    sphi = math.sin(phi)
    rotated = []
    in_channels = tuple(_channels_to_names(in_channels))
    out_channels = tuple(_channels_to_names(out_channels))
    for a in traces:
        for b in traces:
            if ((a.channel, b.channel) == in_channels and
                    a.nslc_id[:3] == b.nslc_id[:3] and
                    abs(a.deltat-b.deltat) < a.deltat*0.001):

                tmin = max(a.tmin, b.tmin)
                tmax = min(a.tmax, b.tmax)

                if tmin < tmax:
                    ac = a.chop(tmin, tmax, inplace=False, include_last=True)
                    bc = b.chop(tmin, tmax, inplace=False, include_last=True)
                    if abs(ac.tmin - bc.tmin) > ac.deltat*0.01:
                        logger.warning(
                            'Cannot rotate traces with displaced sampling '
                            '(%s, %s, %s, %s)' % a.nslc_id)
                        continue

                    acydata = ac.get_ydata()*cphi+bc.get_ydata()*sphi
                    bcydata = -ac.get_ydata()*sphi+bc.get_ydata()*cphi
                    ac.set_ydata(acydata)
                    bc.set_ydata(bcydata)

                    ac.set_codes(channel=out_channels[0])
                    bc.set_codes(channel=out_channels[1])
                    rotated.append(ac)
                    rotated.append(bc)

    return rotated


def rotate_to_rt(n, e, source, receiver, out_channels=('R', 'T')):
    '''
    Rotate traces from NE to RT system.

    :param n:
        North trace.
    :type n:
        :py:class:`~pyrocko.trace.Trace`

    :param e:
        East trace.
    :type e:
        :py:class:`~pyrocko.trace.Trace`

    :param source:
        Source of the recorded signal.
    :type source:
        :py:class:`pyrocko.gf.seismosizer.Source`

    :param receiver:
        Receiver of the recorded signal.
    :type receiver:
        :py:class:`pyrocko.model.location.Location`

    :param out_channels:
        Channel codes of the output channels (radial, transversal).
        Default is ('R', 'T').
    :type out_channels:
        tuple[str, str], optional

    :returns:
        Rotated traces (radial, transversal).
    :rtype:
        tuple[
            :py:class:`~pyrocko.trace.Trace`,
            :py:class:`~pyrocko.trace.Trace`]
    '''
    azimuth = orthodrome.azimuth(receiver, source) + 180.
    in_channels = n.channel, e.channel
    out = rotate(
        [n, e], azimuth,
        in_channels=in_channels,
        out_channels=out_channels)

    assert len(out) == 2
    for tr in out:
        if tr.channel == out_channels[0]:
            r = tr
        elif tr.channel == out_channels[1]:
            t = tr
        else:
            assert False

    return r, t


def rotate_to_lqt(traces, backazimuth, incidence, in_channels,
                  out_channels=('L', 'Q', 'T')):
    '''
    Rotate traces from ZNE to LQT system.

    :param traces: list of traces in arbitrary order
    :param backazimuth: backazimuth in degrees clockwise from north
    :param incidence: incidence angle in degrees from vertical
    :param in_channels: input channel names
    :param out_channels: output channel names (default: ('L', 'Q', 'T'))
    :returns: list of transformed traces
    '''
    i = incidence/180.*num.pi
    b = backazimuth/180.*num.pi

    ci = num.cos(i)
    cb = num.cos(b)
    si = num.sin(i)
    sb = num.sin(b)

    rotmat = num.array(
        [[ci, -cb*si, -sb*si], [si, cb*ci, sb*ci], [0., sb, -cb]])
    return project(traces, rotmat, in_channels, out_channels)


def _decompose(a):
    '''
    Decompose matrix into independent submatrices.
    '''

    def depends(iout, a):
        row = a[iout, :]
        return set(num.arange(row.size).compress(row != 0.0))

    def provides(iin, a):
        col = a[:, iin]
        return set(num.arange(col.size).compress(col != 0.0))

    a = num.asarray(a)
    outs = set(range(a.shape[0]))
    systems = []
    while outs:
        iout = outs.pop()

        gout = set()
        for iin in depends(iout, a):
            gout.update(provides(iin, a))

        if not gout:
            continue

        gin = set()
        for iout2 in gout:
            gin.update(depends(iout2, a))

        if not gin:
            continue

        for iout2 in gout:
            if iout2 in outs:
                outs.remove(iout2)

        gin = list(gin)
        gin.sort()
        gout = list(gout)
        gout.sort()

        systems.append((gin, gout, a[gout, :][:, gin]))

    return systems


def _channels_to_names(channels):
    from pyrocko import squirrel
    names = []
    for ch in channels:
        if isinstance(ch, model.Channel):
            names.append(ch.name)
        elif isinstance(ch, squirrel.Channel):
            names.append(ch.codes.channel)
        else:
            names.append(ch)

    return names


def project(traces, matrix, in_channels, out_channels):
    '''
    Affine transform of three-component traces.

    Compute matrix-vector product of three-component traces, to e.g. rotate
    traces into a different basis. The traces are distinguished and ordered by
    their channel attribute. The transform is applied to overlapping parts of
    any appropriate combinations of the input traces. This should allow this
    function to be robust with respect to data gaps. It also tries to apply
    the transformation to subsets of the channels, if this is possible, so
    that, if for example a vertical component is missing, horizontal
    components can still be rotated.

    :param traces: list of traces in arbitrary order
    :param matrix: transformation matrix
    :param in_channels: input channel names
    :param out_channels: output channel names
    :returns: list of transformed traces
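
    Example (a sketch; corrects a hypothetical sensor misorientation of 10
    degrees on the horizontal components)::

        import numpy as num

        phi = 10. / 180. * num.pi
        matrix = num.array([
            [1., 0., 0.],
            [0., num.cos(phi), num.sin(phi)],
            [0., -num.sin(phi), num.cos(phi)]])

        corrected = project(
            traces, matrix,
            in_channels=('Z', 'N', 'E'),
            out_channels=('Z', 'N', 'E'))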

    '''

    in_channels = tuple(_channels_to_names(in_channels))
    out_channels = tuple(_channels_to_names(out_channels))
    systems = _decompose(matrix)

    # fall back to full matrix if some submatrices are not square
    for iins, iouts, submatrix in systems:
        if submatrix.shape[0] != submatrix.shape[1]:
            if len(in_channels) != 3 or len(out_channels) != 3:
                return []
            else:
                return _project3(traces, matrix, in_channels, out_channels)

    projected = []
    for iins, iouts, submatrix in systems:
        in_cha = tuple([in_channels[iin] for iin in iins])
        out_cha = tuple([out_channels[iout] for iout in iouts])
        if submatrix.shape[0] == 1:
            projected.extend(_project1(traces, submatrix, in_cha, out_cha))
        elif submatrix.shape[1] == 2:
            projected.extend(_project2(traces, submatrix, in_cha, out_cha))
        else:
            projected.extend(_project3(traces, submatrix, in_cha, out_cha))

    return projected


def project_dependencies(matrix, in_channels, out_channels):
    '''
    Figure out what dependencies :py:func:`project` would produce.
    '''

    in_channels = tuple(_channels_to_names(in_channels))
    out_channels = tuple(_channels_to_names(out_channels))
    systems = _decompose(matrix)

    subpro = []
    for iins, iouts, submatrix in systems:
        if submatrix.shape[0] != submatrix.shape[1]:
            subpro.append((matrix, in_channels, out_channels))

    if not subpro:
        for iins, iouts, submatrix in systems:
            in_cha = tuple([in_channels[iin] for iin in iins])
            out_cha = tuple([out_channels[iout] for iout in iouts])
            subpro.append((submatrix, in_cha, out_cha))

    deps = {}
    for mat, in_cha, out_cha in subpro:
        for oc in out_cha:
            if oc not in deps:
                deps[oc] = []

            for ic in in_cha:
                deps[oc].append(ic)

    return deps


def _project1(traces, matrix, in_channels, out_channels):
    assert len(in_channels) == 1
    assert len(out_channels) == 1
    assert matrix.shape == (1, 1)

    projected = []
    for a in traces:
        if not (a.channel,) == in_channels:
            continue

        ac = a.copy()
        ac.set_ydata(matrix[0, 0]*a.get_ydata())
        ac.set_codes(channel=out_channels[0])
        projected.append(ac)

    return projected


def _project2(traces, matrix, in_channels, out_channels):
    assert len(in_channels) == 2
    assert len(out_channels) == 2
    assert matrix.shape == (2, 2)
    projected = []
    for a in traces:
        for b in traces:
            if not ((a.channel, b.channel) == in_channels and
                    a.nslc_id[:3] == b.nslc_id[:3] and
                    abs(a.deltat-b.deltat) < a.deltat*0.001):
                continue

            tmin = max(a.tmin, b.tmin)
            tmax = min(a.tmax, b.tmax)

            if tmin > tmax:
                continue

            ac = a.chop(tmin, tmax, inplace=False, include_last=True)
            bc = b.chop(tmin, tmax, inplace=False, include_last=True)
            if abs(ac.tmin - bc.tmin) > ac.deltat*0.01:
                logger.warning(
                    'Cannot project traces with displaced sampling '
                    '(%s, %s, %s, %s)' % a.nslc_id)
                continue

            acydata = num.dot(matrix[0], (ac.get_ydata(), bc.get_ydata()))
            bcydata = num.dot(matrix[1], (ac.get_ydata(), bc.get_ydata()))

            ac.set_ydata(acydata)
            bc.set_ydata(bcydata)

            ac.set_codes(channel=out_channels[0])
            bc.set_codes(channel=out_channels[1])

            projected.append(ac)
            projected.append(bc)

    return projected


def _project3(traces, matrix, in_channels, out_channels):
    assert len(in_channels) == 3
    assert len(out_channels) == 3
    assert matrix.shape == (3, 3)
    projected = []
    for a in traces:
        for b in traces:
            for c in traces:
                if not ((a.channel, b.channel, c.channel) == in_channels
                        and a.nslc_id[:3] == b.nslc_id[:3]
                        and b.nslc_id[:3] == c.nslc_id[:3]
                        and abs(a.deltat-b.deltat) < a.deltat*0.001
                        and abs(b.deltat-c.deltat) < b.deltat*0.001):

                    continue

                tmin = max(a.tmin, b.tmin, c.tmin)
                tmax = min(a.tmax, b.tmax, c.tmax)

                if tmin >= tmax:
                    continue

                ac = a.chop(tmin, tmax, inplace=False, include_last=True)
                bc = b.chop(tmin, tmax, inplace=False, include_last=True)
                cc = c.chop(tmin, tmax, inplace=False, include_last=True)
                if (abs(ac.tmin - bc.tmin) > ac.deltat*0.01
                        or abs(bc.tmin - cc.tmin) > bc.deltat*0.01):

                    logger.warning(
                        'Cannot project traces with displaced sampling '
                        '(%s, %s, %s, %s)' % a.nslc_id)
                    continue

                acydata = num.dot(
                    matrix[0],
                    (ac.get_ydata(), bc.get_ydata(), cc.get_ydata()))
                bcydata = num.dot(
                    matrix[1],
                    (ac.get_ydata(), bc.get_ydata(), cc.get_ydata()))
                ccydata = num.dot(
                    matrix[2],
                    (ac.get_ydata(), bc.get_ydata(), cc.get_ydata()))

                ac.set_ydata(acydata)
                bc.set_ydata(bcydata)
                cc.set_ydata(ccydata)

                ac.set_codes(channel=out_channels[0])
                bc.set_codes(channel=out_channels[1])
                cc.set_codes(channel=out_channels[2])

                projected.append(ac)
                projected.append(bc)
                projected.append(cc)

    return projected


def correlate(a, b, mode='valid', normalization=None, use_fft=False):
    '''
    Cross correlation of two traces.

    :param a,b: input traces
    :param mode: ``'valid'``, ``'full'``, or ``'same'``
    :param normalization: ``'normal'``, ``'gliding'``, or ``None``
    :param use_fft: bool, whether to do cross correlation in spectral domain

    :returns: trace containing cross correlation coefficients

    This function computes the cross correlation between two traces. It
    evaluates the discrete equivalent of

    .. math::

       c(t) = \\int_{-\\infty}^{\\infty} a^{\\ast}(\\tau) b(t+\\tau) d\\tau

    where the star denotes complex conjugate. Note that the arguments here
    are swapped when compared with the :py:func:`numpy.correlate` function,
    which is internally called. This function should be safe even with older
    versions of NumPy, where the correlate function has some problems.

    A trace containing the cross correlation coefficients is returned. The
    time information of the output trace is set so that the returned cross
    correlation can be viewed directly as a function of time lag.

    Example::

        # align two traces a and b containing a time shifted similar signal:
        c = pyrocko.trace.correlate(a, b)
        t, coef = c.max()  # get time and value of maximum
        b.shift(-t)        # align b with a
    '''

    assert_same_sampling_rate(a, b)

    ya, yb = a.ydata, b.ydata

    # need reversed order here:
    yc = numpy_correlate_fixed(yb, ya, mode=mode, use_fft=use_fft)
    kmin, kmax = numpy_correlate_lag_range(yb, ya, mode=mode, use_fft=use_fft)

    if normalization == 'normal':
        normfac = num.sqrt(num.sum(ya**2))*num.sqrt(num.sum(yb**2))
        yc = yc/normfac

    elif normalization == 'gliding':
        if mode != 'valid':
            assert False, 'gliding normalization currently only available ' \
                'with "valid" mode.'

        if ya.size < yb.size:
            yshort, ylong = ya, yb
        else:
            yshort, ylong = yb, ya

        epsilon = 0.00001
        normfac_short = num.sqrt(num.sum(yshort**2))
        normfac = normfac_short * num.sqrt(
            moving_sum(ylong**2, yshort.size, mode='valid')) \
            + normfac_short*epsilon

        if yb.size <= ya.size:
            normfac = normfac[::-1]

        yc /= normfac

    c = a.copy()
    c.set_ydata(yc)
    c.set_codes(*merge_codes(a, b, '~'))
    c.shift(-c.tmin + b.tmin-a.tmin + kmin * c.deltat)

    return c


def deconvolve(
        a, b, waterlevel,
        tshift=0.,
        pad=0.5,
        fd_taper=None,
        pad_to_pow2=True):

    '''
    Waterlevel deconvolution of two traces.

    Deconvolves trace ``b`` from trace ``a`` in the frequency domain,
    stabilized by clipping the denominator spectrum at ``waterlevel`` times
    its maximum. Both traces must have the same sampling rate and onset time.
    '''

    assert_same_sampling_rate(a, b)
    assert abs(a.tmin - b.tmin) < a.deltat * 0.001
    deltat = a.deltat
    npad = int(round(a.data_len()*pad + tshift / deltat))

    ndata = max(a.data_len(), b.data_len())
    ndata_pad = ndata + npad

    if pad_to_pow2:
        ntrans = nextpow2(ndata_pad)
    else:
        ntrans = ndata

    aspec = num.fft.rfft(a.ydata, ntrans)
    bspec = num.fft.rfft(b.ydata, ntrans)

    out = aspec * num.conj(bspec)

    bautocorr = bspec*num.conj(bspec)
    denom = num.maximum(bautocorr, waterlevel * bautocorr.max())

    out /= denom
    df = 1/(ntrans*deltat)

    if fd_taper is not None:
        fd_taper(out, 0.0, df)

    ydata = num.roll(num.fft.irfft(out), int(round(tshift/deltat)))
    c = a.copy(data=False)
    c.set_ydata(ydata[:ndata])
    c.set_codes(*merge_codes(a, b, '/'))
    return c


def assert_same_sampling_rate(a, b, eps=1.0e-6):
    assert same_sampling_rate(a, b, eps), \
        'Sampling rates differ: %g != %g' % (a.deltat, b.deltat)


def same_sampling_rate(a, b, eps=1.0e-6):
    '''
    Check if two traces have the same sampling rate.

    :param a,b: input traces
    :param eps: relative tolerance
    '''

    return abs(a.deltat - b.deltat) < (a.deltat + b.deltat)*eps


def fix_deltat_rounding_errors(deltat):
    '''
    Try to undo sampling rate rounding errors.

    Fix rounding errors of sampling intervals when these are read from single
    precision floating point values.

    Assumes that the true sampling rate or sampling interval was an integer
    value. No correction will be applied if this would change the sampling
    rate by more than 0.001%.
    '''

    if deltat <= 1.0:
        deltat_new = 1.0 / round(1.0 / deltat)
    else:
        deltat_new = round(deltat)

    if abs(deltat_new - deltat) / deltat > 1e-5:
        deltat_new = deltat

    return deltat_new


def merge_codes(a, b, sep='-'):
    '''
    Merge network-station-location-channel codes of a pair of traces.
    '''

    o = []
    for xa, xb in zip(a.nslc_id, b.nslc_id):
        if xa == xb:
            o.append(xa)
        else:
            o.append(sep.join((xa, xb)))
    return o


class Taper(Object):
    '''
    Base class for tapers.

    Does nothing by default.
    '''

    def __call__(self, y, x0, dx):
        pass


class CosTaper(Taper):
    '''
    Cosine Taper.

    :param a: start of fading in
    :param b: end of fading in
    :param c: start of fading out
    :param d: end of fading out
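
    Example (a sketch; fades a trace ``tr`` in and out over one second at
    either end)::

        taper = CosTaper(tr.tmin, tr.tmin+1., tr.tmax-1., tr.tmax)
        tr.taper(taper)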

    '''

    a = Float.T()
    b = Float.T()
    c = Float.T()
    d = Float.T()

    def __init__(self, a, b, c, d):
        Taper.__init__(self, a=a, b=b, c=c, d=d)

    def __call__(self, y, x0, dx):
        apply_costaper(self.a, self.b, self.c, self.d, y, x0, dx)

    def span(self, y, x0, dx):
        return span_costaper(self.a, self.b, self.c, self.d, y, x0, dx)

    def time_span(self):
        return self.a, self.d


class CosFader(Taper):
    '''
    Cosine Fader.

    :param xfade: fade in and fade out time in seconds (optional)
    :param xfrac: fade in and fade out as fraction between 0. and 1.
        (optional)

    Only one argument can be set. The other must be ``None``.
    '''

    xfade = Float.T(optional=True)
    xfrac = Float.T(optional=True)

    def __init__(self, xfade=None, xfrac=None):
        Taper.__init__(self, xfade=xfade, xfrac=xfrac)
        assert (xfade is None) != (xfrac is None)
        self._xfade = xfade
        self._xfrac = xfrac

    def __call__(self, y, x0, dx):

        xfade = self._xfade

        xlen = (y.size - 1)*dx
        if xfade is None:
            xfade = xlen * self._xfrac

        a = x0
        b = x0 + xfade
        c = x0 + xlen - xfade
        d = x0 + xlen

        apply_costaper(a, b, c, d, y, x0, dx)

    def span(self, y, x0, dx):
        return 0, y.size

    def time_span(self):
        return None, None


def none_min(li):
    if None in li:
        return None
    else:
        return min(x for x in li if x is not None)


def none_max(li):
    if None in li:
        return None
    else:
        return max(x for x in li if x is not None)


class MultiplyTaper(Taper):
    '''
    Multiplication of several tapers.
    '''

    tapers = List.T(Taper.T())

    def __init__(self, tapers=None):
        if tapers is None:
            tapers = []

        Taper.__init__(self, tapers=tapers)

    def __call__(self, y, x0, dx):
        for taper in self.tapers:
            taper(y, x0, dx)

    def span(self, y, x0, dx):
        spans = []
        for taper in self.tapers:
            spans.append(taper.span(y, x0, dx))

        mins, maxs = list(zip(*spans))
        return min(mins), max(maxs)

    def time_span(self):
        spans = []
        for taper in self.tapers:
            spans.append(taper.time_span())

        mins, maxs = list(zip(*spans))
        return none_min(mins), none_max(maxs)


class GaussTaper(Taper):
    '''
    Frequency domain Gaussian filter.
    '''

    alpha = Float.T()

    def __init__(self, alpha):
        Taper.__init__(self, alpha=alpha)
        self._alpha = alpha

    def __call__(self, y, x0, dx):
        f = x0 + num.arange(y.size)*dx
        y *= num.exp(-num.pi**2 / (self._alpha**2) * f**2)


cached_coefficients = {}


def _get_cached_filter_coefs(order, corners, btype):
    ck = (order, tuple(corners), btype)
    if ck not in cached_coefficients:
        if len(corners) == 1:
            cached_coefficients[ck] = signal.butter(
                order, corners[0], btype=btype)
        else:
            cached_coefficients[ck] = signal.butter(
                order, corners, btype=btype)

    return cached_coefficients[ck]


class _globals(object):
    _numpy_has_correlate_flip_bug = None


def _default_key(tr):
    return (tr.network, tr.station, tr.location, tr.channel)


def numpy_has_correlate_flip_bug():
    '''
    Check if NumPy's correlate function reveals old behaviour.
    '''

    if _globals._numpy_has_correlate_flip_bug is None:
        a = num.array([0, 0, 1, 0, 0, 0, 0])
        b = num.array([0, 0, 0, 0, 1, 0, 0, 0])
        ab = num.correlate(a, b, mode='same')
        ba = num.correlate(b, a, mode='same')
        _globals._numpy_has_correlate_flip_bug = num.all(ab == ba)

    return _globals._numpy_has_correlate_flip_bug


def numpy_correlate_fixed(a, b, mode='valid', use_fft=False):
    '''
    Call :py:func:`numpy.correlate` with fixes.

        c[k] = sum_i a[i+k] * conj(b[i])

    Note that the result produced by newer numpy.correlate is always flipped
    with respect to the formula given in its documentation (if ascending k
    assumed for the output).
    '''

    if use_fft:
        if a.size < b.size:
            c = signal.fftconvolve(b[::-1], a, mode=mode)
        else:
            c = signal.fftconvolve(a, b[::-1], mode=mode)
        return c

    else:
        buggy = numpy_has_correlate_flip_bug()

        a = num.asarray(a)
        b = num.asarray(b)

        if buggy:
            b = num.conj(b)

        c = num.correlate(a, b, mode=mode)

        if buggy and a.size < b.size:
            return c[::-1]
        else:
            return c


def numpy_correlate_emulate(a, b, mode='valid'):
    '''
    Slow version of :py:func:`numpy.correlate` for comparison.
    '''

    a = num.asarray(a)
    b = num.asarray(b)
    kmin, kmax = numpy_correlate_lag_range(a, b, mode=mode)
    kmin = int(kmin)
    kmax = int(kmax)
    klen = kmax - kmin + 1
    c = num.zeros(klen, dtype=num.find_common_type((b.dtype, a.dtype), ()))
    for k in range(kmin, kmin+klen):
        imin = max(0, -k)
        ilen = min(b.size, a.size-k) - imin
        c[k-kmin] = num.sum(
            a[imin+k:imin+ilen+k] * num.conj(b[imin:imin+ilen]))

    return c


def numpy_correlate_lag_range(a, b, mode='valid', use_fft=False):
    '''
    Get range of lags for which :py:func:`numpy.correlate` produces values.
    '''

    a = num.asarray(a)
    b = num.asarray(b)

    kmin = -(b.size-1)
    if mode == 'full':
        klen = a.size-kmin
    elif mode == 'same':
        klen = max(a.size, b.size)
        kmin += (a.size+b.size-1 - max(a.size, b.size)) // 2 + \
            int(not use_fft and a.size % 2 == 0 and b.size > a.size)
    elif mode == 'valid':
        klen = abs(a.size - b.size) + 1
        kmin += min(a.size, b.size) - 1

    return kmin, kmin + klen - 1


def autocorr(x, nshifts):
    '''
    Compute biased estimate of the first autocorrelation coefficients.

    :param x: input array
    :param nshifts: number of coefficients to calculate
    '''

    mean = num.mean(x)
    std = num.std(x)
    n = x.size
    xdm = x - mean
    r = num.zeros(nshifts)
    for k in range(nshifts):
        r[k] = 1./((n-num.abs(k))*std) * num.sum(xdm[:n-k] * xdm[k:])

    return r


def yulewalker(x, order):
    '''
    Compute autoregression coefficients using Yule-Walker method.

    :param x: input array
    :param order: number of coefficients to produce

    A biased estimate of the autocorrelation is used. The Yule-Walker
    equations are solved by :py:func:`numpy.linalg.inv` instead of
    Levinson-Durbin recursion which is normally used.
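
    Example (a sketch; fits AR coefficients of order 2 to a random
    sequence)::

        import numpy as num

        x = num.random.randn(1000)
        coefs = yulewalker(x, order=2)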

    '''

    gamma = autocorr(x, order+1)
    d = gamma[1:1+order]
    a = num.zeros((order, order))
    gamma2 = num.concatenate((gamma[::-1], gamma[1:order]))
    for i in range(order):
        ioff = order-i
        a[i, :] = gamma2[ioff:ioff+order]

    return num.dot(num.linalg.inv(a), -d)


def moving_avg(x, n):
    n = int(n)
    cx = x.cumsum()
    nn = len(x)
    y = num.zeros(nn, dtype=cx.dtype)
    y[n//2:n//2+(nn-n)] = (cx[n:]-cx[:-n])/n
    y[:n//2] = y[n//2]
    y[n//2+(nn-n):] = y[n//2+(nn-n)-1]
    return y


def moving_sum(x, n, mode='valid'):
    n = int(n)
    cx = x.cumsum()
    nn = len(x)

    if mode == 'valid':
        if nn-n+1 <= 0:
            return num.zeros(0, dtype=cx.dtype)
        y = num.zeros(nn-n+1, dtype=cx.dtype)
        y[0] = cx[n-1]
        y[1:nn-n+1] = cx[n:nn]-cx[0:nn-n]

    if mode == 'full':
        y = num.zeros(nn+n-1, dtype=cx.dtype)
        if n <= nn:
            y[0:n] = cx[0:n]
            y[n:nn] = cx[n:nn]-cx[0:nn-n]
            y[nn:nn+n-1] = cx[-1]-cx[nn-n:nn-1]
        else:
            y[0:nn] = cx[0:nn]
            y[nn:n] = cx[nn-1]
            y[n:nn+n-1] = cx[nn-1] - cx[0:nn-1]

    if mode == 'same':
        n1 = (n-1)//2
        y = num.zeros(nn, dtype=cx.dtype)
        if n <= nn:
            y[0:n-n1] = cx[n1:n]
            y[n-n1:nn-n1] = cx[n:nn]-cx[0:nn-n]
            y[nn-n1:nn] = cx[nn-1] - cx[nn-n:nn-n+n1]
        else:
            y[0:max(0, nn-n1)] = cx[min(n1, nn):nn]
            y[max(nn-n1, 0):min(n-n1, nn)] = cx[nn-1]
            y[min(n-n1, nn):nn] = cx[nn-1] - cx[0:max(0, nn-(n-n1))]

    return y


def nextpow2(i):
    return 2**int(math.ceil(math.log(i)/math.log(2.)))


def snapper_w_offset(nmax, offset, delta, snapfun=math.ceil):
    def snap(x):
        return max(0, min(int(snapfun((x-offset)/delta)), nmax))
    return snap


def snapper(nmax, delta, snapfun=math.ceil):
    def snap(x):
        return max(0, min(int(snapfun(x/delta)), nmax))
    return snap


def apply_costaper(a, b, c, d, y, x0, dx):
    hi = snapper_w_offset(y.size, x0, dx)
    y[:hi(a)] = 0.
    y[hi(a):hi(b)] *= 0.5 \
        - 0.5*num.cos((dx*num.arange(hi(a), hi(b))-(a-x0))/(b-a)*num.pi)
    y[hi(c):hi(d)] *= 0.5 \
        + 0.5*num.cos((dx*num.arange(hi(c), hi(d))-(c-x0))/(d-c)*num.pi)
    y[hi(d):] = 0.


def span_costaper(a, b, c, d, y, x0, dx):
    hi = snapper_w_offset(y.size, x0, dx)
    return hi(a), hi(d) - hi(a)


def costaper(a, b, c, d, nfreqs, deltaf):
    hi = snapper(nfreqs, deltaf)
    tap = num.zeros(nfreqs)
    tap[hi(a):hi(b)] = 0.5 \
        - 0.5*num.cos((deltaf*num.arange(hi(a), hi(b))-a)/(b-a)*num.pi)
    tap[hi(b):hi(c)] = 1.
    tap[hi(c):hi(d)] = 0.5 \
        + 0.5*num.cos((deltaf*num.arange(hi(c), hi(d))-c)/(d-c)*num.pi)

    return tap


def t2ind(t, tdelta, snap=round):
    return int(snap(t/tdelta))


def hilbert(x, N=None):
    '''
    Return the analytic signal of x of length N.

    The imaginary part of the result is the Hilbert transform of x.

    (from scipy.signal, but changed to use fft and ifft from numpy.fft)
    '''

    x = num.asarray(x)
    if N is None:
        N = len(x)
    if N <= 0:
        raise ValueError("N must be positive.")
    if num.iscomplexobj(x):
        logger.warning('imaginary part of x ignored.')
        x = num.real(x)

    Xf = num.fft.fft(x, N, axis=0)
    h = num.zeros(N)
    if N % 2 == 0:
        h[0] = h[N//2] = 1
        h[1:N//2] = 2
    else:
        h[0] = 1
        h[1:(N+1)//2] = 2

    if len(x.shape) > 1:
        h = h[:, num.newaxis]
    x = num.fft.ifft(Xf*h)
    return x


def near(a, b, eps):
    return abs(a-b) < eps


def coroutine(func):
    def wrapper(*args, **kwargs):
        gen = func(*args, **kwargs)
        next(gen)
        return gen

    wrapper.__name__ = func.__name__
    wrapper.__dict__ = func.__dict__
    wrapper.__doc__ = func.__doc__
    return wrapper


class States(object):
    '''
    Utility to store channel-specific state in coroutines.
    '''

    def __init__(self):
        self._states = {}

    def get(self, tr):
        k = tr.nslc_id
        if k in self._states:
            tmin, deltat, dtype, value = self._states[k]
            if (near(tmin, tr.tmin, deltat/100.)
                    and near(deltat, tr.deltat, deltat/10000.)
                    and dtype == tr.ydata.dtype):

                return value

        return None

    def set(self, tr, value):
        k = tr.nslc_id
        if k in self._states and self._states[k][-1] is not value:
            self.free(self._states[k][-1])

        self._states[k] = (tr.tmax+tr.deltat, tr.deltat, tr.ydata.dtype, value)

    def free(self, value):
        pass


@coroutine
def co_list_append(list):
    while True:
        list.append((yield))


class ScipyBug(Exception):
    pass


@coroutine
def co_lfilter(target, b, a):
    '''
    Successively filter broken continuous trace data (coroutine).

    Create coroutine which takes :py:class:`Trace` objects, filters their data
    through :py:func:`scipy.signal.lfilter` and sends new :py:class:`Trace`
    objects containing the filtered data to target. This is useful, if one
    wants to filter a long continuous time series, which is split into many
    successive traces without producing filter artifacts at trace boundaries.

    Filter states are kept *per channel*, specifically, for each (network,
    station, location, channel) combination occurring in the input traces, a
    separate state is created and maintained. This makes it possible to filter
    multichannel or multistation data with only one :py:func:`co_lfilter`
    instance.

    Filter state is reset, when gaps occur.

    Use it like this::

        from pyrocko.trace import co_lfilter, co_list_append

        filtered_traces = []
        pipe = co_lfilter(co_list_append(filtered_traces), b, a)
        for trace in traces:
            pipe.send(trace)

        pipe.close()

    '''

    try:
        states = States()
        output = None
        while True:
            input = (yield)

            zi = states.get(input)
            if zi is None:
                zi = num.zeros(max(len(a), len(b))-1, dtype=float)

            output = input.copy(data=False)
            try:
                ydata, zf = signal.lfilter(b, a, input.get_ydata(), zi=zi)
            except ValueError:
                raise ScipyBug(
                    'signal.lfilter failed: could be related to a bug '
                    'in some older scipy versions, e.g. on opensuse42.1')

            output.set_ydata(ydata)
            states.set(input, zf)
            target.send(output)

    except GeneratorExit:
        target.close()


def co_antialias(target, q, n=None, ftype='fir'):
    b, a, n = util.decimate_coeffs(q, n, ftype)
    anti = co_lfilter(target, b, a)
    return anti


@coroutine
def co_dropsamples(target, q, nfir):
    try:
        states = States()
        while True:
            tr = (yield)
            newdeltat = q * tr.deltat
            ioffset = states.get(tr)
            if ioffset is None:
                # for fir filter, the first nfir samples are polluted by
                # boundary effects; cut them off.
                # for iir this may be (much) more, we do not correct for that.
                # put sample instances to a time which is a multiple of the
                # new sampling interval.
                newtmin_want = math.ceil(
                    (tr.tmin+(nfir+1)*tr.deltat) / newdeltat) * newdeltat \
                    - (nfir/2*tr.deltat)
                ioffset = int(round((newtmin_want - tr.tmin)/tr.deltat))
                if ioffset < 0:
                    ioffset = ioffset % q

            newtmin_have = tr.tmin + ioffset * tr.deltat
            newtr = tr.copy(data=False)
            newtr.deltat = newdeltat
            # because the fir kernel shifts data by nfir/2 samples:
            newtr.tmin = newtmin_have - (nfir/2*tr.deltat)
            newtr.set_ydata(tr.get_ydata()[ioffset::q].copy())
            states.set(tr, (ioffset % q - tr.data_len() % q) % q)
            target.send(newtr)

    except GeneratorExit:
        target.close()


def co_downsample(target, q, n=None, ftype='fir'):
    '''
    Successively downsample broken continuous trace data (coroutine).

    Create coroutine which takes :py:class:`Trace` objects, downsamples their
    data and sends new :py:class:`Trace` objects containing the downsampled
    data to target. This is useful, if one wants to downsample a long
    continuous time series, which is split into many successive traces without
    producing filter artifacts and gaps at trace boundaries.

    Filter states are kept *per channel*, specifically, for each (network,
    station, location, channel) combination occurring in the input traces, a
    separate state is created and maintained. This makes it possible to filter
    multichannel or multistation data with only one :py:func:`co_downsample`
    instance.

    Filter state is reset, when gaps occur. The sampling instances are chosen
    so that they occur at (or as close as possible to) even multiples of the
    sampling interval of the downsampled trace (based on system time).
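
    Use it like this (a sketch, analogous to :py:func:`co_lfilter`, assuming
    ``traces`` is an iterable of :py:class:`Trace` objects)::

        from pyrocko.trace import co_downsample, co_list_append

        downsampled = []
        pipe = co_downsample(co_list_append(downsampled), q=2)
        for tr in traces:
            pipe.send(tr)

        pipe.close()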

    '''

    b, a, n = util.decimate_coeffs(q, n, ftype)
    return co_antialias(co_dropsamples(target, q, n), q, n, ftype)


@coroutine
def co_downsample_to(target, deltat):

    decimators = {}
    try:
        while True:
            tr = (yield)
            ratio = deltat / tr.deltat
            rratio = round(ratio)
            if abs(rratio - ratio)/ratio > 0.0001:
                raise util.UnavailableDecimation('ratio = %g' % ratio)

            deci_seq = tuple(x for x in util.decitab(int(rratio)) if x != 1)
            if deci_seq not in decimators:
                pipe = target
                for q in deci_seq[::-1]:
                    pipe = co_downsample(pipe, q)

                decimators[deci_seq] = pipe

            decimators[deci_seq].send(tr)

    except GeneratorExit:
        for g in decimators.values():
            g.close()


class DomainChoice(StringChoice):
    choices = [
        'time_domain',
        'frequency_domain',
        'envelope',
        'absolute',
        'cc_max_norm']


class MisfitSetup(Object):
    '''
    Contains misfit setup to be used in :py:meth:`Trace.misfit`.

    :param description: Description of the setup
    :param norm: L-norm classifier
    :param taper: Object of :py:class:`Taper`
    :param filter: Object of :py:class:`FrequencyResponse`
    :param domain: ['time_domain', 'frequency_domain', 'envelope', 'absolute',
        'cc_max_norm']

    Can be dumped to a YAML file.
    '''

    xmltagname = 'misfitsetup'
    description = String.T(optional=True)
    norm = Int.T(optional=False)
    taper = Taper.T(optional=False)
    filter = FrequencyResponse.T(optional=True)
    domain = DomainChoice.T(default='time_domain')


def equalize_sampling_rates(trace_1, trace_2):
    '''
    Equalize sampling rates of two traces (reduce higher sampling rate to
    lower).

    :param trace_1: :py:class:`Trace` object
    :param trace_2: :py:class:`Trace` object

    Returns the two traces. If resampling is needed, the trace with the
    higher sampling rate is returned as a downsampled copy.
    '''

    if same_sampling_rate(trace_1, trace_2):
        return trace_1, trace_2

    if trace_1.deltat < trace_2.deltat:
        t1_out = trace_1.copy()
        t1_out.downsample_to(deltat=trace_2.deltat, snap=True)
        logger.debug('Trace downsampled (return copy of trace): %s'
                     % '.'.join(t1_out.nslc_id))
        return t1_out, trace_2

    elif trace_1.deltat > trace_2.deltat:
        t2_out = trace_2.copy()
        t2_out.downsample_to(deltat=trace_1.deltat, snap=True)
        logger.debug('Trace downsampled (return copy of trace): %s'
                     % '.'.join(t2_out.nslc_id))
        return trace_1, t2_out


def Lx_norm(u, v, norm=2):
    '''
    Calculate the misfit value *m* and the normalization divisor *n*
    according to norm.

    The normalization divisor *n* is calculated from ``v``.

    :param u: :py:class:`numpy.ndarray`
    :param v: :py:class:`numpy.ndarray`
    :param norm: (default = 2)

    ``u`` and ``v`` must be of same size.
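
    For example, with the default ``norm=2``, this evaluates

    .. math::

        m = \\sqrt{\\sum (v_i - u_i)^2}, \\qquad
        n = \\sqrt{\\sum v_i^2}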

    '''

    if norm == 1:
        return (
            num.sum(num.abs(v-u)),
            num.sum(num.abs(v)))

    elif norm == 2:
        return (
            num.sqrt(num.sum((v-u)**2)),
            num.sqrt(num.sum(v**2)))

    else:
        return (
            num.power(num.sum(num.abs(num.power(v - u, norm))), 1./norm),
            num.power(num.sum(num.abs(num.power(v, norm))), 1./norm))


def do_downsample(tr, deltat):
    if abs(tr.deltat - deltat) / tr.deltat > 1e-6:
        tr = tr.copy()
        tr.downsample_to(deltat, snap=True, demean=False)
    else:
        if tr.tmin/tr.deltat > 1e-6 or tr.tmax/tr.deltat > 1e-6:
            tr = tr.copy()
            tr.snap()
    return tr


def do_extend(tr, tmin, tmax):
    if tmin < tr.tmin or tmax > tr.tmax:
        tr = tr.copy()
        tr.extend(tmin=tmin, tmax=tmax, fillmethod='repeat')

    return tr


def do_pre_taper(tr, taper):
    return tr.taper(taper, inplace=False, chop=True)


def do_fft(tr, filter):
    if filter is None:
        return tr
    else:
        ndata = tr.ydata.size
        nfft = nextpow2(ndata)
        padded = num.zeros(nfft, dtype=float)
        padded[:ndata] = tr.ydata
        spectrum = num.fft.rfft(padded)
        df = 1.0 / (tr.deltat * nfft)
        frequencies = num.arange(spectrum.size)*df
        return [tr, frequencies, spectrum]


def do_filter(inp, filter):
    if filter is None:
        return inp
    else:
        tr, frequencies, spectrum = inp
        spectrum *= filter.evaluate(frequencies)
        return [tr, frequencies, spectrum]


def do_ifft(inp):
    if isinstance(inp, Trace):
        return inp
    else:
        tr, _, spectrum = inp
        ndata = tr.ydata.size
        tr = tr.copy(data=False)
        tr.set_ydata(num.fft.irfft(spectrum)[:ndata])
        return tr


def check_alignment(t1, t2):
    if abs(t1.tmin-t2.tmin) > t1.deltat * 1e-4 or \
            abs(t1.tmax - t2.tmax) > t1.deltat * 1e-4 or \
            t1.ydata.shape != t2.ydata.shape:
        raise MisalignedTraces(
            'Cannot calculate misfit of %s and %s due to misaligned '
            'traces.' % ('.'.join(t1.nslc_id), '.'.join(t2.nslc_id)))