# http://pyrocko.org - GPLv3
#
# The Pyrocko Developers, 21st Century
# ---|P------/S----------~Lg----------
'''
This module provides basic signal processing for seismic traces.
'''

import time
import math
import copy
import logging
import hashlib
from collections import defaultdict

import numpy as num
from scipy import signal

from pyrocko import util, orthodrome, pchain, model
from pyrocko.util import reuse
from pyrocko.guts import Object, Float, Int, String, List, \
    StringChoice, Timestamp
from pyrocko.guts_array import Array
from pyrocko.model import squirrel_content

# backward compatibility
from pyrocko.util import UnavailableDecimation  # noqa
from pyrocko.response import (  # noqa
    FrequencyResponse, Evalresp, InverseEvalresp, PoleZeroResponse,
    ButterworthResponse, SampledResponse, IntegrationResponse,
    DifferentiationResponse, MultiplyResponse)


guts_prefix = 'pf'

logger = logging.getLogger('pyrocko.trace')


@squirrel_content
class Trace(Object):

    '''
    Create new trace object.

    A ``Trace`` object represents a single continuous strip of evenly sampled
    time series data. It is built from a 1D NumPy array containing the data
    samples and some attributes describing its beginning and ending time, its
    sampling rate and four string identifiers (its network, station, location
    and channel code).

    :param network: network code
    :param station: station code
    :param location: location code
    :param channel: channel code
    :param extra: extra code
    :param tmin: system time of first sample in [s]
    :param tmax: system time of last sample in [s] (if set to ``None`` it is
        computed from length of ``ydata``)
    :param deltat: sampling interval in [s]
    :param ydata: 1D numpy array with data samples (can be ``None`` when
        ``tmax`` is not ``None``)
    :param mtime: optional modification time

    :param meta: additional meta information (not used, but maintained by the
        library)

    The length of the network, station, location and channel codes is not
    restricted by this software, but data formats like SAC, Mini-SEED or GSE
    have different limits on the lengths of these codes. The codes set here
    are silently truncated when the trace is stored.
    '''
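
    # Construction sketch (illustrative only, all values made up): a
    # one-minute trace of zeros sampled at 100 Hz. ``num`` and ``util`` are
    # the module-level imports above.
    #
    #     tr = Trace(
    #         network='XX', station='TEST', channel='BHZ',
    #         tmin=util.str_to_time('2010-01-01 00:00:00'),
    #         deltat=0.01, ydata=num.zeros(6000))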


    network = String.T(default='', help='Deployment/network code (1-8)')
    station = String.T(default='STA', help='Station code (1-5)')
    location = String.T(default='', help='Location code (0-2)')
    channel = String.T(default='', help='Channel code (3)')
    extra = String.T(default='', help='Extra/custom code')

    tmin = Timestamp.T(default=Timestamp.D('1970-01-01 00:00:00'))
    tmax = Timestamp.T()

    deltat = Float.T(default=1.0)
    ydata = Array.T(optional=True, shape=(None,), serialize_as='base64+meta')

    mtime = Timestamp.T(optional=True)

    cached_frequencies = {}

    def __init__(self, network='', station='STA', location='', channel='',
                 tmin=0., tmax=None, deltat=1., ydata=None, mtime=None,
                 meta=None, extra=''):

        Object.__init__(self, init_props=False)

        time_float = util.get_time_float()

        if not isinstance(tmin, time_float):
            tmin = Trace.tmin.regularize_extra(tmin)

        if tmax is not None and not isinstance(tmax, time_float):
            tmax = Trace.tmax.regularize_extra(tmax)

        if mtime is not None and not isinstance(mtime, time_float):
            mtime = Trace.mtime.regularize_extra(mtime)

        if ydata is not None and not isinstance(ydata, num.ndarray):
            ydata = Trace.ydata.regularize_extra(ydata)

        self._growbuffer = None

        tmin = time_float(tmin)
        if tmax is not None:
            tmax = time_float(tmax)

        if mtime is None:
            mtime = time_float(time.time())

        self.network, self.station, self.location, self.channel, \
            self.extra = [
                reuse(x) for x in (
                    network, station, location, channel, extra)]

        self.tmin = tmin
        self.deltat = deltat

        if tmax is None:
            if ydata is not None:
                self.tmax = self.tmin + (ydata.size-1)*self.deltat
            else:
                raise Exception(
                    'fixme: trace must be created with tmax or ydata')
        else:
            n = int(round((tmax - self.tmin) / self.deltat)) + 1
            self.tmax = self.tmin + (n - 1) * self.deltat

        self.meta = meta
        self.ydata = ydata
        self.mtime = mtime
        self._update_ids()
        self.file = None
        self._pchain = None

    def __str__(self):
        fmt = min(9, max(0, -int(math.floor(math.log10(self.deltat)))))
        s = 'Trace (%s, %s, %s, %s)\n' % self.nslc_id
        s += '  timerange: %s - %s\n' % (
            util.time_to_str(self.tmin, format=fmt),
            util.time_to_str(self.tmax, format=fmt))

        s += '  delta t: %g\n' % self.deltat
        if self.meta:
            for k in sorted(self.meta.keys()):
                s += '  %s: %s\n' % (k, self.meta[k])
        return s

    @property
    def codes(self):
        from pyrocko.squirrel import model
        return model.CodesNSLCE(
            self.network, self.station, self.location, self.channel,
            self.extra)

    @property
    def time_span(self):
        return self.tmin, self.tmax

    @property
    def summary(self):
        return '%s %-16s %s %g' % (
            self.__class__.__name__, self.str_codes, self.str_time_span,
            self.deltat)

    def __getstate__(self):
        return (self.network, self.station, self.location, self.channel,
                self.tmin, self.tmax, self.deltat, self.mtime,
                self.ydata, self.meta, self.extra)

    def __setstate__(self, state):
        if len(state) == 11:
            self.network, self.station, self.location, self.channel, \
                self.tmin, self.tmax, self.deltat, self.mtime, \
                self.ydata, self.meta, self.extra = state

        elif len(state) == 12:
            # backward compatibility 0
            self.network, self.station, self.location, self.channel, \
                self.tmin, self.tmax, self.deltat, self.mtime, \
                self.ydata, self.meta, _, self.extra = state

        elif len(state) == 10:
            # backward compatibility 1
            self.network, self.station, self.location, self.channel, \
                self.tmin, self.tmax, self.deltat, self.mtime, \
                self.ydata, self.meta = state

            self.extra = ''

        else:
            # backward compatibility 2
            self.network, self.station, self.location, self.channel, \
                self.tmin, self.tmax, self.deltat, self.mtime = state
            self.ydata = None
            self.meta = None
            self.extra = ''

        self._growbuffer = None
        self._update_ids()

    def hash(self, unsafe=False):
        sha1 = hashlib.sha1()
        for code in self.nslc_id:
            sha1.update(code.encode())
        sha1.update(self.tmin.hex().encode())
        sha1.update(self.tmax.hex().encode())
        sha1.update(self.deltat.hex().encode())

        if self.ydata is not None:
            if not unsafe:
                sha1.update(memoryview(self.ydata))
            else:
                sha1.update(memoryview(self.ydata[:32]))
                sha1.update(memoryview(self.ydata[-32:]))

        return sha1.hexdigest()

    def name(self):
        '''
        Get a short string description.
        '''

        s = '%s.%s.%s.%s, %s, %s' % (self.nslc_id + (
            util.time_to_str(self.tmin),
            util.time_to_str(self.tmax)))

        return s

    def __eq__(self, other):
        return (
            isinstance(other, Trace)
            and self.network == other.network
            and self.station == other.station
            and self.location == other.location
            and self.channel == other.channel
            and (abs(self.deltat - other.deltat)
                 < (self.deltat + other.deltat)*1e-6)
            and abs(self.tmin-other.tmin) < self.deltat*0.01
            and abs(self.tmax-other.tmax) < self.deltat*0.01
            and num.all(self.ydata == other.ydata))

    def almost_equal(self, other, rtol=1e-5, atol=0.0):
        return (
            self.network == other.network
            and self.station == other.station
            and self.location == other.location
            and self.channel == other.channel
            and (abs(self.deltat - other.deltat)
                 < (self.deltat + other.deltat)*1e-6)
            and abs(self.tmin-other.tmin) < self.deltat*0.01
            and abs(self.tmax-other.tmax) < self.deltat*0.01
            and num.allclose(self.ydata, other.ydata, rtol=rtol, atol=atol))

    def assert_almost_equal(self, other, rtol=1e-5, atol=0.0):

        assert self.network == other.network, \
            'network codes differ: %s, %s' % (self.network, other.network)
        assert self.station == other.station, \
            'station codes differ: %s, %s' % (self.station, other.station)
        assert self.location == other.location, \
            'location codes differ: %s, %s' % (self.location, other.location)
        assert self.channel == other.channel, 'channel codes differ'
        assert (abs(self.deltat - other.deltat)
                < (self.deltat + other.deltat)*1e-6), \
            'sampling intervals differ %g, %g' % (self.deltat, other.deltat)
        assert abs(self.tmin-other.tmin) < self.deltat*0.01, \
            'start times differ: %s, %s' % (
                util.time_to_str(self.tmin), util.time_to_str(other.tmin))
        assert abs(self.tmax-other.tmax) < self.deltat*0.01, \
            'end times differ: %s, %s' % (
                util.time_to_str(self.tmax), util.time_to_str(other.tmax))

        assert num.allclose(self.ydata, other.ydata, rtol=rtol, atol=atol), \
            'trace values differ'

    def __hash__(self):
        return id(self)

    def __call__(self, t, clip=False, snap=round):
        it = int(snap((t-self.tmin)/self.deltat))
        if clip:
            it = max(0, min(it, self.ydata.size-1))
        else:
            if it < 0 or self.ydata.size <= it:
                raise IndexError()

        return self.tmin+it*self.deltat, self.ydata[it]

    def interpolate(self, t, clip=False):
        '''
        Value of trace between supporting points through linear interpolation.

        :param t: time instant
        :param clip: whether to clip indices to trace ends
        '''

        t0, y0 = self(t, clip=clip, snap=math.floor)
        t1, y1 = self(t, clip=clip, snap=math.ceil)
        if t0 == t1:
            return y0
        else:
            return y0+(t-t0)/(t1-t0)*(y1-y0)
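
    # Sketch of sample access vs. interpolation (illustrative values):
    # calling the trace returns the nearest sample, ``interpolate`` blends
    # the two neighbouring samples linearly.
    #
    #     tr = Trace(deltat=0.5, ydata=num.array([0., 1., 4., 9.]))
    #     tr(0.6)               # -> (0.5, 1.0), snapped to nearest sample
    #     tr.interpolate(0.75)  # -> 2.5, between samples at t=0.5 and t=1.0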


    def index_clip(self, i):
        '''
        Clip index to valid range.
        '''

        return min(max(0, i), self.ydata.size)

    def add(self, other, interpolate=True, left=0., right=0.):
        '''
        Add values of other trace (self += other).

        Add values of ``other`` trace to the values of ``self``, where it
        intersects with ``other``. This method does not change the extent of
        ``self``. If ``interpolate`` is ``True`` (the default), the values of
        ``other`` to be added are interpolated at sampling instants of
        ``self``. Linear interpolation is performed. In this case the
        sampling rate of ``other`` must be equal to or lower than that of
        ``self``. If ``interpolate`` is ``False``, the sampling rates of the
        two traces must match.
        '''

        if interpolate:
            assert self.deltat <= other.deltat \
                or same_sampling_rate(self, other)

            other_xdata = other.get_xdata()
            xdata = self.get_xdata()
            self.ydata += num.interp(
                xdata, other_xdata, other.ydata, left=left, right=right)
        else:
            assert self.deltat == other.deltat
            ioff = int(round((other.tmin-self.tmin)/self.deltat))
            ibeg = max(0, ioff)
            iend = min(self.data_len(), ioff+other.data_len())
            self.ydata[ibeg:iend] += other.ydata[ibeg-ioff:iend-ioff]
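
    # Stacking sketch (illustrative): summing a coarser trace onto a finer
    # one; the samples of ``b`` are linearly interpolated onto the time grid
    # of ``a``.
    #
    #     a = Trace(deltat=0.01, ydata=num.zeros(1000))
    #     b = Trace(deltat=0.02, ydata=num.ones(500))
    #     a.add(b)  # a now holds the interpolated values of b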


    def mult(self, other, interpolate=True):
        '''
        Multiply with values of other trace ``(self *= other)``.

        Multiply values of ``other`` trace with the values of ``self``,
        where it intersects with ``other``. This method does not change the
        extent of ``self``. If ``interpolate`` is ``True`` (the default),
        the values of ``other`` to be multiplied are interpolated at
        sampling instants of ``self``. Linear interpolation is performed.
        In this case the sampling rate of ``other`` must be equal to or
        lower than that of ``self``. If ``interpolate`` is ``False``, the
        sampling rates of the two traces must match.
        '''

        if interpolate:
            assert self.deltat <= other.deltat or \
                same_sampling_rate(self, other)

            other_xdata = other.get_xdata()
            xdata = self.get_xdata()
            self.ydata *= num.interp(
                xdata, other_xdata, other.ydata, left=0., right=0.)
        else:
            assert self.deltat == other.deltat
            ibeg1 = int(round((other.tmin-self.tmin)/self.deltat))
            ibeg2 = int(round((self.tmin-other.tmin)/self.deltat))
            iend1 = int(round((other.tmax-self.tmin)/self.deltat))+1
            iend2 = int(round((self.tmax-other.tmin)/self.deltat))+1

            ibeg1 = self.index_clip(ibeg1)
            iend1 = self.index_clip(iend1)
            ibeg2 = self.index_clip(ibeg2)
            iend2 = self.index_clip(iend2)

            self.ydata[ibeg1:iend1] *= other.ydata[ibeg2:iend2]

    def max(self):
        '''
        Get time and value of data maximum.
        '''

        i = num.argmax(self.ydata)
        return self.tmin + i*self.deltat, self.ydata[i]

    def min(self):
        '''
        Get time and value of data minimum.
        '''

        i = num.argmin(self.ydata)
        return self.tmin + i*self.deltat, self.ydata[i]

    def absmax(self):
        '''
        Get time and value of maximum of the absolute of data.
        '''

        tmi, mi = self.min()
        tma, ma = self.max()
        if abs(mi) > abs(ma):
            return tmi, abs(mi)
        else:
            return tma, abs(ma)

    def set_codes(
            self, network=None, station=None, location=None, channel=None,
            extra=None):

        '''
        Set network, station, location, and channel codes.
        '''

        if network is not None:
            self.network = network
        if station is not None:
            self.station = station
        if location is not None:
            self.location = location
        if channel is not None:
            self.channel = channel
        if extra is not None:
            self.extra = extra

        self._update_ids()

    def set_network(self, network):
        self.network = network
        self._update_ids()

    def set_station(self, station):
        self.station = station
        self._update_ids()

    def set_location(self, location):
        self.location = location
        self._update_ids()

    def set_channel(self, channel):
        self.channel = channel
        self._update_ids()

    def overlaps(self, tmin, tmax):
        '''
        Check if trace has overlap with a given time span.
        '''
        return not (tmax < self.tmin or self.tmax < tmin)

    def is_relevant(self, tmin, tmax, selector=None):
        '''
        Check if trace has overlap with a given time span and matches a
        condition callback. (internal use)
        '''

        return not (tmax <= self.tmin or self.tmax < tmin) \
            and (selector is None or selector(self))

    def _update_ids(self):
        '''
        Update dependent ids.
        '''

        self.full_id = (
            self.network, self.station, self.location, self.channel,
            self.tmin)
        self.nslc_id = reuse(
            (self.network, self.station, self.location, self.channel))

    def prune_from_reuse_cache(self):
        util.deuse(self.nslc_id)
        util.deuse(self.network)
        util.deuse(self.station)
        util.deuse(self.location)
        util.deuse(self.channel)

    def set_mtime(self, mtime):
        '''
        Set modification time of the trace.
        '''

        self.mtime = mtime

    def get_xdata(self):
        '''
        Create array for time axis.
        '''

        if self.ydata is None:
            raise NoData()

        return self.tmin \
            + num.arange(len(self.ydata), dtype=num.float64) * self.deltat

    def get_ydata(self):
        '''
        Get data array.
        '''

        if self.ydata is None:
            raise NoData()

        return self.ydata

    def set_ydata(self, new_ydata):
        '''
        Replace data array.
        '''

        self.drop_growbuffer()
        self.ydata = new_ydata
        self.tmax = self.tmin+(len(self.ydata)-1)*self.deltat

    def data_len(self):
        if self.ydata is not None:
            return self.ydata.size
        else:
            return int(round((self.tmax-self.tmin)/self.deltat)) + 1

    def drop_data(self):
        '''
        Forget data, make dataless trace.
        '''

        self.drop_growbuffer()
        self.ydata = None

    def drop_growbuffer(self):
        '''
        Detach the trace's grow buffer.
        '''

        self._growbuffer = None
        self._pchain = None

    def copy(self, data=True):
        '''
        Make a deep copy of the trace.
        '''

        tracecopy = copy.copy(self)
        tracecopy.drop_growbuffer()
        if data:
            tracecopy.ydata = self.ydata.copy()
        tracecopy.meta = copy.deepcopy(self.meta)
        return tracecopy

    def crop_zeros(self):
        '''
        Remove any zeros at beginning and end.
        '''

        indices = num.where(self.ydata != 0.0)[0]
        if indices.size == 0:
            raise NoData()

        ibeg = indices[0]
        iend = indices[-1]+1
        if ibeg == 0 and iend == self.ydata.size-1:
            return

        self.drop_growbuffer()
        self.ydata = self.ydata[ibeg:iend].copy()
        self.tmin = self.tmin+ibeg*self.deltat
        self.tmax = self.tmin+(len(self.ydata)-1)*self.deltat
        self._update_ids()

    def append(self, data):
        '''
        Append data to the end of the trace.

        To make this method efficient when successively very few or even
        single samples are appended, a larger grow buffer is allocated upon
        first invocation. The trace's data is then changed to be a view into
        the currently filled portion of the grow buffer array.
        '''

        assert self.ydata.dtype == data.dtype
        newlen = data.size + self.ydata.size
        if self._growbuffer is None or self._growbuffer.size < newlen:
            self._growbuffer = num.empty(newlen*2, dtype=self.ydata.dtype)
            self._growbuffer[:self.ydata.size] = self.ydata
        self._growbuffer[self.ydata.size:newlen] = data
        self.ydata = self._growbuffer[:newlen]
        self.tmax = self.tmin + (newlen-1)*self.deltat

    def chop(
            self, tmin, tmax, inplace=True, include_last=False,
            snap=(round, round), want_incomplete=True):

        '''
        Cut the trace to given time span.

        If the ``inplace`` argument is True (the default) the trace is cut
        in place, otherwise a new trace with the cut part is returned. By
        default, the indices where to start and end the trace data array are
        determined by rounding of ``tmin`` and ``tmax`` to sampling instances
        using Python's :py:func:`round` function. This behaviour can be
        changed with the ``snap`` argument, which takes a tuple of two
        functions (one for the lower and one for the upper end) to be used
        instead of :py:func:`round`. The last sample is by default not
        included unless ``include_last`` is set to True. If the given time
        span exceeds the available time span of the trace, the available
        part is returned, unless ``want_incomplete`` is set to False - in
        that case, a :py:exc:`NoData` exception is raised. This exception is
        always raised, when the requested time span does not overlap with
        the trace's time span.
        '''

        if want_incomplete:
            if tmax <= self.tmin-self.deltat or self.tmax+self.deltat < tmin:
                raise NoData()
        else:
            if tmin < self.tmin or self.tmax < tmax:
                raise NoData()

        ibeg = max(0, t2ind(tmin-self.tmin, self.deltat, snap[0]))
        iplus = 0
        if include_last:
            iplus = 1

        iend = min(
            self.data_len(),
            t2ind(tmax-self.tmin, self.deltat, snap[1])+iplus)

        if ibeg >= iend:
            raise NoData()

        obj = self
        if not inplace:
            obj = self.copy(data=False)

        self.drop_growbuffer()
        if self.ydata is not None:
            obj.ydata = self.ydata[ibeg:iend].copy()
        else:
            obj.ydata = None

        obj.tmin = obj.tmin+ibeg*obj.deltat
        obj.tmax = obj.tmin+((iend-ibeg)-1)*obj.deltat

        obj._update_ids()

        return obj
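
    # Cutting sketch (illustrative, assuming ``tr`` is a Trace and ``tpick``
    # a time within its span): extract a 10 s window around a pick without
    # touching the original trace.
    #
    #     window = tr.chop(tpick - 5., tpick + 5., inplace=False)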


    def downsample(self, ndecimate, snap=False, initials=None, demean=False,
                   ftype='fir-remez'):
        '''
        Downsample trace by a given integer factor.

        Antialiasing filter details:

        * ``iir``: A Chebyshev type I digital filter of order 8 with maximum
          ripple of 0.05 dB in the passband.
        * ``fir``: A FIR filter using a Hamming window and 31 taps and a
          well-behaved phase response.
        * ``fir-remez``: A FIR filter based on the Remez exchange algorithm
          with ``45*ndecimate`` taps and a cutoff at 75% Nyquist frequency.

        Comparison of the digital filters:

        .. figure :: ../../static/downsampling-filter-comparison.png
            :width: 60%
            :alt: Comparison of the downsampling filters.

        :param ndecimate: decimation factor, avoid values larger than 8
        :param snap: whether to put the new sampling instances closest to
            multiples of the sampling rate.
        :param initials: ``None``, ``True``, or initial conditions for the
            anti-aliasing filter, obtained from a previous run. In the latter
            two cases the final state of the filter is returned instead of
            ``None``.
        :param demean: whether to demean the signal before filtering.
        :param ftype: which FIR filter to use, choose from
            ``'iir'``, ``'fir'``, ``'fir-remez'``. Default is
            ``'fir-remez'``.
        '''
        newdeltat = self.deltat*ndecimate
        if snap:
            ilag = int(round(
                (math.ceil(self.tmin / newdeltat) * newdeltat - self.tmin)
                / self.deltat))
        else:
            ilag = 0

        data = self.ydata.astype(num.float64)
        if snap and 0 < ilag < self.ydata.size:
            self.tmin += ilag*self.deltat

        if demean:
            data -= num.mean(data)

        if data.size != 0:
            result = util.decimate(
                data, ndecimate, ftype=ftype, zi=initials, ioff=ilag)
        else:
            result = data

        if initials is None:
            self.ydata, finals = result, None
        else:
            self.ydata, finals = result

        self.deltat = reuse(self.deltat*ndecimate)
        self.tmax = self.tmin+(len(self.ydata)-1)*self.deltat
        self._update_ids()

        return finals

    def downsample_to(self, deltat, snap=False, allow_upsample_max=1,
                      initials=None, demean=False, ftype='fir-remez'):

        '''
        Downsample to given sampling rate.

        Tries to downsample the trace to a target sampling interval of
        ``deltat``. This runs :py:meth:`Trace.downsample` one or several
        times. If ``allow_upsample_max`` is set to a value larger than 1,
        intermediate upsampling steps are allowed, in order to increase the
        number of possible downsampling ratios.

        If the requested ratio is not supported, an exception of type
        :py:exc:`pyrocko.util.UnavailableDecimation` is raised.

        :param deltat: desired sampling interval in [s]
        :param allow_upsample_max: maximum upsampling of the signal to
            achieve the desired deltat. Default is ``1``.
        :param snap: whether to put the new sampling instances closest to
            multiples of the sampling rate.
        :param initials: ``None``, ``True``, or initial conditions for the
            anti-aliasing filter, obtained from a previous run. In the latter
            two cases the final state of the filter is returned instead of
            ``None``.
        :param demean: whether to demean the signal before filtering.
        :param ftype: which FIR filter to use, choose from
            ``'iir'``, ``'fir'``, ``'fir-remez'``. Default is
            ``'fir-remez'``.

        See also: :py:meth:`Trace.downsample`
        '''

        ratio = deltat/self.deltat
        rratio = round(ratio)

        ok = False
        for upsratio in range(1, allow_upsample_max+1):
            dratio = (upsratio/self.deltat) / (1./deltat)
            if abs(dratio - round(dratio)) / dratio < 0.0001 and \
                    util.decitab(int(round(dratio))):

                ok = True
                break

        if not ok:
            raise util.UnavailableDecimation('ratio = %g' % ratio)

        if upsratio > 1:
            self.drop_growbuffer()
            ydata = self.ydata
            self.ydata = num.zeros(
                ydata.size*upsratio-(upsratio-1), ydata.dtype)
            self.ydata[::upsratio] = ydata
            for i in range(1, upsratio):
                self.ydata[i::upsratio] = \
                    float(i)/upsratio * ydata[:-1] \
                    + float(upsratio-i)/upsratio * ydata[1:]
            self.deltat = self.deltat/upsratio

            ratio = deltat/self.deltat
            rratio = round(ratio)

        deci_seq = util.decitab(int(rratio))
        finals = []
        for i, ndecimate in enumerate(deci_seq):
            if ndecimate != 1:
                xinitials = None
                if initials is not None:
                    xinitials = initials[i]
                finals.append(self.downsample(
                    ndecimate, snap=snap, initials=xinitials, demean=demean,
                    ftype=ftype))

        if initials is not None:
            return finals
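
    # Decimation sketch (illustrative): bring a 200 Hz trace down to 1 Hz.
    # ``downsample_to`` chains several ``downsample`` steps so that each
    # individual decimation factor stays small; unsupported ratios raise
    # UnavailableDecimation.
    #
    #     tr.downsample_to(1.0, demean=True)  # 0.005 s -> 1.0 s sampling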


    def resample(self, deltat):
        '''
        Resample to given sampling rate ``deltat``.

        Resampling is performed in the frequency domain.
        '''

        ndata = self.ydata.size
        ntrans = nextpow2(ndata)
        fntrans2 = ntrans * self.deltat/deltat
        ntrans2 = int(round(fntrans2))
        deltat2 = self.deltat * float(ntrans)/float(ntrans2)
        ndata2 = int(round(ndata*self.deltat/deltat2))
        if abs(fntrans2 - ntrans2) > 1e-7:
            logger.warning(
                'resample: requested deltat %g could not be matched exactly: '
                '%g' % (deltat, deltat2))

        data = self.ydata
        data_pad = num.zeros(ntrans, dtype=float)
        data_pad[:ndata] = data
        fdata = num.fft.rfft(data_pad)
        fdata2 = num.zeros((ntrans2+1)//2, dtype=fdata.dtype)
        n = min(fdata.size, fdata2.size)
        fdata2[:n] = fdata[:n]
        data2 = num.fft.irfft(fdata2)
        data2 = data2[:ndata2]
        data2 *= float(ntrans2) / float(ntrans)
        self.deltat = deltat2
        self.set_ydata(data2)

    def resample_simple(self, deltat):
        tyear = 3600*24*365.

        if deltat == self.deltat:
            return

        if abs(self.deltat - deltat) * tyear/deltat < deltat:
            logger.warning(
                'resample_simple: less than one sample would have to be '
                'inserted/deleted per year. Doing nothing.')
            return

        ninterval = int(round(deltat / (self.deltat - deltat)))
        if abs(ninterval) < 20:
            logger.error(
                'resample_simple: sample insertion/deletion interval less '
                'than 20. results would be erroneous.')
            raise ResamplingFailed()

        delete = False
        if ninterval < 0:
            ninterval = - ninterval
            delete = True

        tyearbegin = util.year_start(self.tmin)

        nmin = int(round((self.tmin - tyearbegin)/deltat))

        ibegin = (((nmin-1)//ninterval)+1) * ninterval - nmin
        nindices = (len(self.ydata) - ibegin - 1) // ninterval + 1
        if nindices > 0:
            indices = ibegin + num.arange(nindices) * ninterval
            data_split = num.split(self.ydata, indices)
            data = []
            for ln, h in zip(data_split[:-1], data_split[1:]):
                if delete:
                    ln = ln[:-1]

                data.append(ln)
                if not delete:
                    if ln.size == 0:
                        v = h[0]
                    else:
                        v = 0.5*(ln[-1] + h[0])
                    data.append(num.array([v], dtype=ln.dtype))

            data.append(data_split[-1])

            ydata_new = num.concatenate(data)

            self.tmin = tyearbegin + nmin * deltat
            self.deltat = deltat
            self.set_ydata(ydata_new)

    def stretch(self, tmin_new, tmax_new):
        '''
        Stretch signal while preserving sample rate using sinc interpolation.

        :param tmin_new: new time of first sample
        :param tmax_new: new time of last sample

        This method can be used to correct for a small linear time drift or
        to introduce sub-sample time shifts. The amount of stretching is
        limited to 10% by the implementation and is expected to be much
        smaller than that by the approximations used.
        '''

        from pyrocko import signal_ext

        i_control = num.array([0, self.ydata.size-1], dtype=num.int64)
        t_control = num.array([tmin_new, tmax_new], dtype=float)

        r = (tmax_new - tmin_new) / self.deltat + 1.0
        n_new = int(round(r))
        if abs(n_new - r) > 0.001:
            n_new = int(math.floor(r))

        assert n_new >= 2

        tmax_new = tmin_new + (n_new-1) * self.deltat

        ydata_new = num.empty(n_new, dtype=float)
        signal_ext.antidrift(i_control, t_control,
                             self.ydata.astype(float),
                             tmin_new, self.deltat, ydata_new)

        self.tmin = tmin_new
        self.set_ydata(ydata_new)
        self._update_ids()
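
    # Sub-sample shift sketch (illustrative): move a trace by a tenth of a
    # sample. The sample values are re-interpolated; tmin/tmax move while
    # deltat stays fixed.
    #
    #     tshift = 0.1 * tr.deltat
    #     tr.stretch(tr.tmin + tshift, tr.tmax + tshift)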


    def nyquist_check(self, frequency, intro='Corner frequency', warn=True,
                      raise_exception=False):

        '''
        Check if a given frequency is above the Nyquist frequency of the
        trace.

        :param intro: string used to introduce the warning/error message
        :param warn: whether to emit a warning
        :param raise_exception: whether to raise an :py:exc:`AboveNyquist`
            exception.
        '''

        if frequency >= 0.5/self.deltat:
            message = '%s (%g Hz) is equal to or higher than Nyquist ' \
                'frequency (%g Hz). (Trace %s)' \
                % (intro, frequency, 0.5/self.deltat, self.name())
            if warn:
                logger.warning(message)
            if raise_exception:
                raise AboveNyquist(message)

    def lowpass(self, order, corner, nyquist_warn=True,
                nyquist_exception=False, demean=True):

        '''
        Apply Butterworth lowpass to the trace.

        :param order: order of the filter
        :param corner: corner frequency of the filter

        Mean is removed before filtering.
        '''

        self.nyquist_check(
            corner, 'Corner frequency of lowpass', nyquist_warn,
            nyquist_exception)

        (b, a) = _get_cached_filter_coefs(
            order, [corner*2.0*self.deltat], btype='low')

        if len(a) != order+1 or len(b) != order+1:
            logger.warning(
                'Erroneous filter coefficients returned by '
                'scipy.signal.butter(). You may need to downsample the '
                'signal before filtering.')

        data = self.ydata.astype(num.float64)
        if demean:
            data -= num.mean(data)
        self.drop_growbuffer()
        self.ydata = signal.lfilter(b, a, data)

    def highpass(self, order, corner, nyquist_warn=True,
                 nyquist_exception=False, demean=True):

        '''
        Apply Butterworth highpass to the trace.

        :param order: order of the filter
        :param corner: corner frequency of the filter

        Mean is removed before filtering.
        '''

        self.nyquist_check(
            corner, 'Corner frequency of highpass', nyquist_warn,
            nyquist_exception)

        (b, a) = _get_cached_filter_coefs(
            order, [corner*2.0*self.deltat], btype='high')

        data = self.ydata.astype(num.float64)
        if len(a) != order+1 or len(b) != order+1:
            logger.warning(
                'Erroneous filter coefficients returned by '
                'scipy.signal.butter(). You may need to downsample the '
                'signal before filtering.')
        if demean:
            data -= num.mean(data)
        self.drop_growbuffer()
        self.ydata = signal.lfilter(b, a, data)

    def bandpass(self, order, corner_hp, corner_lp, demean=True):
        '''
        Apply Butterworth bandpass to the trace.

        :param order: order of the filter
        :param corner_hp: lower corner frequency of the filter
        :param corner_lp: upper corner frequency of the filter

        Mean is removed before filtering.
        '''

        self.nyquist_check(corner_hp, 'Lower corner frequency of bandpass')
        self.nyquist_check(corner_lp, 'Higher corner frequency of bandpass')
        (b, a) = _get_cached_filter_coefs(
            order,
            [corner*2.0*self.deltat for corner in (corner_hp, corner_lp)],
            btype='band')
        data = self.ydata.astype(num.float64)
        if demean:
            data -= num.mean(data)
        self.drop_growbuffer()
        self.ydata = signal.lfilter(b, a, data)
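
    # Filtering sketch (illustrative corner values): a causal 4th-order
    # Butterworth bandpass between 0.1 Hz and 2 Hz. Both corners must stay
    # below the Nyquist frequency ``0.5/tr.deltat``.
    #
    #     tr.bandpass(4, 0.1, 2.0)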


    def bandstop(self, order, corner_hp, corner_lp, demean=True):
        '''
        Apply bandstop (attenuates frequencies in band) to the trace.

        :param order: order of the filter
        :param corner_hp: lower corner frequency of the filter
        :param corner_lp: upper corner frequency of the filter

        Mean is removed before filtering.
        '''

        self.nyquist_check(corner_hp, 'Lower corner frequency of bandstop')
        self.nyquist_check(corner_lp, 'Higher corner frequency of bandstop')
        (b, a) = _get_cached_filter_coefs(
            order,
            [corner*2.0*self.deltat for corner in (corner_hp, corner_lp)],
            btype='bandstop')
        data = self.ydata.astype(num.float64)
        if demean:
            data -= num.mean(data)
        self.drop_growbuffer()
        self.ydata = signal.lfilter(b, a, data)

    def envelope(self, inplace=True):
        '''
        Calculate the envelope of the trace.

        :param inplace: calculate envelope in place

        The calculation follows:

        .. math::

            Y' = \\sqrt{Y^2+H(Y)^2}

        where H is the Hilbert transform of the signal Y.
        '''

        y = self.ydata.astype(float)
        env = num.abs(hilbert(y))
        if inplace:
            self.drop_growbuffer()
            self.ydata = env
        else:
            tr = self.copy(data=False)
            tr.ydata = env
            return tr
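
    # Equivalent envelope computation (sketch, using scipy directly, assuming
    # ``tr`` is a Trace): the magnitude of the analytic signal equals
    # sqrt(Y**2 + H(Y)**2).
    #
    #     from scipy.signal import hilbert as scipy_hilbert
    #     env = num.abs(scipy_hilbert(tr.ydata))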


    def taper(self, taperer, inplace=True, chop=False):
        '''
        Apply a :py:class:`Taper` to the trace.

        :param taperer: instance of :py:class:`Taper` subclass
        :param inplace: apply taper inplace
        :param chop: if ``True``: exclude tapered parts from the resulting
            trace
        '''

        if not inplace:
            tr = self.copy()
        else:
            tr = self

        if chop:
            i, n = taperer.span(tr.ydata, tr.tmin, tr.deltat)
            tr.shift(i*tr.deltat)
            tr.set_ydata(tr.ydata[i:i+n])

        taperer(tr.ydata, tr.tmin, tr.deltat)

        if not inplace:
            return tr

    def whiten(self, order=6):
        '''
        Whiten signal in time domain using autoregression and recursive
        filter.

        :param order: order of the autoregression process
        '''

        b, a = self.whitening_coefficients(order)
        self.drop_growbuffer()
        self.ydata = signal.lfilter(b, a, self.ydata)

    def whitening_coefficients(self, order=6):
        ar = yulewalker(self.ydata, order)
        b, a = [1.] + ar.tolist(), [1.]
        return b, a

    def ampspec_whiten(
            self,
            width,
            td_taper='auto',
            fd_taper='auto',
            pad_to_pow2=True,
            demean=True):

        '''
        Whiten signal via frequency domain using moving average on amplitude
        spectra.

        :param width: width of smoothing kernel [Hz]
        :param td_taper: time domain taper, object of type :py:class:`Taper`
            or ``None`` or ``'auto'``.
        :param fd_taper: frequency domain taper, object of type
            :py:class:`Taper` or ``None`` or ``'auto'``.
        :param pad_to_pow2: whether to pad the signal with zeros up to a
            length of 2^n
        :param demean: whether to demean the signal before tapering

        The signal is first demeaned and then tapered using ``td_taper``.
        Then, the spectrum is calculated and inversely weighted with a
        smoothed version of its amplitude spectrum. A moving average is used
        for the smoothing. The smoothed spectrum is then tapered using
        ``fd_taper``. Finally, the smoothed and tapered spectrum is
        back-transformed into the time domain.

        If ``td_taper`` is set to ``'auto'``, ``CosFader(1.0/width)`` is
        used. If ``fd_taper`` is set to ``'auto'``, ``CosFader(width)`` is
        used.
        '''

        ndata = self.data_len()

        if pad_to_pow2:
            ntrans = nextpow2(ndata)
        else:
            ntrans = ndata

        df = 1./(ntrans*self.deltat)
        nw = int(round(width/df))
        if ndata//2+1 <= nw:
            raise TraceTooShort(
                'Samples in trace: %s, samples needed: %s' % (ndata, nw))

        if td_taper == 'auto':
            td_taper = CosFader(1./width)

        if fd_taper == 'auto':
            fd_taper = CosFader(width)

        if td_taper:
            self.taper(td_taper)

        ydata = self.get_ydata().astype(float)
        if demean:
            ydata -= ydata.mean()

        spec = num.fft.rfft(ydata, ntrans)

        amp = num.abs(spec)
        nspec = amp.size
        csamp = num.cumsum(amp)
        amp_smoothed = num.empty(nspec, dtype=csamp.dtype)
        n1, n2 = nw//2, nw//2 + nspec - nw
        amp_smoothed[n1:n2] = (csamp[nw:] - csamp[:-nw]) / nw
        amp_smoothed[:n1] = amp_smoothed[n1]
        amp_smoothed[n2:] = amp_smoothed[n2-1]

        denom = amp_smoothed * amp
        numer = amp
        eps = num.mean(denom) * 1e-9
        if eps == 0.0:
            eps = 1e-9

        numer += eps
        denom += eps
        spec *= numer/denom

        if fd_taper:
            fd_taper(spec, 0., df)

        ydata = num.fft.irfft(spec)
        self.set_ydata(ydata[:ndata])

    def _get_cached_freqs(self, nf, deltaf):
        ck = (nf, deltaf)
        if ck not in Trace.cached_frequencies:
            Trace.cached_frequencies[ck] = deltaf * num.arange(
                nf, dtype=float)

        return Trace.cached_frequencies[ck]

    def bandpass_fft(self, corner_hp, corner_lp):
        '''
        Apply boxcar bandpass to trace (in spectral domain).
        '''

        n = len(self.ydata)
        n2 = nextpow2(n)
        data = num.zeros(n2, dtype=num.float64)
        data[:n] = self.ydata
        fdata = num.fft.rfft(data)
        freqs = self._get_cached_freqs(len(fdata), 1./(self.deltat*n2))
        fdata[0] = 0.0
        fdata *= num.logical_and(corner_hp < freqs, freqs < corner_lp)
        data = num.fft.irfft(fdata)
        self.drop_growbuffer()
        self.ydata = data[:n]

    def shift(self, tshift):
        '''
        Time shift the trace.
        '''

        self.tmin += tshift
        self.tmax += tshift
        self._update_ids()

    def snap(self, inplace=True, interpolate=False):
        '''
        Shift trace samples to nearest even multiples of the sampling rate.

        :param inplace: (boolean) snap traces inplace

        If ``inplace`` is ``False`` and the difference between the snapped
        and the original values of tmin and tmax is smaller than
        0.01 x deltat, :py:func:`snap` returns the unsnapped instance of the
        original trace.
        '''

        tmin = round(self.tmin/self.deltat)*self.deltat
        tmax = tmin + (self.ydata.size-1)*self.deltat

        if inplace:
            xself = self
        else:
            if abs(self.tmin - tmin) < 1e-2*self.deltat and \
                    abs(self.tmax - tmax) < 1e-2*self.deltat:
                return self

            xself = self.copy()

        if interpolate:
            from pyrocko import signal_ext
            n = xself.data_len()
            ydata_new = num.empty(n, dtype=float)
            i_control = num.array([0, n-1], dtype=num.int64)
            tref = tmin
            t_control = num.array(
                [float(xself.tmin-tref), float(xself.tmax-tref)],
                dtype=float)

            signal_ext.antidrift(i_control, t_control,
                                 xself.ydata.astype(float),
                                 float(tmin-tref), xself.deltat, ydata_new)

            xself.ydata = ydata_new

        xself.tmin = tmin
        xself.tmax = tmax
        xself._update_ids()

        return xself

    def fix_deltat_rounding_errors(self):
        '''
        Try to undo sampling rate rounding errors.

        See :py:func:`fix_deltat_rounding_errors`.
        '''

        self.deltat = fix_deltat_rounding_errors(self.deltat)
        self.tmax = self.tmin + (self.data_len() - 1) * self.deltat

    def sta_lta_centered(self, tshort, tlong, quad=True, scalingmethod=1):
        '''
        Run special STA/LTA filter where the short time window is centered
        on the long time window.

        :param tshort: length of short time window in [s]
        :param tlong: length of long time window in [s]
        :param quad: whether to square the data prior to applying the
            STA/LTA filter
        :param scalingmethod: integer key to select how output values are
            scaled / normalized (``1``, ``2``, or ``3``)

        ============= ====================================== ===========
        Scalingmethod Implementation                         Range
        ============= ====================================== ===========
        ``1``         As/Al* Ts/Tl                           [0,1]
        ``2``         (As/Al - 1) / (Tl/Ts - 1)              [-Ts/Tl,1]
        ``3``         Like ``2`` but clipping range at zero  [0,1]
        ============= ====================================== ===========

        '''

        nshort = int(round(tshort/self.deltat))
        nlong = int(round(tlong/self.deltat))

        assert nshort < nlong
        if nlong > len(self.ydata):
            raise TraceTooShort(
                'Samples in trace: %s, samples needed: %s'
                % (len(self.ydata), nlong))

        if quad:
            sqrdata = self.ydata**2
        else:
            sqrdata = self.ydata

        mavg_short = moving_avg(sqrdata, nshort)
        mavg_long = moving_avg(sqrdata, nlong)

        self.drop_growbuffer()

        if scalingmethod not in (1, 2, 3):
            raise Exception('Invalid argument to scalingrange argument.')

        if scalingmethod == 1:
            self.ydata = mavg_short/mavg_long * float(nshort)/float(nlong)
        elif scalingmethod in (2, 3):
            self.ydata = (mavg_short/mavg_long - 1.) \
                / ((float(nlong)/float(nshort)) - 1)

            if scalingmethod == 3:
                self.ydata = num.maximum(self.ydata, 0.)
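
    # Trigger sketch (illustrative window lengths): a classic 2 s / 30 s
    # STA/LTA on squared amplitudes, with the output normalized to [0, 1]
    # via scalingmethod=3.
    #
    #     tr.sta_lta_centered(2., 30., quad=True, scalingmethod=3)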


    def sta_lta_right(self, tshort, tlong, quad=True, scalingmethod=1):
        '''
        Run special STA/LTA filter where the short time window is
        overlapping with the last part of the long time window.

        :param tshort: length of short time window in [s]
        :param tlong: length of long time window in [s]
        :param quad: whether to square the data prior to applying the
            STA/LTA filter
        :param scalingmethod: integer key to select how output values are
            scaled / normalized (``1``, ``2``, or ``3``)

        ============= ====================================== ===========
        Scalingmethod Implementation                         Range
        ============= ====================================== ===========
        ``1``         As/Al* Ts/Tl                           [0,1]
        ``2``         (As/Al - 1) / (Tl/Ts - 1)              [-Ts/Tl,1]
        ``3``         Like ``2`` but clipping range at zero  [0,1]
        ============= ====================================== ===========

        With ``scalingmethod=1``, the values produced by this variant of the
        STA/LTA are equivalent to

        .. math::
            s_i = \\frac{s}{l} \\frac{\\frac{1}{s}\\sum_{j=i}^{i+s-1} f_j}
                               {\\frac{1}{l}\\sum_{j=i+s-l}^{i+s-1} f_j}

        where :math:`f_j` are the input samples, :math:`s` is the number of
        samples in the short time window and :math:`l` is the number of
        samples in the long time window.
        '''

        n = self.data_len()
        tmin = self.tmin

        nshort = max(1, int(round(tshort/self.deltat)))
        nlong = max(1, int(round(tlong/self.deltat)))

        assert nshort < nlong

        if nlong > len(self.ydata):
            raise TraceTooShort(
                'Samples in trace: %s, samples needed: %s'
                % (len(self.ydata), nlong))

        if quad:
            sqrdata = self.ydata**2
        else:
            sqrdata = self.ydata

        nshift = int(math.floor(0.5 * (nlong - nshort)))
        if nlong % 2 != 0 and nshort % 2 == 0:
            nshift += 1

        mavg_short = moving_avg(sqrdata, nshort)[nshift:]
        mavg_long = moving_avg(sqrdata, nlong)[:sqrdata.size-nshift]

        self.drop_growbuffer()

        if scalingmethod not in (1, 2, 3):
            raise Exception('Invalid argument to scalingrange argument.')

        if scalingmethod == 1:
            ydata = mavg_short/mavg_long * nshort/nlong
        elif scalingmethod in (2, 3):
            ydata = (mavg_short/mavg_long - 1.) \
                / ((float(nlong)/float(nshort)) - 1)

            if scalingmethod == 3:
                ydata = num.maximum(ydata, 0.)

        self.set_ydata(ydata)

        self.shift((math.ceil(0.5*nlong) - nshort + 1) * self.deltat)

        self.chop(
            tmin + (nlong - nshort) * self.deltat,
            tmin + (n - nshort) * self.deltat)

    def peaks(self, threshold, tsearch,
              deadtime=False,
              nblock_duration_detection=100):

        '''
        Detect peaks above a given threshold (method 1).

        From every instant, where the signal rises above ``threshold``, a
        time length of ``tsearch`` seconds is searched for a maximum. A list
        with tuples (time, value) for each detected peak is returned. The
        ``deadtime`` argument turns on a special deadtime duration detection
        algorithm useful in combination with recursive STA/LTA filters.
        '''

        y = self.ydata
        above = num.where(y > threshold, 1, 0)
        deriv = num.zeros(y.size, dtype=num.int8)
        deriv[1:] = above[1:]-above[:-1]
        itrig_positions = num.nonzero(deriv > 0)[0]
        tpeaks = []
        apeaks = []
        tzeros = []
        tzero = self.tmin

        for itrig_pos in itrig_positions:
            ibeg = itrig_pos
            iend = min(
                len(self.ydata),
                itrig_pos + int(math.ceil(tsearch/self.deltat)))
            ipeak = num.argmax(y[ibeg:iend])
            tpeak = self.tmin + (ipeak+ibeg)*self.deltat
            apeak = y[ibeg+ipeak]

            if tpeak < tzero:
                continue

            if deadtime:
                ibeg = itrig_pos
                iblock = 0
                nblock = nblock_duration_detection
                totalsum = 0.
                while True:
                    if ibeg+iblock*nblock >= len(y):
                        tzero = self.tmin + (len(y)-1) * self.deltat
                        break

                    logy = num.log(
                        y[ibeg+iblock*nblock:ibeg+(iblock+1)*nblock])
                    logy[0] += totalsum
                    ysum = num.cumsum(logy)
                    totalsum = ysum[-1]
                    below = num.where(ysum <= 0., 1, 0)
                    deriv = num.zeros(ysum.size, dtype=num.int8)
                    deriv[1:] = below[1:]-below[:-1]
                    izero_positions = num.nonzero(deriv > 0)[0] \
                        + iblock*nblock
                    if len(izero_positions) > 0:
                        tzero = self.tmin + self.deltat * (
                            ibeg + izero_positions[0])
                        break
                    iblock += 1
            else:
                tzero = ibeg*self.deltat + self.tmin + tsearch

            tpeaks.append(tpeak)
            apeaks.append(apeak)
            tzeros.append(tzero)

        if deadtime:
            return tpeaks, apeaks, tzeros
        else:
            return tpeaks, apeaks

    def peaks2(self, threshold, tsearch):

        '''
        Detect peaks above a given threshold (method 2).

        This variant of peak detection is a bit more robust (and slower)
        than the one implemented in :py:meth:`Trace.peaks`. First all
        samples with ``a[i-1] < a[i] > a[i+1]`` are masked as potential
        peaks. From these, iteratively the one with the maximum amplitude
        ``a[j]`` and time ``t[j]`` is chosen and potential peaks within
        ``t[j] - tsearch, t[j] + tsearch``
        are discarded. The algorithm stops, when ``a[j] < threshold`` or
        when no more potential peaks are left.
        '''

        a = num.copy(self.ydata)

        amin = num.min(a)

        a[0] = amin
        a[1:-1][num.diff(a, 2) <= 0.] = amin
        a[-1] = amin

        data = []
        while True:
            imax = num.argmax(a)
            amax = a[imax]

            if amax < threshold or amax == amin:
                break

            data.append((self.tmin + imax * self.deltat, amax))

            ntsearch = int(round(tsearch / self.deltat))
            a[max(imax-ntsearch//2, 0):min(imax+ntsearch//2, a.size)] = amin

        if data:
            data.sort()
            tpeaks, apeaks = list(zip(*data))
        else:
            tpeaks, apeaks = [], []

        return tpeaks, apeaks
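
    # Detection sketch (illustrative): run a right-aligned STA/LTA in place
    # and pick all characteristic-function peaks above 0.5, keeping picks at
    # least 10 s apart.
    #
    #     tr.sta_lta_right(2., 30.)
    #     tpeaks, apeaks = tr.peaks2(0.5, 10.)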


    def extend(self, tmin=None, tmax=None, fillmethod='zeros'):
        '''
        Extend trace to given span.

        :param tmin: begin time of new span
        :param tmax: end time of new span
        :param fillmethod: ``'zeros'``, ``'repeat'``, ``'mean'``, or
            ``'median'``
        '''

        nold = self.ydata.size

        if tmin is not None:
            nl = min(0, int(round((tmin-self.tmin)/self.deltat)))
        else:
            nl = 0

        if tmax is not None:
            nh = max(nold - 1, int(round((tmax-self.tmin)/self.deltat)))
        else:
            nh = nold - 1

        n = nh - nl + 1
        data = num.zeros(n, dtype=self.ydata.dtype)
        data[-nl:-nl + nold] = self.ydata
        if self.ydata.size >= 1:
            if fillmethod == 'repeat':
                data[:-nl] = self.ydata[0]
                data[-nl + nold:] = self.ydata[-1]
            elif fillmethod == 'median':
                v = num.median(self.ydata)
                data[:-nl] = v
                data[-nl + nold:] = v
            elif fillmethod == 'mean':
                v = num.mean(self.ydata)
                data[:-nl] = v
                data[-nl + nold:] = v

        self.drop_growbuffer()
        self.ydata = data

        self.tmin += nl * self.deltat
        self.tmax = self.tmin + (self.ydata.size - 1) * self.deltat

        self._update_ids()

    def transfer(self,
                 tfade=0.,
                 freqlimits=None,
                 transfer_function=None,
                 cut_off_fading=True,
                 demean=True,
                 invert=False):

        '''
        Return new trace with transfer function applied (convolution).

        :param tfade: rise/fall time in seconds of taper applied in time
            domain at both ends of trace.
        :param freqlimits: 4-tuple with corner frequencies in Hz.
        :param transfer_function: FrequencyResponse object; must provide a
            method 'evaluate(freqs)', which returns the transfer function
            coefficients at the frequencies 'freqs'.
        :param cut_off_fading: whether to cut off rise/fall interval in
            output trace.
        :param demean: remove mean before applying transfer function
        :param invert: set to True to do a deconvolution
        '''

        if transfer_function is None:
            transfer_function = FrequencyResponse()

        if self.tmax - self.tmin <= tfade*2.:
            raise TraceTooShort(
                'Trace %s.%s.%s.%s too short for fading length setting. '
                'trace length = %g, fading length = %g'
                % (self.nslc_id + (self.tmax-self.tmin, tfade)))

        if freqlimits is None and (
                transfer_function is None or transfer_function.is_scalar()):

            # special case for flat responses

            output = self.copy()
            data = self.ydata
            ndata = data.size

            if transfer_function is not None:
                c = num.abs(transfer_function.evaluate(num.ones(1))[0])

                if invert:
                    c = 1.0/c

                data = data * c

            if tfade != 0.0:
                data = data * costaper(
                    0., tfade, self.deltat*(ndata-1)-tfade,
                    self.deltat*ndata, ndata, self.deltat)

            output.ydata = data

        else:
            ndata = self.ydata.size
            ntrans = nextpow2(ndata*1.2)
            coefs = self._get_tapered_coefs(
                ntrans, freqlimits, transfer_function, invert=invert)

            data = self.ydata

            data_pad = num.zeros(ntrans, dtype=float)
            data_pad[:ndata] = data
            if demean:
                data_pad[:ndata] -= data.mean()

            if tfade != 0.0:
                data_pad[:ndata] *= costaper(
                    0., tfade, self.deltat*(ndata-1)-tfade,
                    self.deltat*ndata, ndata, self.deltat)

            fdata = num.fft.rfft(data_pad)
            fdata *= coefs
            ddata = num.fft.irfft(fdata)
            output = self.copy()
            output.ydata = ddata[:ndata]

        if cut_off_fading and tfade != 0.0:
            try:
                output.chop(output.tmin+tfade, output.tmax-tfade,
                            inplace=True)
            except NoData:
                raise TraceTooShort(
                    'Trace %s.%s.%s.%s too short for fading length setting. '
                    'trace length = %g, fading length = %g'
                    % (self.nslc_id + (self.tmax-self.tmin, tfade)))
        else:
            output.ydata = output.ydata.copy()

        return output
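
    # Restitution sketch (illustrative): deconvolve an instrument response
    # between 0.01 Hz and 5 Hz with a 10 s cosine fade at both ends. Here
    # ``resp`` stands for any FrequencyResponse object, e.g. obtained from
    # station metadata.
    #
    #     displacement = tr.transfer(
    #         tfade=10., freqlimits=(0.01, 0.02, 4., 5.),
    #         transfer_function=resp, invert=True)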

1664 

1665 def differentiate(self, n=1, order=4, inplace=True): 

1666 ''' 

1667 Approximate first or second derivative of the trace. 

1668 

1669 :param n: 1 for first derivative, 2 for second 

1670 :param order: order of the approximation 2 and 4 are supported 

1671 :param inplace: if ``True`` the trace is differentiated in place, 

1672 otherwise a new trace object with the derivative is returned. 

1673 

1674 Raises :py:exc:`ValueError` for unsupported `n` or `order`. 

1675 

1676 See :py:func:`~pyrocko.util.diff_fd` for implementation details. 

1677 ''' 

1678 

1679 ddata = util.diff_fd(n, order, self.deltat, self.ydata) 

1680 

1681 if inplace: 

1682 self.ydata = ddata 

1683 else: 

1684 output = self.copy(data=False) 

1685 output.set_ydata(ddata) 

1686 return output 

1687 

1688 def drop_chain_cache(self): 

1689 if self._pchain: 

1690 self._pchain.clear() 

1691 

1692 def init_chain(self): 

1693 self._pchain = pchain.Chain( 

1694 do_downsample, 

1695 do_extend, 

1696 do_pre_taper, 

1697 do_fft, 

1698 do_filter, 

1699 do_ifft) 

1700 

1701 def run_chain(self, tmin, tmax, deltat, setup, nocache): 

1702 if setup.domain == 'frequency_domain': 

1703 _, _, data = self._pchain( 

1704 (self, deltat), 

1705 (tmin, tmax), 

1706 (setup.taper,), 

1707 (setup.filter,), 

1708 (setup.filter,), 

1709 nocache=nocache) 

1710 

1711 return num.abs(data), num.abs(data) 

1712 

1713 else: 

1714 processed = self._pchain( 

1715 (self, deltat), 

1716 (tmin, tmax), 

1717 (setup.taper,), 

1718 (setup.filter,), 

1719 (setup.filter,), 

1720 (), 

1721 nocache=nocache) 

1722 

1723 if setup.domain == 'time_domain': 

1724 data = processed.get_ydata() 

1725 

1726 elif setup.domain == 'envelope': 

1727 processed = processed.envelope(inplace=False) 

1728 

1729 elif setup.domain == 'absolute': 

1730 processed.set_ydata(num.abs(processed.get_ydata())) 

1731 

1732 return processed.get_ydata(), processed 

1733 

1734 def misfit(self, candidate, setup, nocache=False, debug=False): 

1735 ''' 

1736 Calculate misfit and normalization factor against candidate trace. 

1737 

1738 :param candidate: :py:class:`Trace` object 

1739 :param setup: :py:class:`MisfitSetup` object 

1740 :returns: tuple ``(m, n)``, where m is the misfit value and n is the 

1741 normalization divisor 

1742 

1743 If the sampling rates of ``self`` and ``candidate`` differ, the trace 

1744 with the higher sampling rate will be downsampled. 

1745 ''' 

1746 

1747 a = self 

1748 b = candidate 

1749 

1750 for tr in (a, b): 

1751 if not tr._pchain: 

1752 tr.init_chain() 

1753 

1754 deltat = max(a.deltat, b.deltat) 

1755 tmin = min(a.tmin, b.tmin) - deltat 

1756 tmax = max(a.tmax, b.tmax) + deltat 

1757 

1758 adata, aproc = a.run_chain(tmin, tmax, deltat, setup, nocache) 

1759 bdata, bproc = b.run_chain(tmin, tmax, deltat, setup, nocache) 

1760 

1761 if setup.domain != 'cc_max_norm': 

1762 m, n = Lx_norm(bdata, adata, norm=setup.norm) 

1763 else: 

1764 ctr = correlate(aproc, bproc, mode='full', normalization='normal') 

1765 ccmax = ctr.max()[1] 

1766 m = 0.5 - 0.5 * ccmax 

1767 n = 0.5 

1768 

1769 if debug: 

1770 return m, n, aproc, bproc 

1771 else: 

1772 return m, n 

1773 

    def spectrum(self, pad_to_pow2=False, tfade=None):
        '''
        Get FFT spectrum of trace.

        :param pad_to_pow2: whether to zero-pad the data to next larger
            power-of-two length
        :param tfade: ``None`` or a time length in seconds, to apply cosine
            shaped tapers to both ends of the trace

        :returns: a tuple with (frequencies, values)

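        Example (a minimal sketch; assumes ``tr`` is a :py:class:`Trace`,
        ``num`` is :py:mod:`numpy` and matplotlib is installed)::

            import matplotlib.pyplot as plt

            freqs, spec = tr.spectrum(pad_to_pow2=True)
            plt.loglog(freqs[1:], num.abs(spec[1:]))
            plt.show()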
        '''

        ndata = self.ydata.size

        if pad_to_pow2:
            ntrans = nextpow2(ndata)
        else:
            ntrans = ndata

        if tfade is None:
            ydata = self.ydata
        else:
            ydata = self.ydata * costaper(
                0., tfade, self.deltat*(ndata-1)-tfade, self.deltat*ndata,
                ndata, self.deltat)

        fydata = num.fft.rfft(ydata, ntrans)
        df = 1./(ntrans*self.deltat)
        fxdata = num.arange(len(fydata))*df
        return fxdata, fydata

    def multi_filter(self, filter_freqs, bandwidth):
        '''
        Apply a bank of Gaussian bandpass filters (multiple filter analysis).

        For each center frequency in ``filter_freqs``, the spectrum is
        weighted with a Gaussian of relative bandwidth ``bandwidth`` and the
        envelope of the corresponding analytic signal is computed.

        :returns: tuple ``(centroid_freqs, signal_tf)``, where
            ``centroid_freqs`` holds the energy-weighted mean frequency of
            each filtered spectrum and ``signal_tf`` is a 2D array with one
            envelope per filter.
        '''

        class Gauss(FrequencyResponse):
            f0 = Float.T()
            a = Float.T(default=1.0)

            def __init__(self, f0, a=1.0, **kwargs):
                FrequencyResponse.__init__(self, f0=f0, a=a, **kwargs)

            def evaluate(self, freqs):
                omega0 = 2.*math.pi*self.f0
                omega = 2.*math.pi*freqs
                return num.exp(-((omega-omega0)
                                 / (self.a*omega0))**2)

        freqs, coefs = self.spectrum()
        n = self.data_len()
        nfilt = len(filter_freqs)
        signal_tf = num.zeros((nfilt, n))
        centroid_freqs = num.zeros(nfilt)
        for ifilt, f0 in enumerate(filter_freqs):
            taper = Gauss(f0, a=bandwidth)
            weights = taper.evaluate(freqs)
            nhalf = freqs.size
            analytic_spec = num.zeros(n, dtype=complex)
            analytic_spec[:nhalf] = coefs*weights

            enorm = num.abs(analytic_spec[:nhalf])**2
            enorm /= num.sum(enorm)

            if n % 2 == 0:
                analytic_spec[1:nhalf-1] *= 2.
            else:
                analytic_spec[1:nhalf] *= 2.

            analytic = num.fft.ifft(analytic_spec)
            signal_tf[ifilt, :] = num.abs(analytic)

            enorm = num.abs(analytic_spec[:nhalf])**2
            enorm /= num.sum(enorm)
            centroid_freqs[ifilt] = num.sum(freqs*enorm)

        return centroid_freqs, signal_tf

    def _get_tapered_coefs(
            self, ntrans, freqlimits, transfer_function, invert=False):

        deltaf = 1./(self.deltat*ntrans)
        nfreqs = ntrans//2 + 1
        transfer = num.ones(nfreqs, dtype=complex)
        hi = snapper(nfreqs, deltaf)
        if freqlimits is not None:
            a, b, c, d = freqlimits
            freqs = num.arange(hi(d)-hi(a), dtype=float)*deltaf \
                + hi(a)*deltaf

            if invert:
                coeffs = transfer_function.evaluate(freqs)
                if num.any(coeffs == 0.0):
                    raise InfiniteResponse('%s.%s.%s.%s' % self.nslc_id)

                transfer[hi(a):hi(d)] = 1.0 / coeffs
            else:
                transfer[hi(a):hi(d)] = transfer_function.evaluate(freqs)

            tapered_transfer = costaper(a, b, c, d, nfreqs, deltaf)*transfer
        else:
            if invert:
                raise Exception(
                    'transfer: `freqlimits` must be given when `invert` is '
                    'set to `True`')

            freqs = num.arange(nfreqs) * deltaf
            tapered_transfer = transfer_function.evaluate(freqs)

        tapered_transfer[0] = 0.0  # don't introduce static offsets
        return tapered_transfer

    def fill_template(self, template, **additional):
        '''
        Fill string template with trace metadata.

        Uses normal python '%(placeholder)s' string templates. The following
        placeholders are considered: ``network``, ``station``, ``location``,
        ``channel``, ``tmin`` (time of first sample), ``tmax`` (time of last
        sample), ``tmin_ms``, ``tmax_ms``, ``tmin_us``, ``tmax_us``,
        ``tmin_year``, ``tmax_year``, ``tmin_month``, ``tmax_month``,
        ``tmin_day``, ``tmax_day``, ``julianday``. The variants ending with
        ``'_ms'`` include milliseconds, those with ``'_us'`` include
        microseconds, those with ``'_year'`` contain only the year. The
        shortcuts ``%n``, ``%s``, ``%l``, ``%c``, ``%b``, ``%e`` and ``%j``
        are expanded to the network, station, location, channel, tmin, tmax
        and julianday placeholders, respectively.

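        Example (a minimal sketch; assumes ``tr`` is a :py:class:`Trace`)::

            fn = tr.fill_template(
                'data/%(network)s/%(station)s/tr_%(tmin_ms)s.mseed')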
        '''

        template = template.replace('%n', '%(network)s')\
            .replace('%s', '%(station)s')\
            .replace('%l', '%(location)s')\
            .replace('%c', '%(channel)s')\
            .replace('%b', '%(tmin)s')\
            .replace('%e', '%(tmax)s')\
            .replace('%j', '%(julianday)s')

        params = dict(
            zip(('network', 'station', 'location', 'channel'), self.nslc_id))
        params['tmin'] = util.time_to_str(
            self.tmin, format='%Y-%m-%d_%H-%M-%S')
        params['tmax'] = util.time_to_str(
            self.tmax, format='%Y-%m-%d_%H-%M-%S')
        params['tmin_ms'] = util.time_to_str(
            self.tmin, format='%Y-%m-%d_%H-%M-%S.3FRAC')
        params['tmax_ms'] = util.time_to_str(
            self.tmax, format='%Y-%m-%d_%H-%M-%S.3FRAC')
        params['tmin_us'] = util.time_to_str(
            self.tmin, format='%Y-%m-%d_%H-%M-%S.6FRAC')
        params['tmax_us'] = util.time_to_str(
            self.tmax, format='%Y-%m-%d_%H-%M-%S.6FRAC')
        params['tmin_year'], params['tmin_month'], params['tmin_day'] \
            = util.time_to_str(self.tmin, format='%Y-%m-%d').split('-')
        params['tmax_year'], params['tmax_month'], params['tmax_day'] \
            = util.time_to_str(self.tmax, format='%Y-%m-%d').split('-')
        params['julianday'] = util.julian_day_of_year(self.tmin)
        params.update(additional)
        return template % params

    def plot(self):
        '''
        Show trace with matplotlib.

        See also: :py:meth:`Trace.snuffle`.
        '''

        import pylab
        pylab.plot(self.get_xdata(), self.get_ydata())
        name = '%s %s %s - %s' % (
            self.channel,
            self.station,
            time.strftime("%d-%m-%y %H:%M:%S", time.gmtime(self.tmin)),
            time.strftime("%d-%m-%y %H:%M:%S", time.gmtime(self.tmax)))

        pylab.title(name)
        pylab.show()

    def snuffle(self, **kwargs):
        '''
        Show trace in a snuffler window.

        :param stations: list of `pyrocko.model.Station` objects or ``None``
        :param events: list of `pyrocko.model.Event` objects or ``None``
        :param markers: list of `pyrocko.gui.util.Marker` objects or ``None``
        :param ntracks: int, number of tracks to be shown initially
            (default: 12)
        :param follow: time interval (in seconds) for real time follow mode
            or ``None``
        :param controls: bool, whether to show the main controls (default:
            ``True``)
        :param opengl: bool, whether to use opengl (default: ``False``)
        '''

        return snuffle([self], **kwargs)


def snuffle(traces, **kwargs):
    '''
    Show traces in a snuffler window.

    :param traces: list of :py:class:`Trace` objects to be shown
    :param stations: list of `pyrocko.model.Station` objects or ``None``
    :param events: list of `pyrocko.model.Event` objects or ``None``
    :param markers: list of `pyrocko.gui.util.Marker` objects or ``None``
    :param ntracks: int, number of tracks to be shown initially (default: 12)
    :param follow: time interval (in seconds) for real time follow mode or
        ``None``
    :param controls: bool, whether to show the main controls (default:
        ``True``)
    :param opengl: bool, whether to use opengl (default: ``False``)
    '''

    from pyrocko import pile
    from pyrocko.gui import snuffler
    p = pile.Pile()
    if traces:
        trf = pile.MemTracesFile(None, traces)
        p.add_file(trf)
    return snuffler.snuffle(p, **kwargs)


class InfiniteResponse(Exception):
    '''
    This exception is raised by :py:class:`Trace` operations when
    deconvolution of a frequency response (instrument response transfer
    function) would result in a division by zero.
    '''


class MisalignedTraces(Exception):
    '''
    This exception is raised by some :py:class:`Trace` operations when tmin,
    tmax or number of samples do not match.
    '''

    pass


class NoData(Exception):
    '''
    This exception is raised by some :py:class:`Trace` operations when no or
    not enough data is available.
    '''

    pass


class AboveNyquist(Exception):
    '''
    This exception is raised by some :py:class:`Trace` operations when given
    frequencies are above the Nyquist frequency.
    '''

    pass


class TraceTooShort(Exception):
    '''
    This exception is raised by some :py:class:`Trace` operations when the
    trace is too short.
    '''

    pass


class ResamplingFailed(Exception):
    '''
    This exception is raised when resampling of a trace failed.
    '''

    pass


def minmax(traces, key=None, mode='minmax', outer_mode='minmax'):

    '''
    Get data range given traces grouped by selected pattern.

    :param key: a callable which takes as single argument a trace and returns
        a key for the grouping of the results. If this is ``None``, the
        default, ``lambda tr: (tr.network, tr.station, tr.location,
        tr.channel)`` is used.
    :param mode: ``'minmax'`` or floating point number. If this is
        ``'minmax'``, minimum and maximum of the traces are used, if it is a
        number, mean +- standard deviation times ``mode`` is used.
    :param outer_mode: ``'minmax'`` to use minimum and maximum of the
        single-trace ranges, or ``'robust'`` to discard the 10% most extreme
        values on either end.

    :returns: a dict with the combined data ranges.

    Examples::

        ranges = minmax(traces, lambda tr: tr.channel)
        print(ranges['N'])  # min & max of all traces with channel == 'N'
        print(ranges['E'])  # min & max of all traces with channel == 'E'

        ranges = minmax(traces, lambda tr: (tr.network, tr.station))
        print(ranges['GR', 'HAM3'])  # min & max of all traces with
                                     # network == 'GR' and station == 'HAM3'

        ranges = minmax(traces, lambda tr: None)
        print(ranges[None])  # min & max of all traces
    '''

    if key is None:
        key = _default_key

    ranges = defaultdict(list)
    for trace in traces:
        if isinstance(mode, str) and mode == 'minmax':
            mi, ma = num.nanmin(trace.ydata), num.nanmax(trace.ydata)
        else:
            mean = trace.ydata.mean()
            std = trace.ydata.std()
            mi, ma = mean-std*mode, mean+std*mode

        k = key(trace)
        ranges[k].append((mi, ma))

    for k in ranges:
        mins, maxs = num.array(ranges[k]).T
        if outer_mode == 'minmax':
            ranges[k] = num.nanmin(mins), num.nanmax(maxs)
        elif outer_mode == 'robust':
            ranges[k] = num.percentile(mins, 10.), num.percentile(maxs, 90.)

    return ranges


def minmaxtime(traces, key=None):

    '''
    Get time range given traces grouped by selected pattern.

    :param key: a callable which takes as single argument a trace and returns
        a key for the grouping of the results. If this is ``None``, the
        default, ``lambda tr: (tr.network, tr.station, tr.location,
        tr.channel)`` is used.

    :returns: a dict with the combined time ranges.
    '''

    if key is None:
        key = _default_key

    ranges = {}
    for trace in traces:
        mi, ma = trace.tmin, trace.tmax
        k = key(trace)
        if k not in ranges:
            ranges[k] = mi, ma
        else:
            tmi, tma = ranges[k]
            ranges[k] = min(tmi, mi), max(tma, ma)

    return ranges


def degapper(
        traces,
        maxgap=5,
        fillmethod='interpolate',
        deoverlap='use_second',
        maxlap=None):

    '''
    Try to connect traces and remove gaps.

    This method will combine adjacent traces, which match in their network,
    station, location and channel attributes. Overlapping parts are handled
    according to the ``deoverlap`` argument.

    :param traces: input traces, must be sorted by their full_id attribute.
    :param maxgap: maximum number of samples to interpolate.
    :param fillmethod: what to put into the gaps: 'interpolate' or 'zeros'.
    :param deoverlap: how to handle overlaps: 'use_second' to use data from
        second trace (default), 'use_first' to use data from first trace,
        'crossfade_cos' to crossfade with cosine taper, 'add' to add
        amplitude values.
    :param maxlap: maximum number of samples of overlap which are removed

    :returns: list of traces

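    Example (a minimal sketch; assumes ``traces`` is a list of
    :py:class:`Trace` objects)::

        traces.sort(key=lambda tr: tr.full_id)
        continuous = degapper(traces, maxgap=10, fillmethod='zeros')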
    '''

    in_traces = traces
    out_traces = []
    if not in_traces:
        return out_traces
    out_traces.append(in_traces.pop(0))
    while in_traces:

        a = out_traces[-1]
        b = in_traces.pop(0)

        avirt, bvirt = a.ydata is None, b.ydata is None
        assert avirt == bvirt, \
            'traces given to degapper() must either all have data or have ' \
            'no data.'

        virtual = avirt and bvirt

        if (a.nslc_id == b.nslc_id and a.deltat == b.deltat
                and a.data_len() >= 1 and b.data_len() >= 1
                and (virtual or a.ydata.dtype == b.ydata.dtype)):

            dist = (b.tmin-(a.tmin+(a.data_len()-1)*a.deltat))/a.deltat
            idist = int(round(dist))
            if abs(dist - idist) > 0.05 and idist <= maxgap:
                # logger.warning('Cannot degap traces with displaced '
                #                'sampling (%s, %s, %s, %s)' % a.nslc_id)
                pass
            else:
                if 1 < idist <= maxgap:
                    if not virtual:
                        if fillmethod == 'interpolate':
                            filler = a.ydata[-1] + (
                                ((1.0 + num.arange(idist-1, dtype=float))
                                 / idist) * (b.ydata[0]-a.ydata[-1])
                            ).astype(a.ydata.dtype)
                        elif fillmethod == 'zeros':
                            filler = num.zeros(idist-1, dtype=a.ydata.dtype)
                        a.ydata = num.concatenate((a.ydata, filler, b.ydata))
                    a.tmax = b.tmax
                    if a.mtime and b.mtime:
                        a.mtime = max(a.mtime, b.mtime)
                    continue

                elif idist == 1:
                    if not virtual:
                        a.ydata = num.concatenate((a.ydata, b.ydata))
                    a.tmax = b.tmax
                    if a.mtime and b.mtime:
                        a.mtime = max(a.mtime, b.mtime)
                    continue

                elif idist <= 0 and (maxlap is None or -maxlap < idist):
                    if b.tmax > a.tmax:
                        if not virtual:
                            na = a.ydata.size
                            n = -idist+1
                            if deoverlap == 'use_second':
                                a.ydata = num.concatenate(
                                    (a.ydata[:-n], b.ydata))
                            elif deoverlap in ('use_first', 'crossfade_cos'):
                                a.ydata = num.concatenate(
                                    (a.ydata, b.ydata[n:]))
                            elif deoverlap == 'add':
                                a.ydata[-n:] += b.ydata[:n]
                                a.ydata = num.concatenate(
                                    (a.ydata, b.ydata[n:]))
                            else:
                                assert False, 'unknown deoverlap method'

                            if deoverlap == 'crossfade_cos':
                                n = -idist+1
                                taper = 0.5-0.5*num.cos(
                                    (1.+num.arange(n))/(1.+n)*num.pi)
                                a.ydata[na-n:na] *= 1.-taper
                                a.ydata[na-n:na] += b.ydata[:n] * taper

                        a.tmax = b.tmax
                        if a.mtime and b.mtime:
                            a.mtime = max(a.mtime, b.mtime)
                        continue
                    else:
                        # make short second trace vanish
                        continue

        if b.data_len() >= 1:
            out_traces.append(b)

    for tr in out_traces:
        tr._update_ids()

    return out_traces


def rotate(traces, azimuth, in_channels, out_channels):
    '''
    2D rotation of traces.

    :param traces: list of input traces
    :param azimuth: difference of the azimuths of the component directions
        (azimuth of out_channels[0]) - (azimuth of in_channels[0])
    :param in_channels: names of the input channels (e.g. 'N', 'E')
    :param out_channels: names of the output channels (e.g. 'R', 'T')
    :returns: list of rotated traces

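    Example (a minimal sketch; assumes ``traces`` contains matching 'N' and
    'E' components and that ``backazimuth`` is known, so the azimuth of the
    radial component is ``backazimuth + 180.``, as in
    :py:func:`rotate_to_rt`)::

        rotated = rotate(
            traces, backazimuth+180., ('N', 'E'), ('R', 'T'))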
    '''

    phi = azimuth/180.*math.pi
    cphi = math.cos(phi)
    sphi = math.sin(phi)
    rotated = []
    in_channels = tuple(_channels_to_names(in_channels))
    out_channels = tuple(_channels_to_names(out_channels))
    for a in traces:
        for b in traces:
            if ((a.channel, b.channel) == in_channels and
                    a.nslc_id[:3] == b.nslc_id[:3] and
                    abs(a.deltat-b.deltat) < a.deltat*0.001):

                tmin = max(a.tmin, b.tmin)
                tmax = min(a.tmax, b.tmax)

                if tmin < tmax:
                    ac = a.chop(tmin, tmax, inplace=False, include_last=True)
                    bc = b.chop(tmin, tmax, inplace=False, include_last=True)
                    if abs(ac.tmin - bc.tmin) > ac.deltat*0.01:
                        logger.warning(
                            'Cannot rotate traces with displaced sampling '
                            '(%s, %s, %s, %s)' % a.nslc_id)
                        continue

                    acydata = ac.get_ydata()*cphi+bc.get_ydata()*sphi
                    bcydata = -ac.get_ydata()*sphi+bc.get_ydata()*cphi
                    ac.set_ydata(acydata)
                    bc.set_ydata(bcydata)

                    ac.set_codes(channel=out_channels[0])
                    bc.set_codes(channel=out_channels[1])
                    rotated.append(ac)
                    rotated.append(bc)

    return rotated


def rotate_to_rt(n, e, source, receiver, out_channels=('R', 'T')):
    '''
    Rotate traces from NE to RT system.

    :param n:
        North trace.
    :type n:
        :py:class:`~pyrocko.trace.Trace`

    :param e:
        East trace.
    :type e:
        :py:class:`~pyrocko.trace.Trace`

    :param source:
        Source of the recorded signal.
    :type source:
        :py:class:`pyrocko.gf.seismosizer.Source`

    :param receiver:
        Receiver of the recorded signal.
    :type receiver:
        :py:class:`pyrocko.model.location.Location`

    :param out_channels:
        Channel codes of the output channels (radial, transversal).
        Default is ``('R', 'T')``.
    :type out_channels:
        optional, tuple[str, str]

    :returns:
        Rotated traces (radial, transversal).
    :rtype:
        tuple[
            :py:class:`~pyrocko.trace.Trace`,
            :py:class:`~pyrocko.trace.Trace`]
    '''
    azimuth = orthodrome.azimuth(receiver, source) + 180.
    in_channels = n.channel, e.channel
    out = rotate(
        [n, e], azimuth,
        in_channels=in_channels,
        out_channels=out_channels)

    assert len(out) == 2
    for tr in out:
        if tr.channel == out_channels[0]:
            r = tr
        elif tr.channel == out_channels[1]:
            t = tr
        else:
            assert False

    return r, t


def rotate_to_lqt(traces, backazimuth, incidence, in_channels,
                  out_channels=('L', 'Q', 'T')):
    '''
    Rotate traces from ZNE to LQT system.

    :param traces: list of traces in arbitrary order
    :param backazimuth: backazimuth in degrees clockwise from north
    :param incidence: incidence angle in degrees from vertical
    :param in_channels: input channel names
    :param out_channels: output channel names (default: ('L', 'Q', 'T'))
    :returns: list of transformed traces

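    The rotation matrix applied to the (Z, N, E) components is::

        [[ci, -cb*si, -sb*si],
         [si,  cb*ci,  sb*ci],
         [0.,  sb,    -cb   ]]

    with ``ci = cos(incidence)``, ``si = sin(incidence)``,
    ``cb = cos(backazimuth)`` and ``sb = sin(backazimuth)``.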
    '''
    i = incidence/180.*num.pi
    b = backazimuth/180.*num.pi

    ci = num.cos(i)
    cb = num.cos(b)
    si = num.sin(i)
    sb = num.sin(b)

    rotmat = num.array(
        [[ci, -cb*si, -sb*si], [si, cb*ci, sb*ci], [0., sb, -cb]])
    return project(traces, rotmat, in_channels, out_channels)


def _decompose(a):
    '''
    Decompose matrix into independent submatrices.
    '''

    def depends(iout, a):
        row = a[iout, :]
        return set(num.arange(row.size).compress(row != 0.0))

    def provides(iin, a):
        col = a[:, iin]
        return set(num.arange(col.size).compress(col != 0.0))

    a = num.asarray(a)
    outs = set(range(a.shape[0]))
    systems = []
    while outs:
        iout = outs.pop()

        gout = set()
        for iin in depends(iout, a):
            gout.update(provides(iin, a))

        if not gout:
            continue

        gin = set()
        for iout2 in gout:
            gin.update(depends(iout2, a))

        if not gin:
            continue

        for iout2 in gout:
            if iout2 in outs:
                outs.remove(iout2)

        gin = list(gin)
        gin.sort()
        gout = list(gout)
        gout.sort()

        systems.append((gin, gout, a[gout, :][:, gin]))

    return systems


def _channels_to_names(channels):
    from pyrocko import squirrel
    names = []
    for ch in channels:
        if isinstance(ch, model.Channel):
            names.append(ch.name)
        elif isinstance(ch, squirrel.Channel):
            names.append(ch.codes.channel)
        else:
            names.append(ch)

    return names


def project(traces, matrix, in_channels, out_channels):
    '''
    Affine transform of three-component traces.

    Compute matrix-vector product of three-component traces, to e.g. rotate
    traces into a different basis. The traces are distinguished and ordered
    by their channel attribute. The transform is applied to overlapping
    parts of any appropriate combinations of the input traces. This should
    allow this function to be robust with data gaps. It also tries to apply
    the transformation to subsets of the channels, if this is possible, so
    that, if for example a vertical component is missing, horizontal
    components can still be rotated.

    :param traces: list of traces in arbitrary order
    :param matrix: transformation matrix
    :param in_channels: input channel names
    :param out_channels: output channel names
    :returns: list of transformed traces

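    Example (a minimal sketch; a rotation of the horizontal components by 30
    degrees, assuming ``traces`` holds 'Z', 'N' and 'E' components and that
    ``num`` is :py:mod:`numpy`)::

        phi = math.radians(30.)
        matrix = num.array([
            [1., 0., 0.],
            [0., math.cos(phi), math.sin(phi)],
            [0., -math.sin(phi), math.cos(phi)]])

        out = project(traces, matrix, ('Z', 'N', 'E'), ('Z', 'R', 'T'))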
    '''

    in_channels = tuple(_channels_to_names(in_channels))
    out_channels = tuple(_channels_to_names(out_channels))
    systems = _decompose(matrix)

    # fallback to full matrix if some are not quadratic
    for iins, iouts, submatrix in systems:
        if submatrix.shape[0] != submatrix.shape[1]:
            if len(in_channels) != 3 or len(out_channels) != 3:
                return []
            else:
                return _project3(traces, matrix, in_channels, out_channels)

    projected = []
    for iins, iouts, submatrix in systems:
        in_cha = tuple([in_channels[iin] for iin in iins])
        out_cha = tuple([out_channels[iout] for iout in iouts])
        if submatrix.shape[0] == 1:
            projected.extend(_project1(traces, submatrix, in_cha, out_cha))
        elif submatrix.shape[1] == 2:
            projected.extend(_project2(traces, submatrix, in_cha, out_cha))
        else:
            projected.extend(_project3(traces, submatrix, in_cha, out_cha))

    return projected


def project_dependencies(matrix, in_channels, out_channels):
    '''
    Figure out what dependencies :py:func:`project` would produce.
    '''

    in_channels = tuple(_channels_to_names(in_channels))
    out_channels = tuple(_channels_to_names(out_channels))
    systems = _decompose(matrix)

    subpro = []
    for iins, iouts, submatrix in systems:
        if submatrix.shape[0] != submatrix.shape[1]:
            subpro.append((matrix, in_channels, out_channels))

    if not subpro:
        for iins, iouts, submatrix in systems:
            in_cha = tuple([in_channels[iin] for iin in iins])
            out_cha = tuple([out_channels[iout] for iout in iouts])
            subpro.append((submatrix, in_cha, out_cha))

    deps = {}
    for mat, in_cha, out_cha in subpro:
        for oc in out_cha:
            if oc not in deps:
                deps[oc] = []

            for ic in in_cha:
                deps[oc].append(ic)

    return deps


def _project1(traces, matrix, in_channels, out_channels):
    assert len(in_channels) == 1
    assert len(out_channels) == 1
    assert matrix.shape == (1, 1)

    projected = []
    for a in traces:
        if not (a.channel,) == in_channels:
            continue

        ac = a.copy()
        ac.set_ydata(matrix[0, 0]*a.get_ydata())
        ac.set_codes(channel=out_channels[0])
        projected.append(ac)

    return projected


def _project2(traces, matrix, in_channels, out_channels):
    assert len(in_channels) == 2
    assert len(out_channels) == 2
    assert matrix.shape == (2, 2)
    projected = []
    for a in traces:
        for b in traces:
            if not ((a.channel, b.channel) == in_channels and
                    a.nslc_id[:3] == b.nslc_id[:3] and
                    abs(a.deltat-b.deltat) < a.deltat*0.001):
                continue

            tmin = max(a.tmin, b.tmin)
            tmax = min(a.tmax, b.tmax)

            if tmin > tmax:
                continue

            ac = a.chop(tmin, tmax, inplace=False, include_last=True)
            bc = b.chop(tmin, tmax, inplace=False, include_last=True)
            if abs(ac.tmin - bc.tmin) > ac.deltat*0.01:
                logger.warning(
                    'Cannot project traces with displaced sampling '
                    '(%s, %s, %s, %s)' % a.nslc_id)
                continue

            acydata = num.dot(matrix[0], (ac.get_ydata(), bc.get_ydata()))
            bcydata = num.dot(matrix[1], (ac.get_ydata(), bc.get_ydata()))

            ac.set_ydata(acydata)
            bc.set_ydata(bcydata)

            ac.set_codes(channel=out_channels[0])
            bc.set_codes(channel=out_channels[1])

            projected.append(ac)
            projected.append(bc)

    return projected


def _project3(traces, matrix, in_channels, out_channels):
    assert len(in_channels) == 3
    assert len(out_channels) == 3
    assert matrix.shape == (3, 3)
    projected = []
    for a in traces:
        for b in traces:
            for c in traces:
                if not ((a.channel, b.channel, c.channel) == in_channels
                        and a.nslc_id[:3] == b.nslc_id[:3]
                        and b.nslc_id[:3] == c.nslc_id[:3]
                        and abs(a.deltat-b.deltat) < a.deltat*0.001
                        and abs(b.deltat-c.deltat) < b.deltat*0.001):

                    continue

                tmin = max(a.tmin, b.tmin, c.tmin)
                tmax = min(a.tmax, b.tmax, c.tmax)

                if tmin >= tmax:
                    continue

                ac = a.chop(tmin, tmax, inplace=False, include_last=True)
                bc = b.chop(tmin, tmax, inplace=False, include_last=True)
                cc = c.chop(tmin, tmax, inplace=False, include_last=True)
                if (abs(ac.tmin - bc.tmin) > ac.deltat*0.01
                        or abs(bc.tmin - cc.tmin) > bc.deltat*0.01):

                    logger.warning(
                        'Cannot project traces with displaced sampling '
                        '(%s, %s, %s, %s)' % a.nslc_id)
                    continue

                acydata = num.dot(
                    matrix[0],
                    (ac.get_ydata(), bc.get_ydata(), cc.get_ydata()))
                bcydata = num.dot(
                    matrix[1],
                    (ac.get_ydata(), bc.get_ydata(), cc.get_ydata()))
                ccydata = num.dot(
                    matrix[2],
                    (ac.get_ydata(), bc.get_ydata(), cc.get_ydata()))

                ac.set_ydata(acydata)
                bc.set_ydata(bcydata)
                cc.set_ydata(ccydata)

                ac.set_codes(channel=out_channels[0])
                bc.set_codes(channel=out_channels[1])
                cc.set_codes(channel=out_channels[2])

                projected.append(ac)
                projected.append(bc)
                projected.append(cc)

    return projected


def correlate(a, b, mode='valid', normalization=None, use_fft=False):
    '''
    Cross correlation of two traces.

    :param a,b: input traces
    :param mode: ``'valid'``, ``'full'``, or ``'same'``
    :param normalization: ``'normal'``, ``'gliding'``, or ``None``
    :param use_fft: bool, whether to do cross correlation in spectral domain

    :returns: trace containing cross correlation coefficients

    This function computes the cross correlation between two traces. It
    evaluates the discrete equivalent of

    .. math::

       c(t) = \\int_{-\\infty}^{\\infty} a^{\\ast}(\\tau) b(t+\\tau) d\\tau

    where the star denotes complex conjugate. Note, that the arguments here
    are swapped when compared with the :py:func:`numpy.correlate` function,
    which is internally called. This function should be safe even with older
    versions of NumPy, where the correlate function has some problems.

    A trace containing the cross correlation coefficients is returned. The
    time information of the output trace is set so that the returned cross
    correlation can be viewed directly as a function of time lag.

    Example::

        # align two traces a and b containing a time shifted similar signal:
        c = pyrocko.trace.correlate(a, b)
        t, coef = c.max()  # get time and value of maximum
        b.shift(-t)        # align b with a
    '''

    assert_same_sampling_rate(a, b)

    ya, yb = a.ydata, b.ydata

    # need reversed order here:
    yc = numpy_correlate_fixed(yb, ya, mode=mode, use_fft=use_fft)
    kmin, kmax = numpy_correlate_lag_range(yb, ya, mode=mode, use_fft=use_fft)

    if normalization == 'normal':
        normfac = num.sqrt(num.sum(ya**2))*num.sqrt(num.sum(yb**2))
        yc = yc/normfac

    elif normalization == 'gliding':
        if mode != 'valid':
            assert False, 'gliding normalization currently only available ' \
                'with "valid" mode.'

        if ya.size < yb.size:
            yshort, ylong = ya, yb
        else:
            yshort, ylong = yb, ya

        epsilon = 0.00001
        normfac_short = num.sqrt(num.sum(yshort**2))
        normfac = normfac_short * num.sqrt(
            moving_sum(ylong**2, yshort.size, mode='valid')) \
            + normfac_short*epsilon

        if yb.size <= ya.size:
            normfac = normfac[::-1]

        yc /= normfac

    c = a.copy()
    c.set_ydata(yc)
    c.set_codes(*merge_codes(a, b, '~'))
    c.shift(-c.tmin + b.tmin-a.tmin + kmin * c.deltat)

    return c


def deconvolve(
        a, b, waterlevel,
        tshift=0.,
        pad=0.5,
        fd_taper=None,
        pad_to_pow2=True):

    '''
    Waterlevel deconvolution of two traces: ``a`` deconvolved by ``b``.

    :param a,b: input traces
    :param waterlevel: fraction of the peak power of ``b``'s spectrum below
        which the spectral denominator is clipped
    :param tshift: time shift in [s] applied to the output
    :param pad: padding length as fraction of the data length of ``a``
    :param fd_taper: ``None`` or a taper applied to the spectrum of the
        result
    :param pad_to_pow2: whether to pad to the next power of two

    :returns: trace containing the deconvolution result
    '''

    assert_same_sampling_rate(a, b)
    assert abs(a.tmin - b.tmin) < a.deltat * 0.001
    deltat = a.deltat
    npad = int(round(a.data_len()*pad + tshift / deltat))

    ndata = max(a.data_len(), b.data_len())
    ndata_pad = ndata + npad

    if pad_to_pow2:
        ntrans = nextpow2(ndata_pad)
    else:
        ntrans = ndata

    aspec = num.fft.rfft(a.ydata, ntrans)
    bspec = num.fft.rfft(b.ydata, ntrans)

    out = aspec * num.conj(bspec)

    bautocorr = bspec*num.conj(bspec)
    denom = num.maximum(bautocorr, waterlevel * bautocorr.max())

    out /= denom
    df = 1/(ntrans*deltat)

    if fd_taper is not None:
        fd_taper(out, 0.0, df)

    ydata = num.roll(num.fft.irfft(out), int(round(tshift/deltat)))
    c = a.copy(data=False)
    c.set_ydata(ydata[:ndata])
    c.set_codes(*merge_codes(a, b, '/'))
    return c


def assert_same_sampling_rate(a, b, eps=1.0e-6):
    assert same_sampling_rate(a, b, eps), \
        'Sampling rates differ: %g != %g' % (a.deltat, b.deltat)


def same_sampling_rate(a, b, eps=1.0e-6):
    '''
    Check if two traces have the same sampling rate.

    :param a,b: input traces
    :param eps: relative tolerance
    '''

    return abs(a.deltat - b.deltat) < (a.deltat + b.deltat)*eps


def fix_deltat_rounding_errors(deltat):
    '''
    Try to undo sampling rate rounding errors.

    Fix rounding errors of sampling intervals when these are read from single
    precision floating point values.

    Assumes that the true sampling rate or sampling interval was an integer
    value. No correction will be applied if this would change the sampling
    rate by more than 0.001%.

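    Example (the input value is a typical single-precision representation of
    a 100 Hz sampling interval)::

        >>> fix_deltat_rounding_errors(0.009999999776482582)
        0.01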
    '''

    if deltat <= 1.0:
        deltat_new = 1.0 / round(1.0 / deltat)
    else:
        deltat_new = round(deltat)

    if abs(deltat_new - deltat) / deltat > 1e-5:
        deltat_new = deltat

    return deltat_new


def merge_codes(a, b, sep='-'):
    '''
    Merge network-station-location-channel codes of a pair of traces.
    '''

    o = []
    for xa, xb in zip(a.nslc_id, b.nslc_id):
        if xa == xb:
            o.append(xa)
        else:
            o.append(sep.join((xa, xb)))
    return o


class Taper(Object):
    '''
    Base class for tapers.

    Does nothing by default.
    '''

    def __call__(self, y, x0, dx):
        pass


class CosTaper(Taper):
    '''
    Cosine Taper.

    :param a: start of fading in
    :param b: end of fading in
    :param c: start of fading out
    :param d: end of fading out

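    Example (a minimal sketch; assumes ``tr`` is a :py:class:`Trace` and a
    5 s fade at each end of its time span is wanted)::

        taper = CosTaper(tr.tmin, tr.tmin+5., tr.tmax-5., tr.tmax)
        tr_tapered = tr.taper(taper, inplace=False)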
    '''

    a = Float.T()
    b = Float.T()
    c = Float.T()
    d = Float.T()

    def __init__(self, a, b, c, d):
        Taper.__init__(self, a=a, b=b, c=c, d=d)

    def __call__(self, y, x0, dx):
        apply_costaper(self.a, self.b, self.c, self.d, y, x0, dx)

    def span(self, y, x0, dx):
        return span_costaper(self.a, self.b, self.c, self.d, y, x0, dx)

    def time_span(self):
        return self.a, self.d


class CosFader(Taper):
    '''
    Cosine Fader.

    :param xfade: fade in and fade out time in seconds (optional)
    :param xfrac: fade in and fade out as fraction between 0. and 1.
        (optional)

    Only one argument may be set. The other must be ``None``.
    '''

    xfade = Float.T(optional=True)
    xfrac = Float.T(optional=True)

    def __init__(self, xfade=None, xfrac=None):
        Taper.__init__(self, xfade=xfade, xfrac=xfrac)
        assert (xfade is None) != (xfrac is None)
        self._xfade = xfade
        self._xfrac = xfrac

    def __call__(self, y, x0, dx):

        xfade = self._xfade

        xlen = (y.size - 1)*dx
        if xfade is None:
            xfade = xlen * self._xfrac

        a = x0
        b = x0 + xfade
        c = x0 + xlen - xfade
        d = x0 + xlen

        apply_costaper(a, b, c, d, y, x0, dx)

    def span(self, y, x0, dx):
        return 0, y.size

    def time_span(self):
        return None, None


def none_min(li):
    if None in li:
        return None
    else:
        return min(x for x in li if x is not None)


def none_max(li):
    if None in li:
        return None
    else:
        return max(x for x in li if x is not None)


class MultiplyTaper(Taper):
    '''
    Multiplication of several tapers.
    '''

    tapers = List.T(Taper.T())

    def __init__(self, tapers=None):
        if tapers is None:
            tapers = []

        Taper.__init__(self, tapers=tapers)

    def __call__(self, y, x0, dx):
        for taper in self.tapers:
            taper(y, x0, dx)

    def span(self, y, x0, dx):
        spans = []
        for taper in self.tapers:
            spans.append(taper.span(y, x0, dx))

        mins, maxs = list(zip(*spans))
        return min(mins), max(maxs)

    def time_span(self):
        spans = []
        for taper in self.tapers:
            spans.append(taper.time_span())

        mins, maxs = list(zip(*spans))
        return none_min(mins), none_max(maxs)


class GaussTaper(Taper):
    '''
    Frequency domain Gaussian filter.
    '''

    alpha = Float.T()

    def __init__(self, alpha):
        Taper.__init__(self, alpha=alpha)
        self._alpha = alpha

    def __call__(self, y, x0, dx):
        f = x0 + num.arange(y.size)*dx
        y *= num.exp(-num.pi**2 / (self._alpha**2) * f**2)


cached_coefficients = {}


def _get_cached_filter_coefs(order, corners, btype):
    ck = (order, tuple(corners), btype)
    if ck not in cached_coefficients:
        if len(corners) == 1:
            # single corner frequency: lowpass or highpass filter
            cached_coefficients[ck] = signal.butter(
                order, corners[0], btype=btype)
        else:
            cached_coefficients[ck] = signal.butter(
                order, corners, btype=btype)

    return cached_coefficients[ck]


class _globals(object):
    _numpy_has_correlate_flip_bug = None


def _default_key(tr):
    return (tr.network, tr.station, tr.location, tr.channel)


def numpy_has_correlate_flip_bug():
    '''
    Check if NumPy's correlate function reveals old behaviour.
    '''

    if _globals._numpy_has_correlate_flip_bug is None:
        a = num.array([0, 0, 1, 0, 0, 0, 0])
        b = num.array([0, 0, 0, 0, 1, 0, 0, 0])
        ab = num.correlate(a, b, mode='same')
        ba = num.correlate(b, a, mode='same')
        _globals._numpy_has_correlate_flip_bug = num.all(ab == ba)

    return _globals._numpy_has_correlate_flip_bug


def numpy_correlate_fixed(a, b, mode='valid', use_fft=False):
    '''
    Call :py:func:`numpy.correlate` with fixes.

        c[k] = sum_i a[i+k] * conj(b[i])

    Note that the result produced by newer numpy.correlate is always flipped
    with respect to the formula given in its documentation (if ascending k
    assumed for the output).
    '''

    if use_fft:
        if a.size < b.size:
            c = signal.fftconvolve(b[::-1], a, mode=mode)
        else:
            c = signal.fftconvolve(a, b[::-1], mode=mode)
        return c

    else:
        buggy = numpy_has_correlate_flip_bug()

        a = num.asarray(a)
        b = num.asarray(b)

        if buggy:
            b = num.conj(b)

        c = num.correlate(a, b, mode=mode)

        if buggy and a.size < b.size:
            return c[::-1]
        else:
            return c


def numpy_correlate_emulate(a, b, mode='valid'):
    '''
    Slow version of :py:func:`numpy.correlate` for comparison.
    '''

    a = num.asarray(a)
    b = num.asarray(b)
    kmin, kmax = numpy_correlate_lag_range(a, b, mode=mode)
    kmin = int(kmin)
    kmax = int(kmax)
    klen = kmax - kmin + 1
    c = num.zeros(klen, dtype=num.find_common_type((b.dtype, a.dtype), ()))
    for k in range(kmin, kmin+klen):
        imin = max(0, -k)
        ilen = min(b.size, a.size-k) - imin
        c[k-kmin] = num.sum(
            a[imin+k:imin+ilen+k] * num.conj(b[imin:imin+ilen]))

    return c


def numpy_correlate_lag_range(a, b, mode='valid', use_fft=False):
    '''
    Get range of lags for which :py:func:`numpy.correlate` produces values.
    '''

    a = num.asarray(a)
    b = num.asarray(b)

    kmin = -(b.size-1)
    if mode == 'full':
        klen = a.size-kmin
    elif mode == 'same':
        klen = max(a.size, b.size)
        kmin += (a.size+b.size-1 - max(a.size, b.size)) // 2 + \
            int(not use_fft and a.size % 2 == 0 and b.size > a.size)
    elif mode == 'valid':
        klen = abs(a.size - b.size) + 1
        kmin += min(a.size, b.size) - 1

    return kmin, kmin + klen - 1


def autocorr(x, nshifts):
    '''
    Compute biased estimate of the first autocorrelation coefficients.

    :param x: input array
    :param nshifts: number of coefficients to calculate

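    The estimate computed for lag :math:`k` is

    .. math::

        r_k = \\frac{1}{(n - k)\\,\\sigma}
              \\sum_{i=0}^{n-k-1} (x_i - \\bar{x})(x_{i+k} - \\bar{x})

    where :math:`\\bar{x}` and :math:`\\sigma` are mean and standard
    deviation of ``x``.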
    '''

    mean = num.mean(x)
    std = num.std(x)
    n = x.size
    xdm = x - mean
    r = num.zeros(nshifts)
    for k in range(nshifts):
        r[k] = 1./((n-num.abs(k))*std) * num.sum(xdm[:n-k] * xdm[k:])

    return r


def yulewalker(x, order):
    '''
    Compute autoregression coefficients using Yule-Walker method.

    :param x: input array
    :param order: number of coefficients to produce

    A biased estimate of the autocorrelation is used. The Yule-Walker
    equations are solved by :py:func:`numpy.linalg.inv` instead of
    Levinson-Durbin recursion which is normally used.

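    Example (a minimal sketch; fits an AR(2) model to a noisy oscillation,
    assuming ``num`` is :py:mod:`numpy`)::

        t = num.linspace(0., 50., 1000)
        x = num.sin(t) + num.random.normal(scale=0.1, size=t.size)
        coefs = yulewalker(x, order=2)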
    '''

    gamma = autocorr(x, order+1)
    d = gamma[1:1+order]
    a = num.zeros((order, order))
    gamma2 = num.concatenate((gamma[::-1], gamma[1:order]))
    for i in range(order):
        ioff = order-i
        a[i, :] = gamma2[ioff:ioff+order]

    return num.dot(num.linalg.inv(a), -d)


def moving_avg(x, n):
    '''
    Moving average of ``x`` with window length ``n``.

    The first and last ``n//2`` output samples are padded with the nearest
    computed value.
    '''

    n = int(n)
    cx = x.cumsum()
    nn = len(x)
    y = num.zeros(nn, dtype=cx.dtype)
    y[n//2:n//2+(nn-n)] = (cx[n:]-cx[:-n])/n
    y[:n//2] = y[n//2]
    y[n//2+(nn-n):] = y[n//2+(nn-n)-1]
    return y


def moving_sum(x, n, mode='valid'):
    '''
    Moving sum of ``x`` with window length ``n``.

    The ``mode`` argument ('valid', 'full' or 'same') controls the output
    length, analogous to :py:func:`numpy.correlate`.
    '''

    n = int(n)
    cx = x.cumsum()
    nn = len(x)

    if mode == 'valid':
        if nn-n+1 <= 0:
            return num.zeros(0, dtype=cx.dtype)
        y = num.zeros(nn-n+1, dtype=cx.dtype)
        y[0] = cx[n-1]
        y[1:nn-n+1] = cx[n:nn]-cx[0:nn-n]

    if mode == 'full':
        y = num.zeros(nn+n-1, dtype=cx.dtype)
        if n <= nn:
            y[0:n] = cx[0:n]
            y[n:nn] = cx[n:nn]-cx[0:nn-n]
            y[nn:nn+n-1] = cx[-1]-cx[nn-n:nn-1]
        else:
            y[0:nn] = cx[0:nn]
            y[nn:n] = cx[nn-1]
            y[n:nn+n-1] = cx[nn-1] - cx[0:nn-1]

    if mode == 'same':
        n1 = (n-1)//2
        y = num.zeros(nn, dtype=cx.dtype)
        if n <= nn:
            y[0:n-n1] = cx[n1:n]
            y[n-n1:nn-n1] = cx[n:nn]-cx[0:nn-n]
            y[nn-n1:nn] = cx[nn-1] - cx[nn-n:nn-n+n1]
        else:
            y[0:max(0, nn-n1)] = cx[min(n1, nn):nn]
            y[max(nn-n1, 0):min(n-n1, nn)] = cx[nn-1]
            y[min(n-n1, nn):nn] = cx[nn-1] - cx[0:max(0, nn-(n-n1))]

    return y


def nextpow2(i):
    '''
    Smallest power of two not smaller than ``i``.
    '''
    return 2**int(math.ceil(math.log(i)/math.log(2.)))


def snapper_w_offset(nmax, offset, delta, snapfun=math.ceil):
    def snap(x):
        return max(0, min(int(snapfun((x-offset)/delta)), nmax))
    return snap


def snapper(nmax, delta, snapfun=math.ceil):
    def snap(x):
        return max(0, min(int(snapfun(x/delta)), nmax))
    return snap


def apply_costaper(a, b, c, d, y, x0, dx):
    hi = snapper_w_offset(y.size, x0, dx)
    y[:hi(a)] = 0.
    y[hi(a):hi(b)] *= 0.5 \
        - 0.5*num.cos((dx*num.arange(hi(a), hi(b))-(a-x0))/(b-a)*num.pi)
    y[hi(c):hi(d)] *= 0.5 \
        + 0.5*num.cos((dx*num.arange(hi(c), hi(d))-(c-x0))/(d-c)*num.pi)
    y[hi(d):] = 0.


def span_costaper(a, b, c, d, y, x0, dx):
    hi = snapper_w_offset(y.size, x0, dx)
    return hi(a), hi(d) - hi(a)


def costaper(a, b, c, d, nfreqs, deltaf):
    hi = snapper(nfreqs, deltaf)
    tap = num.zeros(nfreqs)
    tap[hi(a):hi(b)] = 0.5 \
        - 0.5*num.cos((deltaf*num.arange(hi(a), hi(b))-a)/(b-a)*num.pi)
    tap[hi(b):hi(c)] = 1.
    tap[hi(c):hi(d)] = 0.5 \
        + 0.5*num.cos((deltaf*num.arange(hi(c), hi(d))-c)/(d-c)*num.pi)

    return tap


def t2ind(t, tdelta, snap=round):
    return int(snap(t/tdelta))


def hilbert(x, N=None):
    '''
    Return the analytic signal of x of length N.

    The imaginary part of the result is the Hilbert transform of ``x``.
    (Adapted from scipy.signal, but changed to use fft and ifft from
    numpy.fft.)
    '''

    x = num.asarray(x)
    if N is None:
        N = len(x)
    if N <= 0:
        raise ValueError("N must be positive.")
    if num.iscomplexobj(x):
        logger.warning('imaginary part of x ignored.')
        x = num.real(x)

    Xf = num.fft.fft(x, N, axis=0)
    h = num.zeros(N)
    if N % 2 == 0:
        h[0] = h[N//2] = 1
        h[1:N//2] = 2
    else:
        h[0] = 1
        h[1:(N+1)//2] = 2

    if len(x.shape) > 1:
        h = h[:, num.newaxis]
    x = num.fft.ifft(Xf*h)
    return x


def near(a, b, eps):
    return abs(a-b) < eps


def coroutine(func):
    def wrapper(*args, **kwargs):
        gen = func(*args, **kwargs)
        next(gen)
        return gen

    wrapper.__name__ = func.__name__
    wrapper.__dict__ = func.__dict__
    wrapper.__doc__ = func.__doc__
    return wrapper


class States(object):
    '''
    Utility to store channel-specific state in coroutines.
    '''

    def __init__(self):
        self._states = {}

    def get(self, tr):
        k = tr.nslc_id
        if k in self._states:
            tmin, deltat, dtype, value = self._states[k]
            if (near(tmin, tr.tmin, deltat/100.)
                    and near(deltat, tr.deltat, deltat/10000.)
                    and dtype == tr.ydata.dtype):

                return value

        return None

    def set(self, tr, value):
        k = tr.nslc_id
        if k in self._states and self._states[k][-1] is not value:
            self.free(self._states[k][-1])

        self._states[k] = (tr.tmax+tr.deltat, tr.deltat, tr.ydata.dtype, value)

    def free(self, value):
        pass


@coroutine
def co_list_append(list):
    while True:
        list.append((yield))


class ScipyBug(Exception):
    pass


@coroutine
def co_lfilter(target, b, a):
    '''
    Successively filter broken continuous trace data (coroutine).

    Create coroutine which takes :py:class:`Trace` objects, filters their
    data through :py:func:`scipy.signal.lfilter` and sends new
    :py:class:`Trace` objects containing the filtered data to target. This
    is useful, if one wants to filter a long continuous time series, which
    is split into many successive traces without producing filter artifacts
    at trace boundaries.

    Filter states are kept *per channel*, specifically, for each (network,
    station, location, channel) combination occurring in the input traces, a
    separate state is created and maintained. This makes it possible to
    filter multichannel or multistation data with only one
    :py:func:`co_lfilter` instance.

    Filter state is reset, when gaps occur.

    Use it like this::

        from pyrocko.trace import co_lfilter, co_list_append

        filtered_traces = []
        pipe = co_lfilter(co_list_append(filtered_traces), b, a)
        for trace in traces:
            pipe.send(trace)

        pipe.close()

    '''

    try:
        states = States()
        output = None
        while True:
            input = (yield)

            zi = states.get(input)
            if zi is None:
                zi = num.zeros(max(len(a), len(b))-1, dtype=float)

            output = input.copy(data=False)
            try:
                ydata, zf = signal.lfilter(b, a, input.get_ydata(), zi=zi)
            except ValueError:
                raise ScipyBug(
                    'signal.lfilter failed: could be related to a bug '
                    'in some older scipy versions, e.g. on opensuse42.1')

            output.set_ydata(ydata)
            states.set(input, zf)
            target.send(output)

    except GeneratorExit:
        target.close()


def co_antialias(target, q, n=None, ftype='fir'):
    b, a, n = util.decimate_coeffs(q, n, ftype)
    anti = co_lfilter(target, b, a)
    return anti


@coroutine
def co_dropsamples(target, q, nfir):
    try:
        states = States()
        while True:
            tr = (yield)
            newdeltat = q * tr.deltat
            ioffset = states.get(tr)
            if ioffset is None:
                # for fir filter, the first nfir samples are polluted by
                # boundary effects; cut them off.
                # for iir this may be (much) more, we do not correct for
                # that.
                # put sample instances to a time which is a multiple of the
                # new sampling interval.
                newtmin_want = math.ceil(
                    (tr.tmin+(nfir+1)*tr.deltat) / newdeltat) * newdeltat \
                    - (nfir/2*tr.deltat)
                ioffset = int(round((newtmin_want - tr.tmin)/tr.deltat))
                if ioffset < 0:
                    ioffset = ioffset % q

            newtmin_have = tr.tmin + ioffset * tr.deltat
            newtr = tr.copy(data=False)
            newtr.deltat = newdeltat
            # because the fir kernel shifts data by nfir/2 samples:
            newtr.tmin = newtmin_have - (nfir/2*tr.deltat)
            newtr.set_ydata(tr.get_ydata()[ioffset::q].copy())
            states.set(tr, (ioffset % q - tr.data_len() % q) % q)
            target.send(newtr)

    except GeneratorExit:
        target.close()


def co_downsample(target, q, n=None, ftype='fir'):
    '''
    Successively downsample broken continuous trace data (coroutine).

    Create coroutine which takes :py:class:`Trace` objects, downsamples
    their data and sends new :py:class:`Trace` objects containing the
    downsampled data to target. This is useful, if one wants to downsample a
    long continuous time series, which is split into many successive traces
    without producing filter artifacts and gaps at trace boundaries.

    Filter states are kept *per channel*, specifically, for each (network,
    station, location, channel) combination occurring in the input traces, a
    separate state is created and maintained. This makes it possible to
    filter multichannel or multistation data with only one
    :py:func:`co_downsample` instance.

    Filter state is reset, when gaps occur. The sampling instances are
    chosen so that they occur at (or as close as possible to) integer
    multiples of the sampling interval of the downsampled trace (based on
    system time).

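    Use it like this (a minimal sketch; assumes ``traces`` is a list of
    successive :py:class:`Trace` objects)::

        from pyrocko.trace import co_downsample, co_list_append

        downsampled = []
        pipe = co_downsample(co_list_append(downsampled), q=2)
        for tr in traces:
            pipe.send(tr)

        pipe.close()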
    '''

    b, a, n = util.decimate_coeffs(q, n, ftype)
    return co_antialias(co_dropsamples(target, q, n), q, n, ftype)


@coroutine
def co_downsample_to(target, deltat):
    '''
    Successively downsample broken continuous trace data to a fixed sampling
    interval ``deltat`` (coroutine).

    Raises :py:exc:`pyrocko.util.UnavailableDecimation` if the required
    decimation ratio is not supported.
    '''

    decimators = {}
    try:
        while True:
            tr = (yield)
            ratio = deltat / tr.deltat
            rratio = round(ratio)
            if abs(rratio - ratio)/ratio > 0.0001:
                raise util.UnavailableDecimation('ratio = %g' % ratio)

            deci_seq = tuple(x for x in util.decitab(int(rratio)) if x != 1)
            if deci_seq not in decimators:
                pipe = target
                for q in deci_seq[::-1]:
                    pipe = co_downsample(pipe, q)

                decimators[deci_seq] = pipe

            decimators[deci_seq].send(tr)

    except GeneratorExit:
        for g in decimators.values():
            g.close()


class DomainChoice(StringChoice):
    choices = [
        'time_domain',
        'frequency_domain',
        'envelope',
        'absolute',
        'cc_max_norm']


class MisfitSetup(Object):
    '''
    Contains misfit setup to be used in :py:meth:`Trace.misfit`.

    :param description: Description of the setup
    :param norm: L-norm classifier
    :param taper: Object of :py:class:`Taper`
    :param filter: Object of :py:class:`FrequencyResponse`
    :param domain: ['time_domain', 'frequency_domain', 'envelope',
        'absolute', 'cc_max_norm']

    Can be dumped to a YAML file.

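    Example (a minimal sketch)::

        setup = MisfitSetup(
            description='time-domain L2 misfit',
            norm=2,
            taper=CosTaper(0., 5., 45., 50.),
            domain='time_domain')

        print(setup.dump())  # serialize to YAML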
    '''

    xmltagname = 'misfitsetup'
    description = String.T(optional=True)
    norm = Int.T(optional=False)
    taper = Taper.T(optional=False)
    filter = FrequencyResponse.T(optional=True)
    domain = DomainChoice.T(default='time_domain')


def equalize_sampling_rates(trace_1, trace_2):
    '''
    Equalize sampling rates of two traces (reduce higher sampling rate to
    lower).

    :param trace_1: :py:class:`Trace` object
    :param trace_2: :py:class:`Trace` object

    Returns the two traces. If resampling is needed, the trace with the
    higher sampling rate is returned as a downsampled copy; the other is
    returned unchanged.
    '''

    if same_sampling_rate(trace_1, trace_2):
        return trace_1, trace_2

    if trace_1.deltat < trace_2.deltat:
        t1_out = trace_1.copy()
        t1_out.downsample_to(deltat=trace_2.deltat, snap=True)
        logger.debug('Trace downsampled (return copy of trace): %s'
                     % '.'.join(t1_out.nslc_id))
        return t1_out, trace_2

    elif trace_1.deltat > trace_2.deltat:
        t2_out = trace_2.copy()
        t2_out.downsample_to(deltat=trace_1.deltat, snap=True)
        logger.debug('Trace downsampled (return copy of trace): %s'
                     % '.'.join(t2_out.nslc_id))
        return trace_1, t2_out


def Lx_norm(u, v, norm=2):
    '''
    Calculate the misfit *m* and the normalization divisor *n* according to
    ``norm``.

    The normalization divisor *n* is calculated from ``v``.

    :param u: :py:class:`numpy.ndarray`
    :param v: :py:class:`numpy.ndarray`
    :param norm: (default = 2)

    ``u`` and ``v`` must be of same size.

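    For a general ``norm`` :math:`x`, the two values are

    .. math::

        m = \\Big( \\sum_i |v_i - u_i|^x \\Big)^{1/x}, \\qquad
        n = \\Big( \\sum_i |v_i|^x \\Big)^{1/x}

    with the sums running over all samples.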
    '''

    if norm == 1:
        return (
            num.sum(num.abs(v-u)),
            num.sum(num.abs(v)))

    elif norm == 2:
        return (
            num.sqrt(num.sum((v-u)**2)),
            num.sqrt(num.sum(v**2)))

    else:
        return (
            num.power(num.sum(num.abs(num.power(v - u, norm))), 1./norm),
            num.power(num.sum(num.abs(num.power(v, norm))), 1./norm))


def do_downsample(tr, deltat):
    if abs(tr.deltat - deltat) / tr.deltat > 1e-6:
        tr = tr.copy()
        tr.downsample_to(deltat, snap=True, demean=False)
    else:
        if tr.tmin/tr.deltat > 1e-6 or tr.tmax/tr.deltat > 1e-6:
            tr = tr.copy()
            tr.snap()
    return tr


def do_extend(tr, tmin, tmax):
    if tmin < tr.tmin or tmax > tr.tmax:
        tr = tr.copy()
        tr.extend(tmin=tmin, tmax=tmax, fillmethod='repeat')

    return tr


def do_pre_taper(tr, taper):
    return tr.taper(taper, inplace=False, chop=True)


def do_fft(tr, filter):
    if filter is None:
        return tr
    else:
        ndata = tr.ydata.size
        nfft = nextpow2(ndata)
        padded = num.zeros(nfft, dtype=float)
        padded[:ndata] = tr.ydata
        spectrum = num.fft.rfft(padded)
        df = 1.0 / (tr.deltat * nfft)
        frequencies = num.arange(spectrum.size)*df
        return [tr, frequencies, spectrum]


def do_filter(inp, filter):
    if filter is None:
        return inp
    else:
        tr, frequencies, spectrum = inp
        spectrum *= filter.evaluate(frequencies)
        return [tr, frequencies, spectrum]


def do_ifft(inp):
    if isinstance(inp, Trace):
        return inp
    else:
        tr, _, spectrum = inp
        ndata = tr.ydata.size
        tr = tr.copy(data=False)
        tr.set_ydata(num.fft.irfft(spectrum)[:ndata])
        return tr


def check_alignment(t1, t2):
    if abs(t1.tmin-t2.tmin) > t1.deltat * 1e-4 or \
            abs(t1.tmax - t2.tmax) > t1.deltat * 1e-4 or \
            t1.ydata.shape != t2.ydata.shape:
        raise MisalignedTraces(
            'Cannot calculate misfit of %s and %s due to misaligned '
            'traces.' % ('.'.join(t1.nslc_id), '.'.join(t2.nslc_id)))