# http://pyrocko.org - GPLv3
#
# The Pyrocko Developers, 21st Century
# ---|P------/S----------~Lg----------

'''
This module provides basic signal processing for seismic traces.
'''

from __future__ import division, absolute_import

import time
import math
import copy
import logging
import hashlib
from collections import defaultdict

import numpy as num
from scipy import signal

from pyrocko import util, orthodrome, pchain, model
from pyrocko.util import reuse
from pyrocko.guts import Object, Float, Int, String, List, \
    StringChoice, Timestamp
from pyrocko.guts_array import Array
from pyrocko.model import squirrel_content

# backward compatibility
from pyrocko.util import UnavailableDecimation  # noqa
from pyrocko.response import (  # noqa
    FrequencyResponse, Evalresp, InverseEvalresp, PoleZeroResponse,
    ButterworthResponse, SampledResponse, IntegrationResponse,
    DifferentiationResponse, MultiplyResponse)


guts_prefix = 'pf'

logger = logging.getLogger('pyrocko.trace')


@squirrel_content
class Trace(Object):

    '''
    Create new trace object.

    A ``Trace`` object represents a single continuous strip of evenly sampled
    time series data. It is built from a 1D NumPy array containing the data
    samples and some attributes describing its beginning and ending time, its
    sampling rate and four string identifiers (its network, station, location
    and channel code).

    :param network: network code
    :param station: station code
    :param location: location code
    :param channel: channel code
    :param extra: extra code
    :param tmin: system time of first sample in [s]
    :param tmax: system time of last sample in [s] (if set to ``None`` it is
        computed from length of ``ydata``)
    :param deltat: sampling interval in [s]
    :param ydata: 1D numpy array with data samples (can be ``None`` when
        ``tmax`` is not ``None``)
    :param mtime: optional modification time

    :param meta: additional meta information (not used, but maintained by the
        library)


    The length of the network, station, location and channel codes is not
    restricted by this software, but data formats like SAC, Mini-SEED or GSE
    have different limits on the lengths of these codes. The codes set here
    are silently truncated when the trace is stored.
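
    Example (an illustrative sketch; creates a one-minute trace of zeros,
    sampled at 10 Hz)::

        import numpy as num
        from pyrocko.trace import Trace

        tr = Trace(station='TEST', channel='Z', deltat=0.1, tmin=0.0,
                   ydata=num.zeros(600))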

    '''

    network = String.T(default='', help='Deployment/network code (1-8)')
    station = String.T(default='STA', help='Station code (1-5)')
    location = String.T(default='', help='Location code (0-2)')
    channel = String.T(default='', help='Channel code (3)')
    extra = String.T(default='', help='Extra/custom code')

    tmin = Timestamp.T(default=Timestamp.D('1970-01-01 00:00:00'))
    tmax = Timestamp.T()

    deltat = Float.T(default=1.0)
    ydata = Array.T(optional=True, shape=(None,), serialize_as='base64+meta')

    mtime = Timestamp.T(optional=True)

    cached_frequencies = {}

    def __init__(self, network='', station='STA', location='', channel='',
                 tmin=0., tmax=None, deltat=1., ydata=None, mtime=None,
                 meta=None, extra=''):

        Object.__init__(self, init_props=False)


        time_float = util.get_time_float()

        if not isinstance(tmin, time_float):
            tmin = Trace.tmin.regularize_extra(tmin)

        if tmax is not None and not isinstance(tmax, time_float):
            tmax = Trace.tmax.regularize_extra(tmax)

        if mtime is not None and not isinstance(mtime, time_float):
            mtime = Trace.mtime.regularize_extra(mtime)

        self._growbuffer = None

        tmin = time_float(tmin)
        if tmax is not None:
            tmax = time_float(tmax)

        if mtime is None:
            mtime = time_float(time.time())

        self.network, self.station, self.location, self.channel, \
            self.extra = [
                reuse(x) for x in (
                    network, station, location, channel, extra)]

        self.tmin = tmin
        self.deltat = deltat

        if tmax is None:
            if ydata is not None:
                self.tmax = self.tmin + (ydata.size-1)*self.deltat
            else:
                raise Exception(
                    'fixme: trace must be created with tmax or ydata')
        else:
            n = int(round((tmax - self.tmin) / self.deltat)) + 1
            self.tmax = self.tmin + (n - 1) * self.deltat

        self.meta = meta
        self.ydata = ydata
        self.mtime = mtime
        self._update_ids()
        self.file = None
        self._pchain = None


    def __str__(self):
        fmt = min(9, max(0, -int(math.floor(math.log10(self.deltat)))))
        s = 'Trace (%s, %s, %s, %s)\n' % self.nslc_id
        s += '  timerange: %s - %s\n' % (
            util.time_to_str(self.tmin, format=fmt),
            util.time_to_str(self.tmax, format=fmt))

        s += '  delta t: %g\n' % self.deltat
        if self.meta:
            for k in sorted(self.meta.keys()):
                s += '  %s: %s\n' % (k, self.meta[k])
        return s

    @property
    def codes(self):
        from pyrocko.squirrel import model
        return model.CodesNSLCE(
            self.network, self.station, self.location, self.channel,
            self.extra)


    @property
    def time_span(self):
        return self.tmin, self.tmax

    @property
    def summary(self):
        return '%s %-16s %s %g' % (
            self.__class__.__name__, self.str_codes, self.str_time_span,
            self.deltat)


    def __getstate__(self):
        return (self.network, self.station, self.location, self.channel,
                self.tmin, self.tmax, self.deltat, self.mtime,
                self.ydata, self.meta, self.extra)

    def __setstate__(self, state):
        if len(state) == 11:
            self.network, self.station, self.location, self.channel, \
                self.tmin, self.tmax, self.deltat, self.mtime, \
                self.ydata, self.meta, self.extra = state

        elif len(state) == 12:
            # backward compatibility 0
            self.network, self.station, self.location, self.channel, \
                self.tmin, self.tmax, self.deltat, self.mtime, \
                self.ydata, self.meta, _, self.extra = state

        elif len(state) == 10:
            # backward compatibility 1
            self.network, self.station, self.location, self.channel, \
                self.tmin, self.tmax, self.deltat, self.mtime, \
                self.ydata, self.meta = state

            self.extra = ''

        else:
            # backward compatibility 2
            self.network, self.station, self.location, self.channel, \
                self.tmin, self.tmax, self.deltat, self.mtime = state
            self.ydata = None
            self.meta = None
            self.extra = ''

        self._growbuffer = None
        self._update_ids()


    def hash(self, unsafe=False):
        sha1 = hashlib.sha1()
        for code in self.nslc_id:
            sha1.update(code.encode())
        sha1.update(self.tmin.hex().encode())
        sha1.update(self.tmax.hex().encode())
        sha1.update(self.deltat.hex().encode())

        if self.ydata is not None:
            if not unsafe:
                sha1.update(memoryview(self.ydata))
            else:
                sha1.update(memoryview(self.ydata[:32]))
                sha1.update(memoryview(self.ydata[-32:]))

        return sha1.hexdigest()


    def name(self):
        '''
        Get a short string description.
        '''

        s = '%s.%s.%s.%s, %s, %s' % (self.nslc_id + (
            util.time_to_str(self.tmin),
            util.time_to_str(self.tmax)))

        return s


    def __eq__(self, other):
        return (
            isinstance(other, Trace)
            and self.network == other.network
            and self.station == other.station
            and self.location == other.location
            and self.channel == other.channel
            and (abs(self.deltat - other.deltat)
                 < (self.deltat + other.deltat)*1e-6)
            and abs(self.tmin-other.tmin) < self.deltat*0.01
            and abs(self.tmax-other.tmax) < self.deltat*0.01
            and num.all(self.ydata == other.ydata))

    def almost_equal(self, other, rtol=1e-5, atol=0.0):
        return (
            self.network == other.network
            and self.station == other.station
            and self.location == other.location
            and self.channel == other.channel
            and (abs(self.deltat - other.deltat)
                 < (self.deltat + other.deltat)*1e-6)
            and abs(self.tmin-other.tmin) < self.deltat*0.01
            and abs(self.tmax-other.tmax) < self.deltat*0.01
            and num.allclose(self.ydata, other.ydata, rtol=rtol, atol=atol))


    def assert_almost_equal(self, other, rtol=1e-5, atol=0.0):

        assert self.network == other.network, \
            'network codes differ: %s, %s' % (self.network, other.network)
        assert self.station == other.station, \
            'station codes differ: %s, %s' % (self.station, other.station)
        assert self.location == other.location, \
            'location codes differ: %s, %s' % (self.location, other.location)
        assert self.channel == other.channel, 'channel codes differ'
        assert (abs(self.deltat - other.deltat)
                < (self.deltat + other.deltat)*1e-6), \
            'sampling intervals differ %g, %g' % (self.deltat, other.deltat)
        assert abs(self.tmin-other.tmin) < self.deltat*0.01, \
            'start times differ: %s, %s' % (
                util.time_to_str(self.tmin), util.time_to_str(other.tmin))
        assert abs(self.tmax-other.tmax) < self.deltat*0.01, \
            'end times differ: %s, %s' % (
                util.time_to_str(self.tmax), util.time_to_str(other.tmax))

        assert num.allclose(self.ydata, other.ydata, rtol=rtol, atol=atol), \
            'trace values differ'


    def __hash__(self):
        return id(self)

    def __call__(self, t, clip=False, snap=round):
        it = int(snap((t-self.tmin)/self.deltat))
        if clip:
            it = max(0, min(it, self.ydata.size-1))
        else:
            if it < 0 or self.ydata.size <= it:
                raise IndexError()

        return self.tmin+it*self.deltat, self.ydata[it]


    def interpolate(self, t, clip=False):
        '''
        Value of trace between supporting points through linear
        interpolation.

        :param t: time instant
        :param clip: whether to clip indices to trace ends
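
        Example (an illustrative sketch; evaluates the trace halfway between
        its first two samples, assuming ``tr`` is any :py:class:`Trace` with
        data)::

            y_mid = tr.interpolate(tr.tmin + 0.5*tr.deltat)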

        '''

        t0, y0 = self(t, clip=clip, snap=math.floor)
        t1, y1 = self(t, clip=clip, snap=math.ceil)
        if t0 == t1:
            return y0
        else:
            return y0+(t-t0)/(t1-t0)*(y1-y0)

    def index_clip(self, i):
        '''
        Clip index to valid range.
        '''

        return min(max(0, i), self.ydata.size)


    def add(self, other, interpolate=True, left=0., right=0.):
        '''
        Add values of other trace (self += other).

        Add values of ``other`` trace to the values of ``self``, where it
        intersects with ``other``. This method does not change the extent of
        ``self``. If ``interpolate`` is ``True`` (the default), the values of
        ``other`` to be added are interpolated at sampling instants of
        ``self``. Linear interpolation is performed. In this case the
        sampling rate of ``other`` must be equal to or lower than that of
        ``self``. If ``interpolate`` is ``False``, the sampling rates of the
        two traces must match.
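
        Example (an illustrative sketch; sums two overlapping traces,
        keeping the originals intact)::

            tr_sum = tr_a.copy()
            tr_sum.add(tr_b)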

        '''

        if interpolate:
            assert self.deltat <= other.deltat \
                or same_sampling_rate(self, other)

            other_xdata = other.get_xdata()
            xdata = self.get_xdata()
            self.ydata += num.interp(
                xdata, other_xdata, other.ydata, left=left, right=right)
        else:
            assert self.deltat == other.deltat
            ioff = int(round((other.tmin-self.tmin)/self.deltat))
            ibeg = max(0, ioff)
            iend = min(self.data_len(), ioff+other.data_len())
            self.ydata[ibeg:iend] += other.ydata[ibeg-ioff:iend-ioff]


    def mult(self, other, interpolate=True):
        '''
        Multiply with values of other trace ``(self *= other)``.

        Multiply the values of ``self`` with the values of ``other`` where it
        intersects with ``other``. This method does not change the extent of
        ``self``. If ``interpolate`` is ``True`` (the default), the values of
        ``other`` to be multiplied are interpolated at sampling instants of
        ``self``. Linear interpolation is performed. In this case the
        sampling rate of ``other`` must be equal to or lower than that of
        ``self``. If ``interpolate`` is ``False``, the sampling rates of the
        two traces must match.
        '''

        if interpolate:
            assert self.deltat <= other.deltat or \
                same_sampling_rate(self, other)

            other_xdata = other.get_xdata()
            xdata = self.get_xdata()
            self.ydata *= num.interp(
                xdata, other_xdata, other.ydata, left=0., right=0.)
        else:
            assert self.deltat == other.deltat
            ibeg1 = int(round((other.tmin-self.tmin)/self.deltat))
            ibeg2 = int(round((self.tmin-other.tmin)/self.deltat))
            iend1 = int(round((other.tmax-self.tmin)/self.deltat))+1
            iend2 = int(round((self.tmax-other.tmin)/self.deltat))+1

            ibeg1 = self.index_clip(ibeg1)
            iend1 = self.index_clip(iend1)
            ibeg2 = self.index_clip(ibeg2)
            iend2 = self.index_clip(iend2)

            self.ydata[ibeg1:iend1] *= other.ydata[ibeg2:iend2]


    def max(self):
        '''
        Get time and value of data maximum.
        '''

        i = num.argmax(self.ydata)
        return self.tmin + i*self.deltat, self.ydata[i]

    def min(self):
        '''
        Get time and value of data minimum.
        '''

        i = num.argmin(self.ydata)
        return self.tmin + i*self.deltat, self.ydata[i]

    def absmax(self):
        '''
        Get time and value of maximum of the absolute of data.
        '''

        tmi, mi = self.min()
        tma, ma = self.max()
        if abs(mi) > abs(ma):
            return tmi, abs(mi)
        else:
            return tma, abs(ma)


    def set_codes(
            self, network=None, station=None, location=None, channel=None,
            extra=None):

        '''
        Set network, station, location, and channel codes.
        '''

        if network is not None:
            self.network = network
        if station is not None:
            self.station = station
        if location is not None:
            self.location = location
        if channel is not None:
            self.channel = channel
        if extra is not None:
            self.extra = extra

        self._update_ids()

    def set_network(self, network):
        self.network = network
        self._update_ids()

    def set_station(self, station):
        self.station = station
        self._update_ids()

    def set_location(self, location):
        self.location = location
        self._update_ids()

    def set_channel(self, channel):
        self.channel = channel
        self._update_ids()


    def overlaps(self, tmin, tmax):
        '''
        Check if trace has overlap with a given time span.
        '''
        return not (tmax < self.tmin or self.tmax < tmin)

    def is_relevant(self, tmin, tmax, selector=None):
        '''
        Check if trace has overlap with a given time span and matches a
        condition callback. (internal use)
        '''

        return not (tmax <= self.tmin or self.tmax < tmin) \
            and (selector is None or selector(self))


    def _update_ids(self):
        '''
        Update dependent ids.
        '''

        self.full_id = (
            self.network, self.station, self.location, self.channel,
            self.tmin)
        self.nslc_id = reuse(
            (self.network, self.station, self.location, self.channel))

    def prune_from_reuse_cache(self):
        util.deuse(self.nslc_id)
        util.deuse(self.network)
        util.deuse(self.station)
        util.deuse(self.location)
        util.deuse(self.channel)


    def set_mtime(self, mtime):
        '''
        Set modification time of the trace.
        '''

        self.mtime = mtime

    def get_xdata(self):
        '''
        Create array for time axis.
        '''

        if self.ydata is None:
            raise NoData()

        return self.tmin \
            + num.arange(len(self.ydata), dtype=num.float64) * self.deltat

    def get_ydata(self):
        '''
        Get data array.
        '''

        if self.ydata is None:
            raise NoData()

        return self.ydata

    def set_ydata(self, new_ydata):
        '''
        Replace data array.
        '''

        self.drop_growbuffer()
        self.ydata = new_ydata
        self.tmax = self.tmin+(len(self.ydata)-1)*self.deltat

    def data_len(self):
        if self.ydata is not None:
            return self.ydata.size
        else:
            return int(round((self.tmax-self.tmin)/self.deltat)) + 1

    def drop_data(self):
        '''
        Forget data, make dataless trace.
        '''

        self.drop_growbuffer()
        self.ydata = None


    def drop_growbuffer(self):
        '''
        Detach the trace's grow buffer.
        '''

        self._growbuffer = None
        self._pchain = None


    def copy(self, data=True):
        '''
        Make a deep copy of the trace.
        '''

        tracecopy = copy.copy(self)
        tracecopy.drop_growbuffer()
        if data:
            tracecopy.ydata = self.ydata.copy()
        tracecopy.meta = copy.deepcopy(self.meta)
        return tracecopy


    def crop_zeros(self):
        '''
        Remove any zeros at beginning and end.
        '''

        indices = num.where(self.ydata != 0.0)[0]
        if indices.size == 0:
            raise NoData()

        ibeg = indices[0]
        iend = indices[-1]+1
        if ibeg == 0 and iend == self.ydata.size:
            return

        self.drop_growbuffer()
        self.ydata = self.ydata[ibeg:iend].copy()
        self.tmin = self.tmin+ibeg*self.deltat
        self.tmax = self.tmin+(len(self.ydata)-1)*self.deltat
        self._update_ids()


    def append(self, data):
        '''
        Append data to the end of the trace.

        To make this method efficient when very few samples (or even single
        samples) are appended in succession, a larger grow buffer is
        allocated upon the first invocation. The trace's data is then changed
        to be a view into the currently filled portion of the grow buffer
        array.
        '''

        assert self.ydata.dtype == data.dtype
        newlen = data.size + self.ydata.size
        if self._growbuffer is None or self._growbuffer.size < newlen:
            self._growbuffer = num.empty(newlen*2, dtype=self.ydata.dtype)
            self._growbuffer[:self.ydata.size] = self.ydata
        self._growbuffer[self.ydata.size:newlen] = data
        self.ydata = self._growbuffer[:newlen]
        self.tmax = self.tmin + (newlen-1)*self.deltat


    def chop(
            self, tmin, tmax, inplace=True, include_last=False,
            snap=(round, round), want_incomplete=True):

        '''
        Cut the trace to given time span.

        If the ``inplace`` argument is True (the default) the trace is cut in
        place, otherwise a new trace with the cut part is returned. By
        default, the indices where to start and end the trace data array are
        determined by rounding of ``tmin`` and ``tmax`` to sampling instants
        using Python's :py:func:`round` function. This behaviour can be
        changed with the ``snap`` argument, which takes a tuple of two
        functions (one for the lower and one for the upper end) to be used
        instead of :py:func:`round`. The last sample is by default not
        included unless ``include_last`` is set to True. If the given time
        span exceeds the available time span of the trace, the available part
        is returned, unless ``want_incomplete`` is set to False - in that
        case, a :py:exc:`NoData` exception is raised. This exception is
        always raised when the requested time span does not overlap with the
        trace's time span.
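
        Example (an illustrative sketch; extracts a 10 s window as a new
        trace, assuming ``tr`` covers the requested span)::

            tr_win = tr.chop(tr.tmin + 5., tr.tmin + 15., inplace=False)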

        '''

        if want_incomplete:
            if tmax <= self.tmin-self.deltat or self.tmax+self.deltat < tmin:
                raise NoData()
        else:
            if tmin < self.tmin or self.tmax < tmax:
                raise NoData()

        ibeg = max(0, t2ind(tmin-self.tmin, self.deltat, snap[0]))
        iplus = 0
        if include_last:
            iplus = 1

        iend = min(
            self.data_len(),
            t2ind(tmax-self.tmin, self.deltat, snap[1])+iplus)

        if ibeg >= iend:
            raise NoData()

        obj = self
        if not inplace:
            obj = self.copy(data=False)

        self.drop_growbuffer()
        if self.ydata is not None:
            obj.ydata = self.ydata[ibeg:iend].copy()
        else:
            obj.ydata = None

        obj.tmin = obj.tmin+ibeg*obj.deltat
        obj.tmax = obj.tmin+((iend-ibeg)-1)*obj.deltat

        obj._update_ids()

        return obj


    def downsample(self, ndecimate, snap=False, initials=None, demean=False,
                   ftype='fir-remez'):
        '''
        Downsample trace by a given integer factor.

        (Figure: comparison of the available FIR filters `fir` and
        `fir-remez`.)

        :param ndecimate: decimation factor, avoid values larger than 8
        :param snap: whether to put the new sampling instants closest to
            multiples of the sampling interval.
        :param initials: ``None``, ``True``, or initial conditions for the
            anti-aliasing filter, obtained from a previous run. In the latter
            two cases the final state of the filter is returned instead of
            ``None``.
        :param demean: whether to demean the signal before filtering.
        :param ftype: which FIR filter to use, choose from `fir, fir-remez`.
            Default is `fir-remez`.
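
        Example (an illustrative sketch; decimates a 100 Hz trace to
        20 Hz)::

            tr.downsample(5, demean=True)   # deltat: 0.01 s -> 0.05 s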

        '''
        newdeltat = self.deltat*ndecimate
        if snap:
            ilag = int(round(
                (math.ceil(self.tmin / newdeltat) * newdeltat - self.tmin)
                / self.deltat))
        else:
            ilag = 0

        if snap and ilag > 0 and ilag < self.ydata.size:
            data = self.ydata.astype(num.float64)
            self.tmin += ilag*self.deltat
        else:
            data = self.ydata.astype(num.float64)

        if demean:
            data -= num.mean(data)

        if data.size != 0:
            result = util.decimate(
                data, ndecimate, ftype=ftype, zi=initials, ioff=ilag)
        else:
            result = data

        if initials is None:
            self.ydata, finals = result, None
        else:
            self.ydata, finals = result

        self.deltat = reuse(self.deltat*ndecimate)
        self.tmax = self.tmin+(len(self.ydata)-1)*self.deltat
        self._update_ids()

        return finals


    def downsample_to(self, deltat, snap=False, allow_upsample_max=1,
                      initials=None, demean=False, ftype='fir-remez'):

        '''
        Downsample to given sampling rate.

        Tries to downsample the trace to a target sampling interval of
        ``deltat``. This runs :py:meth:`Trace.downsample` one or several
        times. If ``allow_upsample_max`` is set to a value larger than 1,
        intermediate upsampling steps are allowed, in order to increase the
        number of possible downsampling ratios.

        If the requested ratio is not supported, an exception of type
        :py:exc:`pyrocko.util.UnavailableDecimation` is raised.

        :param deltat: desired deltat in [s]
        :param allow_upsample_max: maximum upsampling of the signal to
            achieve the desired deltat. Default is `1`.
        :param snap: whether to put the new sampling instants closest to
            multiples of the sampling interval.
        :param initials: ``None``, ``True``, or initial conditions for the
            anti-aliasing filter, obtained from a previous run. In the latter
            two cases the final state of the filter is returned instead of
            ``None``.
        :param demean: whether to demean the signal before filtering.
        :param ftype: which FIR filter to use, choose from `fir, fir-remez`.
            Default is `fir-remez`.
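
        Example (an illustrative sketch; brings a trace to 2 s sampling,
        allowing one intermediate upsampling step if required by the
        decimation table)::

            tr.downsample_to(2.0, allow_upsample_max=2)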

        '''

        ratio = deltat/self.deltat
        rratio = round(ratio)

        ok = False
        for upsratio in range(1, allow_upsample_max+1):
            dratio = (upsratio/self.deltat) / (1./deltat)
            if abs(dratio - round(dratio)) / dratio < 0.0001 and \
                    util.decitab(int(round(dratio))):

                ok = True
                break

        if not ok:
            raise util.UnavailableDecimation('ratio = %g' % ratio)

        if upsratio > 1:
            self.drop_growbuffer()
            ydata = self.ydata
            self.ydata = num.zeros(
                ydata.size*upsratio-(upsratio-1), ydata.dtype)
            self.ydata[::upsratio] = ydata
            for i in range(1, upsratio):
                self.ydata[i::upsratio] = \
                    float(i)/upsratio * ydata[:-1] \
                    + float(upsratio-i)/upsratio * ydata[1:]
            self.deltat = self.deltat/upsratio

            ratio = deltat/self.deltat
            rratio = round(ratio)

        deci_seq = util.decitab(int(rratio))
        finals = []
        for i, ndecimate in enumerate(deci_seq):
            if ndecimate != 1:
                xinitials = None
                if initials is not None:
                    xinitials = initials[i]
                finals.append(self.downsample(
                    ndecimate, snap=snap, initials=xinitials, demean=demean,
                    ftype=ftype))

        if initials is not None:
            return finals


    def resample(self, deltat):
        '''
        Resample to given sampling rate ``deltat``.

        Resampling is performed in the frequency domain.
        '''

        ndata = self.ydata.size
        ntrans = nextpow2(ndata)
        fntrans2 = ntrans * self.deltat/deltat
        ntrans2 = int(round(fntrans2))
        deltat2 = self.deltat * float(ntrans)/float(ntrans2)
        ndata2 = int(round(ndata*self.deltat/deltat2))
        if abs(fntrans2 - ntrans2) > 1e-7:
            logger.warning(
                'resample: requested deltat %g could not be matched exactly: '
                '%g' % (deltat, deltat2))

        data = self.ydata
        data_pad = num.zeros(ntrans, dtype=float)
        data_pad[:ndata] = data
        fdata = num.fft.rfft(data_pad)
        fdata2 = num.zeros((ntrans2+1)//2, dtype=fdata.dtype)
        n = min(fdata.size, fdata2.size)
        fdata2[:n] = fdata[:n]
        data2 = num.fft.irfft(fdata2)
        data2 = data2[:ndata2]
        data2 *= float(ntrans2) / float(ntrans)
        self.deltat = deltat2
        self.set_ydata(data2)


    def resample_simple(self, deltat):
        tyear = 3600*24*365.

        if deltat == self.deltat:
            return

        if abs(self.deltat - deltat) * tyear/deltat < deltat:
            logger.warning(
                'resample_simple: less than one sample would have to be '
                'inserted/deleted per year. Doing nothing.')
            return

        ninterval = int(round(deltat / (self.deltat - deltat)))
        if abs(ninterval) < 20:
            logger.error(
                'resample_simple: sample insertion/deletion interval less '
                'than 20. Results would be erroneous.')
            raise ResamplingFailed()

        delete = False
        if ninterval < 0:
            ninterval = - ninterval
            delete = True

        tyearbegin = util.year_start(self.tmin)

        nmin = int(round((self.tmin - tyearbegin)/deltat))

        ibegin = (((nmin-1)//ninterval)+1) * ninterval - nmin
        nindices = (len(self.ydata) - ibegin - 1) // ninterval + 1
        if nindices > 0:
            indices = ibegin + num.arange(nindices) * ninterval
            data_split = num.split(self.ydata, indices)
            data = []
            for ln, h in zip(data_split[:-1], data_split[1:]):
                if delete:
                    ln = ln[:-1]

                data.append(ln)
                if not delete:
                    if ln.size == 0:
                        v = h[0]
                    else:
                        v = 0.5*(ln[-1] + h[0])
                    data.append(num.array([v], dtype=ln.dtype))

            data.append(data_split[-1])

            ydata_new = num.concatenate(data)

            self.tmin = tyearbegin + nmin * deltat
            self.deltat = deltat
            self.set_ydata(ydata_new)


    def stretch(self, tmin_new, tmax_new):
        '''
        Stretch signal while preserving sample rate using sinc interpolation.

        :param tmin_new: new time of first sample
        :param tmax_new: new time of last sample

        This method can be used to correct for a small linear time drift or
        to introduce sub-sample time shifts. The amount of stretching is
        limited to 10% by the implementation, and the approximations used
        assume it is much smaller than that.
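
        Example (an illustrative sketch; applies a sub-sample shift of a
        quarter sample without changing the sampling rate)::

            tr.stretch(tr.tmin + 0.25*tr.deltat, tr.tmax + 0.25*tr.deltat)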

        '''

        from pyrocko import signal_ext

        i_control = num.array([0, self.ydata.size-1], dtype=num.int64)
        t_control = num.array([tmin_new, tmax_new], dtype=float)

        r = (tmax_new - tmin_new) / self.deltat + 1.0
        n_new = int(round(r))
        if abs(n_new - r) > 0.001:
            n_new = int(math.floor(r))

        assert n_new >= 2

        tmax_new = tmin_new + (n_new-1) * self.deltat

        ydata_new = num.empty(n_new, dtype=float)
        signal_ext.antidrift(i_control, t_control,
                             self.ydata.astype(float),
                             tmin_new, self.deltat, ydata_new)

        self.tmin = tmin_new
        self.set_ydata(ydata_new)
        self._update_ids()


    def nyquist_check(self, frequency, intro='Corner frequency', warn=True,
                      raise_exception=False):

        '''
        Check if a given frequency is above the Nyquist frequency of the
        trace.

        :param intro: string used to introduce the warning/error message
        :param warn: whether to emit a warning
        :param raise_exception: whether to raise an :py:exc:`AboveNyquist`
            exception.
        '''

        if frequency >= 0.5/self.deltat:
            message = '%s (%g Hz) is equal to or higher than Nyquist ' \
                'frequency (%g Hz). (Trace %s)' \
                % (intro, frequency, 0.5/self.deltat, self.name())
            if warn:
                logger.warning(message)
            if raise_exception:
                raise AboveNyquist(message)


    def lowpass(self, order, corner, nyquist_warn=True,
                nyquist_exception=False, demean=True):

        '''
        Apply Butterworth lowpass to the trace.

        :param order: order of the filter
        :param corner: corner frequency of the filter

        Mean is removed before filtering.
        '''

        self.nyquist_check(
            corner, 'Corner frequency of lowpass', nyquist_warn,
            nyquist_exception)

        (b, a) = _get_cached_filter_coefs(
            order, [corner*2.0*self.deltat], btype='low')

        if len(a) != order+1 or len(b) != order+1:
            logger.warning(
                'Erroneous filter coefficients returned by '
                'scipy.signal.butter(). You may need to downsample the '
                'signal before filtering.')

        data = self.ydata.astype(num.float64)
        if demean:
            data -= num.mean(data)
        self.drop_growbuffer()
        self.ydata = signal.lfilter(b, a, data)


    def highpass(self, order, corner, nyquist_warn=True,
                 nyquist_exception=False, demean=True):

        '''
        Apply Butterworth highpass to the trace.

        :param order: order of the filter
        :param corner: corner frequency of the filter

        Mean is removed before filtering.
        '''

        self.nyquist_check(
            corner, 'Corner frequency of highpass', nyquist_warn,
            nyquist_exception)

        (b, a) = _get_cached_filter_coefs(
            order, [corner*2.0*self.deltat], btype='high')

        data = self.ydata.astype(num.float64)
        if len(a) != order+1 or len(b) != order+1:
            logger.warning(
                'Erroneous filter coefficients returned by '
                'scipy.signal.butter(). You may need to downsample the '
                'signal before filtering.')
        if demean:
            data -= num.mean(data)
        self.drop_growbuffer()
        self.ydata = signal.lfilter(b, a, data)


    def bandpass(self, order, corner_hp, corner_lp, demean=True):
        '''
        Apply Butterworth bandpass to the trace.

        :param order: order of the filter
        :param corner_hp: lower corner frequency of the filter
        :param corner_lp: upper corner frequency of the filter

        Mean is removed before filtering.
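
        Example (an illustrative sketch; a 4th-order bandpass between 0.1 Hz
        and 1.0 Hz, valid when the sampling rate is well above 2 Hz)::

            tr.bandpass(4, 0.1, 1.0)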

        '''

        self.nyquist_check(corner_hp, 'Lower corner frequency of bandpass')
        self.nyquist_check(corner_lp, 'Higher corner frequency of bandpass')
        (b, a) = _get_cached_filter_coefs(
            order,
            [corner*2.0*self.deltat for corner in (corner_hp, corner_lp)],
            btype='band')
        data = self.ydata.astype(num.float64)
        if demean:
            data -= num.mean(data)
        self.drop_growbuffer()
        self.ydata = signal.lfilter(b, a, data)


    def bandstop(self, order, corner_hp, corner_lp, demean=True):
        '''
        Apply bandstop (attenuates frequencies in band) to the trace.

        :param order: order of the filter
        :param corner_hp: lower corner frequency of the filter
        :param corner_lp: upper corner frequency of the filter

        Mean is removed before filtering.
        '''

        self.nyquist_check(corner_hp, 'Lower corner frequency of bandstop')
        self.nyquist_check(corner_lp, 'Higher corner frequency of bandstop')
        (b, a) = _get_cached_filter_coefs(
            order,
            [corner*2.0*self.deltat for corner in (corner_hp, corner_lp)],
            btype='bandstop')
        data = self.ydata.astype(num.float64)
        if demean:
            data -= num.mean(data)
        self.drop_growbuffer()
        self.ydata = signal.lfilter(b, a, data)


    def envelope(self, inplace=True):
        '''
        Calculate the envelope of the trace.

        :param inplace: calculate envelope in place

        The calculation follows:

        .. math::

            Y' = \\sqrt{Y^2+H(Y)^2}

        where H is the Hilbert-Transform of the signal Y.
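
        Example (an illustrative sketch; computes the envelope on a copy so
        the original data is preserved)::

            tr_env = tr.envelope(inplace=False)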

        '''

        y = self.ydata.astype(float)
        env = num.abs(hilbert(y))
        if inplace:
            self.drop_growbuffer()
            self.ydata = env
        else:
            tr = self.copy(data=False)
            tr.ydata = env
            return tr


    def taper(self, taperer, inplace=True, chop=False):
        '''
        Apply a :py:class:`Taper` to the trace.

        :param taperer: instance of :py:class:`Taper` subclass
        :param inplace: apply taper inplace
        :param chop: if ``True``: exclude tapered parts from the resulting
            trace
        '''

        if not inplace:
            tr = self.copy()
        else:
            tr = self

        if chop:
            i, n = taperer.span(tr.ydata, tr.tmin, tr.deltat)
            tr.shift(i*tr.deltat)
            tr.set_ydata(tr.ydata[i:i+n])

        taperer(tr.ydata, tr.tmin, tr.deltat)

        if not inplace:
            return tr


    def whiten(self, order=6):
        '''
        Whiten signal in time domain using autoregression and recursive
        filter.

        :param order: order of the autoregression process
        '''

        b, a = self.whitening_coefficients(order)
        self.drop_growbuffer()
        self.ydata = signal.lfilter(b, a, self.ydata)

    def whitening_coefficients(self, order=6):
        ar = yulewalker(self.ydata, order)
        b, a = [1.] + ar.tolist(), [1.]
        return b, a


    def ampspec_whiten(
            self,
            width,
            td_taper='auto',
            fd_taper='auto',
            pad_to_pow2=True,
            demean=True):

        '''
        Whiten signal via frequency domain using moving average on amplitude
        spectra.

        :param width: width of smoothing kernel [Hz]
        :param td_taper: time domain taper, object of type :py:class:`Taper`
            or ``None`` or ``'auto'``.
        :param fd_taper: frequency domain taper, object of type
            :py:class:`Taper` or ``None`` or ``'auto'``.
        :param pad_to_pow2: whether to pad the signal with zeros up to a
            length of 2^n
        :param demean: whether to demean the signal before tapering

        The signal is first demeaned and then tapered using ``td_taper``.
        Then, the spectrum is calculated and inversely weighted with a
        smoothed version of its amplitude spectrum. A moving average is used
        for the smoothing. The smoothed spectrum is then tapered using
        ``fd_taper``. Finally, the smoothed and tapered spectrum is
        back-transformed into the time domain.

        If ``td_taper`` is set to ``'auto'``, ``CosFader(1.0/width)`` is
        used. If ``fd_taper`` is set to ``'auto'``, ``CosFader(width)`` is
        used.
        '''

        ndata = self.data_len()

        if pad_to_pow2:
            ntrans = nextpow2(ndata)
        else:
            ntrans = ndata

        df = 1./(ntrans*self.deltat)
        nw = int(round(width/df))
        if ndata//2+1 <= nw:
            raise TraceTooShort(
                'Samples in trace: %s, samples needed: %s' % (ndata, nw))

        if td_taper == 'auto':
            td_taper = CosFader(1./width)

        if fd_taper == 'auto':
            fd_taper = CosFader(width)

        if td_taper:
            self.taper(td_taper)

        ydata = self.get_ydata().astype(float)
        if demean:
            ydata -= ydata.mean()

        spec = num.fft.rfft(ydata, ntrans)

        amp = num.abs(spec)
        nspec = amp.size
        csamp = num.cumsum(amp)
        amp_smoothed = num.empty(nspec, dtype=csamp.dtype)
        n1, n2 = nw//2, nw//2 + nspec - nw
        amp_smoothed[n1:n2] = (csamp[nw:] - csamp[:-nw]) / nw
        amp_smoothed[:n1] = amp_smoothed[n1]
        amp_smoothed[n2:] = amp_smoothed[n2-1]

        denom = amp_smoothed * amp
        numer = amp
        eps = num.mean(denom) * 1e-9
        if eps == 0.0:
            eps = 1e-9

        numer += eps
        denom += eps
        spec *= numer/denom

        if fd_taper:
            fd_taper(spec, 0., df)

        ydata = num.fft.irfft(spec)
        self.set_ydata(ydata[:ndata])


    def _get_cached_freqs(self, nf, deltaf):
        ck = (nf, deltaf)
        if ck not in Trace.cached_frequencies:
            Trace.cached_frequencies[ck] = deltaf * num.arange(
                nf, dtype=float)

        return Trace.cached_frequencies[ck]


    def bandpass_fft(self, corner_hp, corner_lp):
        '''
        Apply boxcar bandpass to trace (in spectral domain).
        '''

        n = len(self.ydata)
        n2 = nextpow2(n)
        data = num.zeros(n2, dtype=num.float64)
        data[:n] = self.ydata
        fdata = num.fft.rfft(data)
        freqs = self._get_cached_freqs(len(fdata), 1./(self.deltat*n2))
        fdata[0] = 0.0
        fdata *= num.logical_and(corner_hp < freqs, freqs < corner_lp)
        data = num.fft.irfft(fdata)
        self.drop_growbuffer()
        self.ydata = data[:n]


    def shift(self, tshift):
        '''
        Time shift the trace.
        '''

        self.tmin += tshift
        self.tmax += tshift
        self._update_ids()


    def snap(self, inplace=True, interpolate=False):
        '''
        Shift trace samples to nearest integer multiples of the sampling
        interval.

        :param inplace: (boolean) snap traces inplace

        If ``inplace`` is ``False`` and the snapped ``tmin`` and ``tmax``
        differ from the original values by less than 0.01 x deltat,
        :py:func:`snap` returns the unsnapped instance of the original trace.
        '''

        tmin = round(self.tmin/self.deltat)*self.deltat
        tmax = tmin + (self.ydata.size-1)*self.deltat

        if inplace:
            xself = self
        else:
            if abs(self.tmin - tmin) < 1e-2*self.deltat and \
                    abs(self.tmax - tmax) < 1e-2*self.deltat:
                return self

            xself = self.copy()

        if interpolate:
            from pyrocko import signal_ext
            n = xself.data_len()
            ydata_new = num.empty(n, dtype=float)
            i_control = num.array([0, n-1], dtype=num.int64)
            tref = tmin
            t_control = num.array(
                [float(xself.tmin-tref), float(xself.tmax-tref)],
                dtype=float)

            signal_ext.antidrift(i_control, t_control,
                                 xself.ydata.astype(float),
                                 float(tmin-tref), xself.deltat, ydata_new)

            xself.ydata = ydata_new

        xself.tmin = tmin
        xself.tmax = tmax
        xself._update_ids()

        return xself


    def fix_deltat_rounding_errors(self):
        '''
        Try to undo sampling rate rounding errors.

        See :py:func:`fix_deltat_rounding_errors`.
        '''

        self.deltat = fix_deltat_rounding_errors(self.deltat)
        self.tmax = self.tmin + (self.data_len() - 1) * self.deltat


    def sta_lta_centered(self, tshort, tlong, quad=True, scalingmethod=1):
        '''
        Run special STA/LTA filter where the short time window is centered on
        the long time window.

        :param tshort: length of short time window in [s]
        :param tlong: length of long time window in [s]
        :param quad: whether to square the data prior to applying the STA/LTA
            filter
        :param scalingmethod: integer key to select how output values are
            scaled / normalized (``1``, ``2``, or ``3``)

        ============= ====================================== ===========
        Scalingmethod Implementation                         Range
        ============= ====================================== ===========
        ``1``         As/Al* Ts/Tl                           [0,1]
        ``2``         (As/Al - 1) / (Tl/Ts - 1)              [-Ts/Tl,1]
        ``3``         Like ``2`` but clipping range at zero  [0,1]
        ============= ====================================== ===========
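
        Example (an illustrative sketch; a 2 s / 10 s STA/LTA characteristic
        function, computed in place on a trace longer than 10 s)::

            tr.sta_lta_centered(2., 10.)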


        '''

        nshort = int(round(tshort/self.deltat))
        nlong = int(round(tlong/self.deltat))

        assert nshort < nlong
        if nlong > len(self.ydata):
            raise TraceTooShort(
                'Samples in trace: %s, samples needed: %s'
                % (len(self.ydata), nlong))

        if quad:
            sqrdata = self.ydata**2
        else:
            sqrdata = self.ydata

        mavg_short = moving_avg(sqrdata, nshort)
        mavg_long = moving_avg(sqrdata, nlong)

        self.drop_growbuffer()

        if scalingmethod not in (1, 2, 3):
            raise Exception('Invalid scalingmethod argument.')

        if scalingmethod == 1:
            self.ydata = mavg_short/mavg_long * float(nshort)/float(nlong)
        elif scalingmethod in (2, 3):
            self.ydata = (mavg_short/mavg_long - 1.) \
                / ((float(nlong)/float(nshort)) - 1)

        if scalingmethod == 3:
            self.ydata = num.maximum(self.ydata, 0.)


    def sta_lta_right(self, tshort, tlong, quad=True, scalingmethod=1):
        '''
        Run special STA/LTA filter where the short time window is overlapping
        with the last part of the long time window.

        :param tshort: length of short time window in [s]
        :param tlong: length of long time window in [s]
        :param quad: whether to square the data prior to applying the STA/LTA
            filter
        :param scalingmethod: integer key to select how output values are
            scaled / normalized (``1``, ``2``, or ``3``)

        ============= ====================================== ===========
        Scalingmethod Implementation                         Range
        ============= ====================================== ===========
        ``1``         As/Al* Ts/Tl                           [0,1]
        ``2``         (As/Al - 1) / (Tl/Ts - 1)              [-Ts/Tl,1]
        ``3``         Like ``2`` but clipping range at zero  [0,1]
        ============= ====================================== ===========

        With ``scalingmethod=1``, the values produced by this variant of the
        STA/LTA are equivalent to

        .. math::
            s_i = \\frac{s}{l} \\frac{\\frac{1}{s}\\sum_{j=i}^{i+s-1} f_j}
                               {\\frac{1}{l}\\sum_{j=i+s-l}^{i+s-1} f_j}

        where :math:`f_j` are the input samples, :math:`s` is the number of
        samples in the short time window and :math:`l` is the number of
        samples in the long time window.
        '''

        n = self.data_len()
        tmin = self.tmin

        nshort = max(1, int(round(tshort/self.deltat)))
        nlong = max(1, int(round(tlong/self.deltat)))

        assert nshort < nlong

        if nlong > len(self.ydata):
            raise TraceTooShort(
                'Samples in trace: %s, samples needed: %s'
                % (len(self.ydata), nlong))

        if quad:
            sqrdata = self.ydata**2
        else:
            sqrdata = self.ydata

        nshift = int(math.floor(0.5 * (nlong - nshort)))
        if nlong % 2 != 0 and nshort % 2 == 0:
            nshift += 1

        mavg_short = moving_avg(sqrdata, nshort)[nshift:]
        mavg_long = moving_avg(sqrdata, nlong)[:sqrdata.size-nshift]

        self.drop_growbuffer()

        if scalingmethod not in (1, 2, 3):
            raise Exception('Invalid scalingmethod argument.')

        if scalingmethod == 1:
            ydata = mavg_short/mavg_long * nshort/nlong
        elif scalingmethod in (2, 3):
            ydata = (mavg_short/mavg_long - 1.) \
                / ((float(nlong)/float(nshort)) - 1)

        if scalingmethod == 3:
            ydata = num.maximum(ydata, 0.)

        self.set_ydata(ydata)

        self.shift((math.ceil(0.5*nlong) - nshort + 1) * self.deltat)

        self.chop(
            tmin + (nlong - nshort) * self.deltat,
            tmin + (n - nshort) * self.deltat)


    def peaks(self, threshold, tsearch,
              deadtime=False,
              nblock_duration_detection=100):

        '''
        Detect peaks above a given threshold (method 1).

        From every instant where the signal rises above ``threshold``, a time
        length of ``tsearch`` seconds is searched for a maximum. A list with
        tuples (time, value) for each detected peak is returned. The
        ``deadtime`` argument turns on a special deadtime duration detection
        algorithm useful in combination with recursive STA/LTA filters.
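
        Example (an illustrative sketch; finds maxima above an amplitude of
        0.5, searching 1 s past each threshold crossing)::

            tpeaks, apeaks = tr.peaks(0.5, 1.0)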

        '''

        y = self.ydata
        above = num.where(y > threshold, 1, 0)
        deriv = num.zeros(y.size, dtype=num.int8)
        deriv[1:] = above[1:]-above[:-1]
        itrig_positions = num.nonzero(deriv > 0)[0]
        tpeaks = []
        apeaks = []
        tzeros = []
        tzero = self.tmin

        for itrig_pos in itrig_positions:
            ibeg = itrig_pos
            iend = min(
                len(self.ydata),
                itrig_pos + int(math.ceil(tsearch/self.deltat)))
            ipeak = num.argmax(y[ibeg:iend])
            tpeak = self.tmin + (ipeak+ibeg)*self.deltat
            apeak = y[ibeg+ipeak]

            if tpeak < tzero:
                continue

            if deadtime:
                ibeg = itrig_pos
                iblock = 0
                nblock = nblock_duration_detection
                totalsum = 0.
                while True:
                    if ibeg+iblock*nblock >= len(y):
                        tzero = self.tmin + (len(y)-1) * self.deltat
                        break

                    logy = num.log(
                        y[ibeg+iblock*nblock:ibeg+(iblock+1)*nblock])
                    logy[0] += totalsum
                    ysum = num.cumsum(logy)
                    totalsum = ysum[-1]
                    below = num.where(ysum <= 0., 1, 0)
                    deriv = num.zeros(ysum.size, dtype=num.int8)
                    deriv[1:] = below[1:]-below[:-1]
                    izero_positions = num.nonzero(deriv > 0)[0] + iblock*nblock
                    if len(izero_positions) > 0:
                        tzero = self.tmin + self.deltat * (
                            ibeg + izero_positions[0])
                        break
                    iblock += 1
            else:
                tzero = ibeg*self.deltat + self.tmin + tsearch

            tpeaks.append(tpeak)
            apeaks.append(apeak)
            tzeros.append(tzero)

        if deadtime:
            return tpeaks, apeaks, tzeros
        else:
            return tpeaks, apeaks


    def peaks2(self, threshold, tsearch):

        '''
        Detect peaks above a given threshold (method 2).

        This variant of peak detection is a bit more robust (and slower) than
        the one implemented in :py:meth:`Trace.peaks`. First all samples with
        ``a[i-1] < a[i] > a[i+1]`` are masked as potential peaks. From these,
        iteratively the one with the maximum amplitude ``a[j]`` and time
        ``t[j]`` is chosen and potential peaks within
        ``t[j] - tsearch, t[j] + tsearch``
        are discarded. The algorithm stops when ``a[j] < threshold`` or when
        no more potential peaks are left.
        '''

        a = num.copy(self.ydata)

        amin = num.min(a)

        a[0] = amin
        a[1: -1][num.diff(a, 2) <= 0.] = amin
        a[-1] = amin

        data = []
        while True:
            imax = num.argmax(a)
            amax = a[imax]

            if amax < threshold or amax == amin:
                break

            data.append((self.tmin + imax * self.deltat, amax))

            ntsearch = int(round(tsearch / self.deltat))
            a[max(imax-ntsearch//2, 0):min(imax+ntsearch//2, a.size)] = amin

        if data:
            data.sort()
            tpeaks, apeaks = list(zip(*data))
        else:
            tpeaks, apeaks = [], []

        return tpeaks, apeaks


    def extend(self, tmin=None, tmax=None, fillmethod='zeros'):
        '''
        Extend trace to given span.

        :param tmin: begin time of new span
        :param tmax: end time of new span
        :param fillmethod: ``'zeros'``, ``'repeat'``, ``'mean'``, or
            ``'median'``
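
        Example (an illustrative sketch; pads 10 s of repeated edge values
        onto both ends)::

            tr.extend(tr.tmin - 10., tr.tmax + 10., fillmethod='repeat')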

        '''

        nold = self.ydata.size

        if tmin is not None:
            nl = min(0, int(round((tmin-self.tmin)/self.deltat)))
        else:
            nl = 0

        if tmax is not None:
            nh = max(nold - 1, int(round((tmax-self.tmin)/self.deltat)))
        else:
            nh = nold - 1

        n = nh - nl + 1
        data = num.zeros(n, dtype=self.ydata.dtype)
        data[-nl:-nl + nold] = self.ydata
        if self.ydata.size >= 1:
            if fillmethod == 'repeat':
                data[:-nl] = self.ydata[0]
                data[-nl + nold:] = self.ydata[-1]
            elif fillmethod == 'median':
                v = num.median(self.ydata)
                data[:-nl] = v
                data[-nl + nold:] = v
            elif fillmethod == 'mean':
                v = num.mean(self.ydata)
                data[:-nl] = v
                data[-nl + nold:] = v

        self.drop_growbuffer()
        self.ydata = data

        self.tmin += nl * self.deltat
        self.tmax = self.tmin + (self.ydata.size - 1) * self.deltat

        self._update_ids()


    def transfer(self,
                 tfade=0.,
                 freqlimits=None,
                 transfer_function=None,
                 cut_off_fading=True,
                 demean=True,
                 invert=False):

        '''
        Return new trace with transfer function applied (convolution).

        :param tfade: rise/fall time in seconds of taper applied in
            timedomain at both ends of trace.
        :param freqlimits: 4-tuple with corner frequencies in Hz.
        :param transfer_function: FrequencyResponse object; must provide a
            method 'evaluate(freqs)', which returns the transfer function
            coefficients at the frequencies 'freqs'.
        :param cut_off_fading: whether to cut off rise/fall interval in
            output trace.
        :param demean: remove mean before applying transfer function
        :param invert: set to True to do a deconvolution
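
        Example (an illustrative sketch of response deconvolution between
        0.01 Hz and 5 Hz; ``resp`` stands for a response object with an
        ``evaluate(freqs)`` method, e.g. obtained from station metadata)::

            tr_rest = tr.transfer(
                tfade=10., freqlimits=(0.01, 0.02, 4., 5.),
                transfer_function=resp, invert=True)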

        '''

        if transfer_function is None:
            transfer_function = FrequencyResponse()

        if self.tmax - self.tmin <= tfade*2.:
            raise TraceTooShort(
                'Trace %s.%s.%s.%s too short for fading length setting. '
                'trace length = %g, fading length = %g'
                % (self.nslc_id + (self.tmax-self.tmin, tfade)))

        if freqlimits is None and (
                transfer_function is None or transfer_function.is_scalar()):

            # special case for flat responses

            output = self.copy()
            data = self.ydata
            ndata = data.size

            if transfer_function is not None:
                c = num.abs(transfer_function.evaluate(num.ones(1))[0])

                if invert:
                    c = 1.0/c

                data *= c

            if tfade != 0.0:
                data *= costaper(
                    0., tfade, self.deltat*(ndata-1)-tfade, self.deltat*ndata,
                    ndata, self.deltat)

            output.ydata = data

        else:
            ndata = self.ydata.size
            ntrans = nextpow2(ndata*1.2)
            coefs = self._get_tapered_coefs(
                ntrans, freqlimits, transfer_function, invert=invert)

            data = self.ydata

            data_pad = num.zeros(ntrans, dtype=float)
            data_pad[:ndata] = data
            if demean:
                data_pad[:ndata] -= data.mean()

            if tfade != 0.0:
                data_pad[:ndata] *= costaper(
                    0., tfade, self.deltat*(ndata-1)-tfade, self.deltat*ndata,
                    ndata, self.deltat)

            fdata = num.fft.rfft(data_pad)
            fdata *= coefs
            ddata = num.fft.irfft(fdata)
            output = self.copy()
            output.ydata = ddata[:ndata]

        if cut_off_fading and tfade != 0.0:
            try:
                output.chop(output.tmin+tfade, output.tmax-tfade, inplace=True)
            except NoData:
                raise TraceTooShort(
                    'Trace %s.%s.%s.%s too short for fading length setting. '
                    'trace length = %g, fading length = %g'
                    % (self.nslc_id + (self.tmax-self.tmin, tfade)))
        else:
            output.ydata = output.ydata.copy()

        return output


    def differentiate(self, n=1, order=4, inplace=True):
        '''
        Approximate first or second derivative of the trace.

        :param n: 1 for first derivative, 2 for second
        :param order: order of the approximation, 2 and 4 are supported
        :param inplace: if ``True`` the trace is differentiated in place,
            otherwise a new trace object with the derivative is returned.

        Raises :py:exc:`ValueError` for unsupported `n` or `order`.

        See :py:func:`~pyrocko.util.diff_fd` for implementation details.
        '''

        ddata = util.diff_fd(n, order, self.deltat, self.ydata)

        if inplace:
            self.ydata = ddata
        else:
            output = self.copy(data=False)
            output.set_ydata(ddata)
            return output


    def drop_chain_cache(self):
        if self._pchain:
            self._pchain.clear()

    def init_chain(self):
        self._pchain = pchain.Chain(
            do_downsample,
            do_extend,
            do_pre_taper,
            do_fft,
            do_filter,
            do_ifft)

    def run_chain(self, tmin, tmax, deltat, setup, nocache):
        if setup.domain == 'frequency_domain':
            _, _, data = self._pchain(
                (self, deltat),
                (tmin, tmax),
                (setup.taper,),
                (setup.filter,),
                (setup.filter,),
                nocache=nocache)

            return num.abs(data), num.abs(data)

        else:
            processed = self._pchain(
                (self, deltat),
                (tmin, tmax),
                (setup.taper,),
                (setup.filter,),
                (setup.filter,),
                (),
                nocache=nocache)

            if setup.domain == 'time_domain':
                data = processed.get_ydata()

            elif setup.domain == 'envelope':
                processed = processed.envelope(inplace=False)

            elif setup.domain == 'absolute':
                processed.set_ydata(num.abs(processed.get_ydata()))

            return processed.get_ydata(), processed


    def misfit(self, candidate, setup, nocache=False, debug=False):
        '''
        Calculate misfit and normalization factor against candidate trace.

        :param candidate: :py:class:`Trace` object
        :param setup: :py:class:`MisfitSetup` object
        :returns: tuple ``(m, n)``, where m is the misfit value and n is the
            normalization divisor

        If the sampling rates of ``self`` and ``candidate`` differ, the trace
        with the higher sampling rate will be downsampled.
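
        Example (an illustrative sketch; ``setup`` is assumed to be a
        :py:class:`MisfitSetup` instance configured elsewhere, ``tr_obs``
        and ``tr_syn`` are :py:class:`Trace` objects)::

            m, n = tr_obs.misfit(candidate=tr_syn, setup=setup)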

        '''

        a = self
        b = candidate

        for tr in (a, b):
            if not tr._pchain:
                tr.init_chain()

        deltat = max(a.deltat, b.deltat)
        tmin = min(a.tmin, b.tmin) - deltat
        tmax = max(a.tmax, b.tmax) + deltat

        adata, aproc = a.run_chain(tmin, tmax, deltat, setup, nocache)
        bdata, bproc = b.run_chain(tmin, tmax, deltat, setup, nocache)

        if setup.domain != 'cc_max_norm':
            m, n = Lx_norm(bdata, adata, norm=setup.norm)
        else:
            ctr = correlate(aproc, bproc, mode='full', normalization='normal')
            ccmax = ctr.max()[1]
            m = 0.5 - 0.5 * ccmax
            n = 0.5

        if debug:
            return m, n, aproc, bproc
        else:
            return m, n


    def spectrum(self, pad_to_pow2=False, tfade=None):
        '''
        Get FFT spectrum of trace.

        :param pad_to_pow2: whether to zero-pad the data to next larger
            power-of-two length
        :param tfade: ``None`` or a time length in seconds, to apply cosine
            shaped tapers to both ends of the trace

        :returns: a tuple with (frequencies, values)
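
        Example (an illustrative sketch; plots the amplitude spectrum using
        matplotlib, which is assumed to be installed)::

            import numpy as num
            import matplotlib.pyplot as plt

            freqs, spec = tr.spectrum(pad_to_pow2=True)
            plt.loglog(freqs[1:], num.abs(spec[1:]))
            plt.show()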

        '''

        ndata = self.ydata.size

        if pad_to_pow2:
            ntrans = nextpow2(ndata)
        else:
            ntrans = ndata

        if tfade is None:
            ydata = self.ydata
        else:
            ydata = self.ydata * costaper(
                0., tfade, self.deltat*(ndata-1)-tfade, self.deltat*ndata,
                ndata, self.deltat)

        fydata = num.fft.rfft(ydata, ntrans)
        df = 1./(ntrans*self.deltat)
        fxdata = num.arange(len(fydata))*df
        return fxdata, fydata


1791 def multi_filter(self, filter_freqs, bandwidth): 

1792 

1793 class Gauss(FrequencyResponse): 

1794 f0 = Float.T() 

1795 a = Float.T(default=1.0) 

1796 

1797 def __init__(self, f0, a=1.0, **kwargs): 

1798 FrequencyResponse.__init__(self, f0=f0, a=a, **kwargs) 

1799 

1800 def evaluate(self, freqs): 

1801 omega0 = 2.*math.pi*self.f0 

1802 omega = 2.*math.pi*freqs 

1803 return num.exp(-((omega-omega0) 

1804 / (self.a*omega0))**2) 

1805 

1806 freqs, coefs = self.spectrum() 

1807 n = self.data_len() 

1808 nfilt = len(filter_freqs) 

1809 signal_tf = num.zeros((nfilt, n)) 

1810 centroid_freqs = num.zeros(nfilt) 

1811 for ifilt, f0 in enumerate(filter_freqs): 

1812 taper = Gauss(f0, a=bandwidth) 

1813 weights = taper.evaluate(freqs) 

1814 nhalf = freqs.size 

1815 analytic_spec = num.zeros(n, dtype=complex) 

1816 analytic_spec[:nhalf] = coefs*weights 

1817 

1818 enorm = num.abs(analytic_spec[:nhalf])**2 

1819 enorm /= num.sum(enorm) 

1820 

1821 if n % 2 == 0: 

1822 analytic_spec[1:nhalf-1] *= 2. 

1823 else: 

1824 analytic_spec[1:nhalf] *= 2. 

1825 

1826 analytic = num.fft.ifft(analytic_spec) 

1827 signal_tf[ifilt, :] = num.abs(analytic) 

1828 

1829 enorm = num.abs(analytic_spec[:nhalf])**2 

1830 enorm /= num.sum(enorm) 

1831 centroid_freqs[ifilt] = num.sum(freqs*enorm) 

1832 

1833 return centroid_freqs, signal_tf 

1834 

1835 def _get_tapered_coefs( 

1836 self, ntrans, freqlimits, transfer_function, invert=False): 

1837 

1838 deltaf = 1./(self.deltat*ntrans) 

1839 nfreqs = ntrans//2 + 1 

1840 transfer = num.ones(nfreqs, dtype=complex) 

1841 hi = snapper(nfreqs, deltaf) 

1842 if freqlimits is not None: 

1843 a, b, c, d = freqlimits 

1844 freqs = num.arange(hi(d)-hi(a), dtype=float)*deltaf \ 

1845 + hi(a)*deltaf 

1846 

1847 if invert: 

1848 coeffs = transfer_function.evaluate(freqs) 

1849 if num.any(coeffs == 0.0): 

1850 raise InfiniteResponse('%s.%s.%s.%s' % self.nslc_id) 

1851 

1852 transfer[hi(a):hi(d)] = 1.0 / transfer_function.evaluate(freqs) 

1853 else: 

1854 transfer[hi(a):hi(d)] = transfer_function.evaluate(freqs) 

1855 

1856 tapered_transfer = costaper(a, b, c, d, nfreqs, deltaf)*transfer 

1857 else: 

1858 if invert: 

1859 raise Exception( 

1860 'transfer: `freqlimits` must be given when `invert` is ' 

1861 'set to `True`') 

1862 

1863 freqs = num.arange(nfreqs) * deltaf 

1864 tapered_transfer = transfer_function.evaluate(freqs) 

1865 

1866 tapered_transfer[0] = 0.0 # don't introduce static offsets 

1867 return tapered_transfer 

1868 

1869 def fill_template(self, template, **additional): 

1870 ''' 

1871 Fill string template with trace metadata. 

1872 

1873 Uses normal python '%(placeholder)s' string templates. The following 

1874 placeholders are considered: ``network``, ``station``, ``location``, 

1875 ``channel``, ``tmin`` (time of first sample), ``tmax`` (time of last 

1876 sample), ``tmin_ms``, ``tmax_ms``, ``tmin_us``, ``tmax_us``, 

1877 ``tmin_year``, ``tmax_year``, ``tmin_month``, ``tmax_month``, 

1878 ``tmin_day``, ``tmax_day``, ``julianday``. The variants ending with 

1879 ``'_ms'`` include milliseconds, those with ``'_us'`` include 

1880 microseconds, those with ``'_year'`` contain only the year. 

1881 ''' 

1882 

1883 template = template.replace('%n', '%(network)s')\ 

1884 .replace('%s', '%(station)s')\ 

1885 .replace('%l', '%(location)s')\ 

1886 .replace('%c', '%(channel)s')\ 

1887 .replace('%b', '%(tmin)s')\ 

1888 .replace('%e', '%(tmax)s')\ 

1889 .replace('%j', '%(julianday)s') 

1890 

1891 params = dict( 

1892 zip(('network', 'station', 'location', 'channel'), self.nslc_id)) 

1893 params['tmin'] = util.time_to_str( 

1894 self.tmin, format='%Y-%m-%d_%H-%M-%S') 

1895 params['tmax'] = util.time_to_str( 

1896 self.tmax, format='%Y-%m-%d_%H-%M-%S') 

1897 params['tmin_ms'] = util.time_to_str( 

1898 self.tmin, format='%Y-%m-%d_%H-%M-%S.3FRAC') 

1899 params['tmax_ms'] = util.time_to_str( 

1900 self.tmax, format='%Y-%m-%d_%H-%M-%S.3FRAC') 

1901 params['tmin_us'] = util.time_to_str( 

1902 self.tmin, format='%Y-%m-%d_%H-%M-%S.6FRAC') 

1903 params['tmax_us'] = util.time_to_str( 

1904 self.tmax, format='%Y-%m-%d_%H-%M-%S.6FRAC') 

1905 params['tmin_year'], params['tmin_month'], params['tmin_day'] \ 

1906 = util.time_to_str(self.tmin, format='%Y-%m-%d').split('-') 

1907 params['tmax_year'], params['tmax_month'], params['tmax_day'] \ 

1908 = util.time_to_str(self.tmax, format='%Y-%m-%d').split('-') 

1909 params['julianday'] = util.julian_day_of_year(self.tmin) 

1910 params.update(additional) 

1911 return template % params 

1912 

1913 def plot(self): 

1914 ''' 

1915 Show trace with matplotlib. 

1916 

1917 See also: :py:meth:`Trace.snuffle`. 

1918 ''' 

1919 

1920 import pylab 

1921 pylab.plot(self.get_xdata(), self.get_ydata()) 

1922 name = '%s %s %s - %s' % ( 

1923 self.channel, 

1924 self.station, 

1925 time.strftime("%d-%m-%y %H:%M:%S", time.gmtime(self.tmin)), 

1926 time.strftime("%d-%m-%y %H:%M:%S", time.gmtime(self.tmax))) 

1927 

1928 pylab.title(name) 

1929 pylab.show() 

1930 

1931 def snuffle(self, **kwargs): 

1932 ''' 

1933 Show trace in a snuffler window. 

1934 

1935 :param stations: list of `pyrocko.model.Station` objects or ``None`` 

1936 :param events: list of `pyrocko.model.Event` objects or ``None`` 

1937 :param markers: list of `pyrocko.gui.util.Marker` objects or ``None`` 

1938 :param ntracks: float, number of tracks to be shown initially (default: 

1939 12) 

1940 :param follow: time interval (in seconds) for real time follow mode or 

1941 ``None`` 

1942 :param controls: bool, whether to show the main controls (default: 

1943 ``True``) 

1944 :param opengl: bool, whether to use opengl (default: ``False``) 

1945 ''' 

1946 

1947 return snuffle([self], **kwargs) 

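
# Editor's sketch for :py:meth:`Trace.spectrum`, as referenced in its
# docstring: plot an amplitude spectrum. The trace ``tr`` and the use of
# matplotlib are assumptions for illustration, not requirements of this
# module.

def _example_spectrum(tr):
    import matplotlib.pyplot as plt

    # Pad to a power of two for a fast transform; taper 0.5 s at both ends
    # to suppress edge artifacts.
    freqs, spec = tr.spectrum(pad_to_pow2=True, tfade=0.5)

    plt.loglog(freqs[1:], num.abs(spec[1:]))  # skip the zero-frequency bin
    plt.xlabel('Frequency [Hz]')
    plt.ylabel('Amplitude')
    plt.show()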


def snuffle(traces, **kwargs):
    '''
    Show traces in a snuffler window.

    :param stations: list of `pyrocko.model.Station` objects or ``None``
    :param events: list of `pyrocko.model.Event` objects or ``None``
    :param markers: list of `pyrocko.gui.util.Marker` objects or ``None``
    :param ntracks: int, number of tracks to be shown initially (default: 12)
    :param follow: time interval (in seconds) for real time follow mode or
        ``None``
    :param controls: bool, whether to show the main controls (default:
        ``True``)
    :param opengl: bool, whether to use opengl (default: ``False``)
    '''

    from pyrocko import pile
    from pyrocko.gui import snuffler
    p = pile.Pile()
    if traces:
        trf = pile.MemTracesFile(None, traces)
        p.add_file(trf)
    return snuffler.snuffle(p, **kwargs)

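
# Editor's sketch for :py:meth:`Trace.fill_template`, as referenced in its
# docstring. All names and values below are illustrative.

def _example_fill_template():
    tr = Trace(
        network='GR', station='HAM3', channel='BHZ',
        tmin=util.str_to_time('2010-01-01 10:00:00'),
        deltat=0.01, ydata=num.zeros(1000))

    # Placeholders are filled from the trace's codes and time span:
    return tr.fill_template(
        'data/%(network)s/%(station)s/trace_%(channel)s_%(tmin_ms)s.mseed')
    # -> 'data/GR/HAM3/trace_BHZ_2010-01-01_10-00-00.000.mseed'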


class InfiniteResponse(Exception):
    '''
    This exception is raised by :py:class:`Trace` operations when
    deconvolution of a frequency response (instrument response transfer
    function) would result in a division by zero.
    '''


class MisalignedTraces(Exception):
    '''
    This exception is raised by some :py:class:`Trace` operations when tmin,
    tmax or number of samples do not match.
    '''

    pass


class NoData(Exception):
    '''
    This exception is raised by some :py:class:`Trace` operations when no or
    not enough data is available.
    '''

    pass


class AboveNyquist(Exception):
    '''
    This exception is raised by some :py:class:`Trace` operations when given
    frequencies are above the Nyquist frequency.
    '''

    pass


class TraceTooShort(Exception):
    '''
    This exception is raised by some :py:class:`Trace` operations when the
    trace is too short.
    '''

    pass


class ResamplingFailed(Exception):
    pass


def minmax(traces, key=None, mode='minmax', outer_mode='minmax'):

    '''
    Get data range given traces grouped by selected pattern.

    :param key: a callable which takes as single argument a trace and
        returns a key for the grouping of the results. If this is ``None``,
        the default, ``lambda tr: (tr.network, tr.station, tr.location,
        tr.channel)`` is used.
    :param mode: ``'minmax'`` or floating point number. If this is
        ``'minmax'``, minimum and maximum of the traces are used, if it is a
        number, mean +- standard deviation times ``mode`` is used.
    :param outer_mode: ``'minmax'`` to use minimum and maximum of the
        single-trace ranges, or ``'robust'`` to use percentiles, discarding
        the 10% most extreme values on either end.

    :returns: a dict with the combined data ranges.

    Examples::

        ranges = minmax(traces, lambda tr: tr.channel)
        print(ranges['N'])  # min & max of all traces with channel == 'N'
        print(ranges['E'])  # min & max of all traces with channel == 'E'

        ranges = minmax(traces, lambda tr: (tr.network, tr.station))
        print(ranges['GR', 'HAM3'])  # min & max of all traces with
                                     # network == 'GR' and station == 'HAM3'

        ranges = minmax(traces, lambda tr: None)
        print(ranges[None])  # min & max of all traces
    '''

    if key is None:
        key = _default_key

    ranges = defaultdict(list)
    for trace in traces:
        if isinstance(mode, str) and mode == 'minmax':
            mi, ma = num.nanmin(trace.ydata), num.nanmax(trace.ydata)
        else:
            mean = trace.ydata.mean()
            std = trace.ydata.std()
            mi, ma = mean-std*mode, mean+std*mode

        k = key(trace)
        ranges[k].append((mi, ma))

    for k in ranges:
        mins, maxs = num.array(ranges[k]).T
        if outer_mode == 'minmax':
            ranges[k] = num.nanmin(mins), num.nanmax(maxs)
        elif outer_mode == 'robust':
            ranges[k] = num.percentile(mins, 10.), num.percentile(maxs, 90.)

    return ranges


def minmaxtime(traces, key=None):

    '''
    Get time range given traces grouped by selected pattern.

    :param key: a callable which takes as single argument a trace and
        returns a key for the grouping of the results. If this is ``None``,
        the default, ``lambda tr: (tr.network, tr.station, tr.location,
        tr.channel)`` is used.

    :returns: a dict with the combined time ranges.
    '''

    if key is None:
        key = _default_key

    ranges = {}
    for trace in traces:
        mi, ma = trace.tmin, trace.tmax
        k = key(trace)
        if k not in ranges:
            ranges[k] = mi, ma
        else:
            tmi, tma = ranges[k]
            ranges[k] = min(tmi, mi), max(tma, ma)

    return ranges


def degapper(
        traces,
        maxgap=5,
        fillmethod='interpolate',
        deoverlap='use_second',
        maxlap=None):

    '''
    Try to connect traces and remove gaps.

    This method will combine adjacent traces, which match in their network,
    station, location and channel attributes. Overlapping parts are handled
    according to the ``deoverlap`` argument.

    :param traces: input traces, must be sorted by their full_id attribute.
    :param maxgap: maximum number of samples to interpolate.
    :param fillmethod: what to put into the gaps: 'interpolate' or 'zeros'.
    :param deoverlap: how to handle overlaps: 'use_second' to use data from
        second trace (default), 'use_first' to use data from first trace,
        'crossfade_cos' to crossfade with cosine taper, 'add' to add
        amplitude values.
    :param maxlap: maximum number of samples of overlap which are removed.

    :returns: list of traces
    '''

    in_traces = traces
    out_traces = []
    if not in_traces:
        return out_traces
    out_traces.append(in_traces.pop(0))
    while in_traces:

        a = out_traces[-1]
        b = in_traces.pop(0)

        avirt, bvirt = a.ydata is None, b.ydata is None
        assert avirt == bvirt, \
            'traces given to degapper() must either all have data or have ' \
            'no data.'

        virtual = avirt and bvirt

        if (a.nslc_id == b.nslc_id and a.deltat == b.deltat
                and a.data_len() >= 1 and b.data_len() >= 1
                and (virtual or a.ydata.dtype == b.ydata.dtype)):

            dist = (b.tmin-(a.tmin+(a.data_len()-1)*a.deltat))/a.deltat
            idist = int(round(dist))
            if abs(dist - idist) > 0.05 and idist <= maxgap:
                # logger.warning('Cannot degap traces with displaced '
                #                'sampling (%s, %s, %s, %s)' % a.nslc_id)
                pass
            else:
                if 1 < idist <= maxgap:
                    if not virtual:
                        if fillmethod == 'interpolate':
                            filler = a.ydata[-1] + (
                                ((1.0 + num.arange(idist-1, dtype=float))
                                 / idist) * (b.ydata[0]-a.ydata[-1])
                            ).astype(a.ydata.dtype)
                        elif fillmethod == 'zeros':
                            filler = num.zeros(idist-1, dtype=a.ydata.dtype)
                        a.ydata = num.concatenate((a.ydata, filler, b.ydata))
                    a.tmax = b.tmax
                    if a.mtime and b.mtime:
                        a.mtime = max(a.mtime, b.mtime)
                    continue

                elif idist == 1:
                    if not virtual:
                        a.ydata = num.concatenate((a.ydata, b.ydata))
                    a.tmax = b.tmax
                    if a.mtime and b.mtime:
                        a.mtime = max(a.mtime, b.mtime)
                    continue

                elif idist <= 0 and (maxlap is None or -maxlap < idist):
                    if b.tmax > a.tmax:
                        if not virtual:
                            na = a.ydata.size
                            n = -idist+1
                            if deoverlap == 'use_second':
                                a.ydata = num.concatenate(
                                    (a.ydata[:-n], b.ydata))
                            elif deoverlap in ('use_first', 'crossfade_cos'):
                                a.ydata = num.concatenate(
                                    (a.ydata, b.ydata[n:]))
                            elif deoverlap == 'add':
                                a.ydata[-n:] += b.ydata[:n]
                                a.ydata = num.concatenate(
                                    (a.ydata, b.ydata[n:]))
                            else:
                                assert False, 'unknown deoverlap method'

                            if deoverlap == 'crossfade_cos':
                                n = -idist+1
                                taper = 0.5-0.5*num.cos(
                                    (1.+num.arange(n))/(1.+n)*num.pi)
                                a.ydata[na-n:na] *= 1.-taper
                                a.ydata[na-n:na] += b.ydata[:n] * taper

                        a.tmax = b.tmax
                        if a.mtime and b.mtime:
                            a.mtime = max(a.mtime, b.mtime)
                        continue
                    else:
                        # make short second trace vanish
                        continue

        if b.data_len() >= 1:
            out_traces.append(b)

    for tr in out_traces:
        tr._update_ids()

    return out_traces

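
# Editor's sketch for :py:func:`degapper`: two synthetic traces separated by
# a small gap are joined into one continuous trace. Purely illustrative.

def _example_degapper():
    deltat = 0.01
    t1 = Trace(station='X1', tmin=0.0, deltat=deltat, ydata=num.ones(100))

    # second trace starts three samples after the end of the first:
    t2 = Trace(station='X1', tmin=t1.tmax + 3*deltat, deltat=deltat,
               ydata=num.ones(100))

    joined = degapper([t1, t2], maxgap=5, fillmethod='interpolate')
    assert len(joined) == 1  # the gap was small enough to be bridged
    return joined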


def rotate(traces, azimuth, in_channels, out_channels):
    '''
    2D rotation of traces.

    :param traces: list of input traces
    :param azimuth: difference of the azimuths of the component directions
        (azimuth of out_channels[0]) - (azimuth of in_channels[0])
    :param in_channels: names of the input channels (e.g. 'N', 'E')
    :param out_channels: names of the output channels (e.g. 'R', 'T')
    :returns: list of rotated traces
    '''

    phi = azimuth/180.*math.pi
    cphi = math.cos(phi)
    sphi = math.sin(phi)
    rotated = []
    in_channels = tuple(_channels_to_names(in_channels))
    out_channels = tuple(_channels_to_names(out_channels))
    for a in traces:
        for b in traces:
            if ((a.channel, b.channel) == in_channels and
                    a.nslc_id[:3] == b.nslc_id[:3] and
                    abs(a.deltat-b.deltat) < a.deltat*0.001):
                tmin = max(a.tmin, b.tmin)
                tmax = min(a.tmax, b.tmax)

                if tmin < tmax:
                    ac = a.chop(tmin, tmax, inplace=False, include_last=True)
                    bc = b.chop(tmin, tmax, inplace=False, include_last=True)
                    if abs(ac.tmin - bc.tmin) > ac.deltat*0.01:
                        logger.warning(
                            'Cannot rotate traces with displaced sampling '
                            '(%s, %s, %s, %s)' % a.nslc_id)
                        continue

                    acydata = ac.get_ydata()*cphi+bc.get_ydata()*sphi
                    bcydata = -ac.get_ydata()*sphi+bc.get_ydata()*cphi
                    ac.set_ydata(acydata)
                    bc.set_ydata(bcydata)

                    ac.set_codes(channel=out_channels[0])
                    bc.set_codes(channel=out_channels[1])
                    rotated.append(ac)
                    rotated.append(bc)

    return rotated


def rotate_to_rt(n, e, source, receiver, out_channels=('R', 'T')):
    '''
    Rotate traces from NE to RT system.

    :param n:
        North trace.
    :type n:
        :py:class:`~pyrocko.trace.Trace`

    :param e:
        East trace.
    :type e:
        :py:class:`~pyrocko.trace.Trace`

    :param source:
        Source of the recorded signal.
    :type source:
        :py:class:`pyrocko.gf.seismosizer.Source`

    :param receiver:
        Receiver of the recorded signal.
    :type receiver:
        :py:class:`pyrocko.model.location.Location`

    :param out_channels:
        Channel codes of the output channels (radial, transversal).
        Default is ('R', 'T').
    :type out_channels:
        tuple[str, str], optional

    :returns:
        Rotated traces (radial, transversal).
    :rtype:
        tuple[
            :py:class:`~pyrocko.trace.Trace`,
            :py:class:`~pyrocko.trace.Trace`]
    '''
    azimuth = orthodrome.azimuth(receiver, source) + 180.
    in_channels = n.channel, e.channel
    out = rotate(
        [n, e], azimuth,
        in_channels=in_channels,
        out_channels=out_channels)

    assert len(out) == 2
    for tr in out:
        if tr.channel == out_channels[0]:
            r = tr
        elif tr.channel == out_channels[1]:
            t = tr
        else:
            assert False

    return r, t

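
# Editor's sketch for :py:func:`rotate_to_rt`. It assumes two horizontal
# component traces ``tr_n`` and ``tr_e`` with overlapping time spans, and
# hypothetical source/receiver positions; any objects with latitude and
# longitude understood by :py:func:`pyrocko.orthodrome.azimuth` would do.

def _example_rotate_to_rt(tr_n, tr_e):
    from pyrocko.model.location import Location

    source = Location(lat=10.0, lon=12.0)
    receiver = Location(lat=10.5, lon=12.5)

    tr_r, tr_t = rotate_to_rt(tr_n, tr_e, source, receiver)
    return tr_r, tr_t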


def rotate_to_lqt(traces, backazimuth, incidence, in_channels,
                  out_channels=('L', 'Q', 'T')):
    '''
    Rotate traces from ZNE to LQT system.

    :param traces: list of traces in arbitrary order
    :param backazimuth: backazimuth in degrees clockwise from north
    :param incidence: incidence angle in degrees from vertical
    :param in_channels: input channel names
    :param out_channels: output channel names (default: ('L', 'Q', 'T'))
    :returns: list of transformed traces
    '''
    i = incidence/180.*num.pi
    b = backazimuth/180.*num.pi

    ci = num.cos(i)
    cb = num.cos(b)
    si = num.sin(i)
    sb = num.sin(b)

    rotmat = num.array(
        [[ci, -cb*si, -sb*si], [si, cb*ci, sb*ci], [0., sb, -cb]])
    return project(traces, rotmat, in_channels, out_channels)


def _decompose(a):
    '''
    Decompose matrix into independent submatrices.
    '''

    def depends(iout, a):
        row = a[iout, :]
        return set(num.arange(row.size).compress(row != 0.0))

    def provides(iin, a):
        col = a[:, iin]
        return set(num.arange(col.size).compress(col != 0.0))

    a = num.asarray(a)
    outs = set(range(a.shape[0]))
    systems = []
    while outs:
        iout = outs.pop()

        gout = set()
        for iin in depends(iout, a):
            gout.update(provides(iin, a))

        if not gout:
            continue

        gin = set()
        for iout2 in gout:
            gin.update(depends(iout2, a))

        if not gin:
            continue

        for iout2 in gout:
            if iout2 in outs:
                outs.remove(iout2)

        gin = list(gin)
        gin.sort()
        gout = list(gout)
        gout.sort()

        systems.append((gin, gout, a[gout, :][:, gin]))

    return systems


def _channels_to_names(channels):
    from pyrocko import squirrel
    names = []
    for ch in channels:
        if isinstance(ch, model.Channel):
            names.append(ch.name)
        elif isinstance(ch, squirrel.Channel):
            names.append(ch.codes.channel)
        else:
            names.append(ch)

    return names


def project(traces, matrix, in_channels, out_channels):
    '''
    Affine transform of three-component traces.

    Compute matrix-vector product of three-component traces, to e.g. rotate
    traces into a different basis. The traces are distinguished and ordered
    by their channel attribute. The transform is applied to overlapping
    parts of any appropriate combinations of the input traces. This should
    allow this function to be robust in the presence of data gaps. It also
    tries to apply the transformation to subsets of the channels, if this is
    possible, so that, for example, if a vertical component is missing,
    horizontal components can still be rotated.

    :param traces: list of traces in arbitrary order
    :param matrix: transformation matrix
    :param in_channels: input channel names
    :param out_channels: output channel names
    :returns: list of transformed traces
    '''

    in_channels = tuple(_channels_to_names(in_channels))
    out_channels = tuple(_channels_to_names(out_channels))
    systems = _decompose(matrix)

    # fall back to the full matrix if some submatrices are not square
    for iins, iouts, submatrix in systems:
        if submatrix.shape[0] != submatrix.shape[1]:
            if len(in_channels) != 3 or len(out_channels) != 3:
                return []
            else:
                return _project3(traces, matrix, in_channels, out_channels)

    projected = []
    for iins, iouts, submatrix in systems:
        in_cha = tuple([in_channels[iin] for iin in iins])
        out_cha = tuple([out_channels[iout] for iout in iouts])
        if submatrix.shape[0] == 1:
            projected.extend(_project1(traces, submatrix, in_cha, out_cha))
        elif submatrix.shape[1] == 2:
            projected.extend(_project2(traces, submatrix, in_cha, out_cha))
        else:
            projected.extend(_project3(traces, submatrix, in_cha, out_cha))

    return projected

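
# Editor's sketch for :py:func:`project`: a 3x3 matrix mapping three sensor
# components ('1', '2', 'Z') to ('Z', 'N', 'E'). The matrix values below are
# placeholders for illustration, not a real sensor calibration.

def _example_project(traces):
    matrix = num.array([
        [0.58, 0.58, 0.58],
        [0.82, -0.41, -0.41],
        [0.00, 0.71, -0.71]])

    return project(
        traces, matrix,
        in_channels=['1', '2', 'Z'],
        out_channels=['Z', 'N', 'E'])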


def project_dependencies(matrix, in_channels, out_channels):
    '''
    Figure out what dependencies :py:func:`project` would produce.
    '''

    in_channels = tuple(_channels_to_names(in_channels))
    out_channels = tuple(_channels_to_names(out_channels))
    systems = _decompose(matrix)

    subpro = []
    for iins, iouts, submatrix in systems:
        if submatrix.shape[0] != submatrix.shape[1]:
            subpro.append((matrix, in_channels, out_channels))

    if not subpro:
        for iins, iouts, submatrix in systems:
            in_cha = tuple([in_channels[iin] for iin in iins])
            out_cha = tuple([out_channels[iout] for iout in iouts])
            subpro.append((submatrix, in_cha, out_cha))

    deps = {}
    for mat, in_cha, out_cha in subpro:
        for oc in out_cha:
            if oc not in deps:
                deps[oc] = []

            for ic in in_cha:
                deps[oc].append(ic)

    return deps


def _project1(traces, matrix, in_channels, out_channels):
    assert len(in_channels) == 1
    assert len(out_channels) == 1
    assert matrix.shape == (1, 1)

    projected = []
    for a in traces:
        if not (a.channel,) == in_channels:
            continue

        ac = a.copy()
        ac.set_ydata(matrix[0, 0]*a.get_ydata())
        ac.set_codes(channel=out_channels[0])
        projected.append(ac)

    return projected


def _project2(traces, matrix, in_channels, out_channels):
    assert len(in_channels) == 2
    assert len(out_channels) == 2
    assert matrix.shape == (2, 2)
    projected = []
    for a in traces:
        for b in traces:
            if not ((a.channel, b.channel) == in_channels and
                    a.nslc_id[:3] == b.nslc_id[:3] and
                    abs(a.deltat-b.deltat) < a.deltat*0.001):
                continue

            tmin = max(a.tmin, b.tmin)
            tmax = min(a.tmax, b.tmax)

            if tmin > tmax:
                continue

            ac = a.chop(tmin, tmax, inplace=False, include_last=True)
            bc = b.chop(tmin, tmax, inplace=False, include_last=True)
            if abs(ac.tmin - bc.tmin) > ac.deltat*0.01:
                logger.warning(
                    'Cannot project traces with displaced sampling '
                    '(%s, %s, %s, %s)' % a.nslc_id)
                continue

            acydata = num.dot(matrix[0], (ac.get_ydata(), bc.get_ydata()))
            bcydata = num.dot(matrix[1], (ac.get_ydata(), bc.get_ydata()))

            ac.set_ydata(acydata)
            bc.set_ydata(bcydata)

            ac.set_codes(channel=out_channels[0])
            bc.set_codes(channel=out_channels[1])

            projected.append(ac)
            projected.append(bc)

    return projected


def _project3(traces, matrix, in_channels, out_channels):
    assert len(in_channels) == 3
    assert len(out_channels) == 3
    assert matrix.shape == (3, 3)
    projected = []
    for a in traces:
        for b in traces:
            for c in traces:
                if not ((a.channel, b.channel, c.channel) == in_channels
                        and a.nslc_id[:3] == b.nslc_id[:3]
                        and b.nslc_id[:3] == c.nslc_id[:3]
                        and abs(a.deltat-b.deltat) < a.deltat*0.001
                        and abs(b.deltat-c.deltat) < b.deltat*0.001):

                    continue

                tmin = max(a.tmin, b.tmin, c.tmin)
                tmax = min(a.tmax, b.tmax, c.tmax)

                if tmin >= tmax:
                    continue

                ac = a.chop(tmin, tmax, inplace=False, include_last=True)
                bc = b.chop(tmin, tmax, inplace=False, include_last=True)
                cc = c.chop(tmin, tmax, inplace=False, include_last=True)
                if (abs(ac.tmin - bc.tmin) > ac.deltat*0.01
                        or abs(bc.tmin - cc.tmin) > bc.deltat*0.01):

                    logger.warning(
                        'Cannot project traces with displaced sampling '
                        '(%s, %s, %s, %s)' % a.nslc_id)
                    continue

                acydata = num.dot(
                    matrix[0],
                    (ac.get_ydata(), bc.get_ydata(), cc.get_ydata()))
                bcydata = num.dot(
                    matrix[1],
                    (ac.get_ydata(), bc.get_ydata(), cc.get_ydata()))
                ccydata = num.dot(
                    matrix[2],
                    (ac.get_ydata(), bc.get_ydata(), cc.get_ydata()))

                ac.set_ydata(acydata)
                bc.set_ydata(bcydata)
                cc.set_ydata(ccydata)

                ac.set_codes(channel=out_channels[0])
                bc.set_codes(channel=out_channels[1])
                cc.set_codes(channel=out_channels[2])

                projected.append(ac)
                projected.append(bc)
                projected.append(cc)

    return projected


def correlate(a, b, mode='valid', normalization=None, use_fft=False):
    '''
    Cross correlation of two traces.

    :param a,b: input traces
    :param mode: ``'valid'``, ``'full'``, or ``'same'``
    :param normalization: ``'normal'``, ``'gliding'``, or ``None``
    :param use_fft: bool, whether to do cross correlation in spectral domain

    :returns: trace containing cross correlation coefficients

    This function computes the cross correlation between two traces. It
    evaluates the discrete equivalent of

    .. math::

       c(t) = \\int_{-\\infty}^{\\infty} a^{\\ast}(\\tau) b(t+\\tau) d\\tau

    where the star denotes complex conjugate. Note that the arguments here
    are swapped when compared with the :py:func:`numpy.correlate` function,
    which is internally called. This function should be safe even with older
    versions of NumPy, where the correlate function has some problems.

    A trace containing the cross correlation coefficients is returned. The
    time information of the output trace is set so that the returned cross
    correlation can be viewed directly as a function of time lag.

    Example::

        # align two traces a and b containing a time shifted similar signal:
        c = pyrocko.trace.correlate(a, b)
        t, coef = c.max()  # get time and value of maximum
        b.shift(-t)        # align b with a

    '''

    assert_same_sampling_rate(a, b)

    ya, yb = a.ydata, b.ydata

    # need reversed order here:
    yc = numpy_correlate_fixed(yb, ya, mode=mode, use_fft=use_fft)
    kmin, kmax = numpy_correlate_lag_range(yb, ya, mode=mode, use_fft=use_fft)

    if normalization == 'normal':
        normfac = num.sqrt(num.sum(ya**2))*num.sqrt(num.sum(yb**2))
        yc = yc/normfac

    elif normalization == 'gliding':
        if mode != 'valid':
            assert False, 'gliding normalization currently only available ' \
                'with "valid" mode.'

        if ya.size < yb.size:
            yshort, ylong = ya, yb
        else:
            yshort, ylong = yb, ya

        epsilon = 0.00001
        normfac_short = num.sqrt(num.sum(yshort**2))
        normfac = normfac_short * num.sqrt(
            moving_sum(ylong**2, yshort.size, mode='valid')) \
            + normfac_short*epsilon

        if yb.size <= ya.size:
            normfac = normfac[::-1]

        yc /= normfac

    c = a.copy()
    c.set_ydata(yc)
    c.set_codes(*merge_codes(a, b, '~'))
    c.shift(-c.tmin + b.tmin-a.tmin + kmin * c.deltat)

    return c

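
# Editor's demo of :py:func:`correlate` on synthetic data: a copy of a
# signal, delayed by a known amount, is recovered from the position of the
# correlation maximum. Illustrative only.

def _example_correlate():
    deltat = 0.01
    y = num.random.random(1000)

    a = Trace(station='A', tmin=0.0, deltat=deltat, ydata=y)
    b = Trace(station='B', tmin=1.0, deltat=deltat, ydata=y)  # delayed copy

    c = correlate(a, b, mode='full', normalization='normal')
    tshift, coef = c.max()

    # b lags a by 1 s, so the correlation peaks at a lag of about +1 s:
    assert abs(tshift - 1.0) < deltat
    return tshift, coef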


def deconvolve(
        a, b, waterlevel,
        tshift=0.,
        pad=0.5,
        fd_taper=None,
        pad_to_pow2=True):

    assert_same_sampling_rate(a, b)
    assert abs(a.tmin - b.tmin) < a.deltat * 0.001
    deltat = a.deltat
    npad = int(round(a.data_len()*pad + tshift / deltat))

    ndata = max(a.data_len(), b.data_len())
    ndata_pad = ndata + npad

    if pad_to_pow2:
        ntrans = nextpow2(ndata_pad)
    else:
        ntrans = ndata_pad

    aspec = num.fft.rfft(a.ydata, ntrans)
    bspec = num.fft.rfft(b.ydata, ntrans)

    out = aspec * num.conj(bspec)

    bautocorr = bspec*num.conj(bspec)
    denom = num.maximum(bautocorr, waterlevel * bautocorr.max())

    out /= denom
    df = 1/(ntrans*deltat)

    if fd_taper is not None:
        fd_taper(out, 0.0, df)

    ydata = num.roll(num.fft.irfft(out), int(round(tshift/deltat)))
    c = a.copy(data=False)
    c.set_ydata(ydata[:ndata])
    c.set_codes(*merge_codes(a, b, '/'))
    return c

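
# Editor's sketch of water-level deconvolution with :py:func:`deconvolve`:
# dividing a trace by itself yields, approximately, a band-limited spike.
# The waterlevel and the frequency-domain taper corners are arbitrary
# choices for illustration.

def _example_deconvolve(tr):
    c = deconvolve(
        tr, tr, waterlevel=0.01,
        tshift=10.0,
        fd_taper=CosTaper(0.05, 0.1, 20.0, 25.0))

    # The result is time-shifted by ``tshift``, so the spike appears 10 s
    # after the start of the output trace.
    return c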


def assert_same_sampling_rate(a, b, eps=1.0e-6):
    assert same_sampling_rate(a, b, eps), \
        'Sampling rates differ: %g != %g' % (a.deltat, b.deltat)


def same_sampling_rate(a, b, eps=1.0e-6):
    '''
    Check if two traces have the same sampling rate.

    :param a,b: input traces
    :param eps: relative tolerance
    '''

    return abs(a.deltat - b.deltat) < (a.deltat + b.deltat)*eps


def fix_deltat_rounding_errors(deltat):
    '''
    Try to undo sampling rate rounding errors.

    Fix rounding errors of sampling intervals when these are read from
    single precision floating point values.

    Assumes that the true sampling rate or sampling interval was an integer
    value. No correction will be applied if this would change the sampling
    rate by more than 0.001%.
    '''

    if deltat <= 1.0:
        deltat_new = 1.0 / round(1.0 / deltat)
    else:
        deltat_new = round(deltat)

    if abs(deltat_new - deltat) / deltat > 1e-5:
        deltat_new = deltat

    return deltat_new

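
# For example (editor's note), a sampling interval of 0.0099999998 s, as
# read from a 32-bit float header, is snapped back to the intended 100 Hz
# rate:
#
#     >>> fix_deltat_rounding_errors(0.0099999998)
#     0.01
#
# while a genuinely non-integer rate like deltat = 0.0123 s is more than
# 0.001% away from 1/81 s and is therefore returned unchanged.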


def merge_codes(a, b, sep='-'):
    '''
    Merge network-station-location-channel codes of a pair of traces.
    '''

    o = []
    for xa, xb in zip(a.nslc_id, b.nslc_id):
        if xa == xb:
            o.append(xa)
        else:
            o.append(sep.join((xa, xb)))
    return o


class Taper(Object):
    '''
    Base class for tapers.

    Does nothing by default.
    '''

    def __call__(self, y, x0, dx):
        pass


class CosTaper(Taper):
    '''
    Cosine Taper.

    :param a: start of fading in
    :param b: end of fading in
    :param c: start of fading out
    :param d: end of fading out
    '''

    a = Float.T()
    b = Float.T()
    c = Float.T()
    d = Float.T()

    def __init__(self, a, b, c, d):
        Taper.__init__(self, a=a, b=b, c=c, d=d)

    def __call__(self, y, x0, dx):
        apply_costaper(self.a, self.b, self.c, self.d, y, x0, dx)

    def span(self, y, x0, dx):
        return span_costaper(self.a, self.b, self.c, self.d, y, x0, dx)

    def time_span(self):
        return self.a, self.d

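
# Editor's sketch: applying a :py:class:`CosTaper` to a raw array. The taper
# mutates ``y`` in place; ``x0`` and ``dx`` place the samples on the axis on
# which the a/b/c/d corners are defined.

def _example_costaper():
    y = num.ones(1000)
    taper = CosTaper(0., 10., 90., 100.)  # fade in 0-10 s, fade out 90-100 s
    taper(y, 0., 0.1)                     # samples at t = 0, 0.1, ... 99.9 s
    return y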


class CosFader(Taper):
    '''
    Cosine Fader.

    :param xfade: fade in and fade out time in seconds (optional)
    :param xfrac: fade in and fade out as fraction between 0. and 1.
        (optional)

    Only one argument can be set. The other must be ``None``.
    '''

    xfade = Float.T(optional=True)
    xfrac = Float.T(optional=True)

    def __init__(self, xfade=None, xfrac=None):
        Taper.__init__(self, xfade=xfade, xfrac=xfrac)
        assert (xfade is None) != (xfrac is None)
        self._xfade = xfade
        self._xfrac = xfrac

    def __call__(self, y, x0, dx):

        xfade = self._xfade

        xlen = (y.size - 1)*dx
        if xfade is None:
            xfade = xlen * self._xfrac

        a = x0
        b = x0 + xfade
        c = x0 + xlen - xfade
        d = x0 + xlen

        apply_costaper(a, b, c, d, y, x0, dx)

    def span(self, y, x0, dx):
        return 0, y.size

    def time_span(self):
        return None, None


def none_min(li):
    if None in li:
        return None
    else:
        return min(x for x in li if x is not None)


def none_max(li):
    if None in li:
        return None
    else:
        return max(x for x in li if x is not None)


class MultiplyTaper(Taper):
    '''
    Multiplication of several tapers.
    '''

    tapers = List.T(Taper.T())

    def __init__(self, tapers=None):
        if tapers is None:
            tapers = []

        Taper.__init__(self, tapers=tapers)

    def __call__(self, y, x0, dx):
        for taper in self.tapers:
            taper(y, x0, dx)

    def span(self, y, x0, dx):
        spans = []
        for taper in self.tapers:
            spans.append(taper.span(y, x0, dx))

        mins, maxs = list(zip(*spans))
        return min(mins), max(maxs)

    def time_span(self):
        spans = []
        for taper in self.tapers:
            spans.append(taper.time_span())

        mins, maxs = list(zip(*spans))
        return none_min(mins), none_max(maxs)


class GaussTaper(Taper):
    '''
    Frequency domain Gaussian filter.
    '''

    alpha = Float.T()

    def __init__(self, alpha):
        Taper.__init__(self, alpha=alpha)
        self._alpha = alpha

    def __call__(self, y, x0, dx):
        f = x0 + num.arange(y.size)*dx
        y *= num.exp(-num.pi**2 / (self._alpha**2) * f**2)


cached_coefficients = {}


def _get_cached_filter_coefs(order, corners, btype):
    ck = (order, tuple(corners), btype)
    if ck not in cached_coefficients:
        if len(corners) == 1:
            cached_coefficients[ck] = signal.butter(
                order, corners[0], btype=btype)
        else:
            cached_coefficients[ck] = signal.butter(
                order, corners, btype=btype)

    return cached_coefficients[ck]


class _globals(object):
    _numpy_has_correlate_flip_bug = None


def _default_key(tr):
    return (tr.network, tr.station, tr.location, tr.channel)


def numpy_has_correlate_flip_bug():
    '''
    Check if NumPy's correlate function exhibits the old behaviour.
    '''

    if _globals._numpy_has_correlate_flip_bug is None:
        a = num.array([0, 0, 1, 0, 0, 0, 0])
        b = num.array([0, 0, 0, 0, 1, 0, 0, 0])
        ab = num.correlate(a, b, mode='same')
        ba = num.correlate(b, a, mode='same')
        _globals._numpy_has_correlate_flip_bug = num.all(ab == ba)

    return _globals._numpy_has_correlate_flip_bug


def numpy_correlate_fixed(a, b, mode='valid', use_fft=False):
    '''
    Call :py:func:`numpy.correlate` with fixes.

        c[k] = sum_i a[i+k] * conj(b[i])

    Note that the result produced by newer numpy.correlate is always flipped
    with respect to the formula given in its documentation (if ascending k
    assumed for the output).
    '''

    if use_fft:
        if a.size < b.size:
            c = signal.fftconvolve(b[::-1], a, mode=mode)
        else:
            c = signal.fftconvolve(a, b[::-1], mode=mode)
        return c

    else:
        buggy = numpy_has_correlate_flip_bug()

        a = num.asarray(a)
        b = num.asarray(b)

        if buggy:
            b = num.conj(b)

        c = num.correlate(a, b, mode=mode)

        if buggy and a.size < b.size:
            return c[::-1]
        else:
            return c


def numpy_correlate_emulate(a, b, mode='valid'):
    '''
    Slow version of :py:func:`numpy.correlate` for comparison.
    '''

    a = num.asarray(a)
    b = num.asarray(b)
    kmin, kmax = numpy_correlate_lag_range(a, b, mode=mode)
    kmin = int(kmin)
    kmax = int(kmax)
    klen = kmax - kmin + 1
    c = num.zeros(klen, dtype=num.promote_types(b.dtype, a.dtype))
    for k in range(kmin, kmin+klen):
        imin = max(0, -k)
        ilen = min(b.size, a.size-k) - imin
        c[k-kmin] = num.sum(
            a[imin+k:imin+ilen+k] * num.conj(b[imin:imin+ilen]))

    return c


def numpy_correlate_lag_range(a, b, mode='valid', use_fft=False):
    '''
    Get range of lags for which :py:func:`numpy.correlate` produces values.
    '''

    a = num.asarray(a)
    b = num.asarray(b)

    kmin = -(b.size-1)
    if mode == 'full':
        klen = a.size-kmin
    elif mode == 'same':
        klen = max(a.size, b.size)
        kmin += (a.size+b.size-1 - max(a.size, b.size)) // 2 + \
            int(not use_fft and a.size % 2 == 0 and b.size > a.size)
    elif mode == 'valid':
        klen = abs(a.size - b.size) + 1
        kmin += min(a.size, b.size) - 1

    return kmin, kmin + klen - 1


def autocorr(x, nshifts):
    '''
    Compute biased estimate of the first autocorrelation coefficients.

    :param x: input array
    :param nshifts: number of coefficients to calculate
    '''

    mean = num.mean(x)
    std = num.std(x)
    n = x.size
    xdm = x - mean
    r = num.zeros(nshifts)
    for k in range(nshifts):
        r[k] = 1./((n-k)*std) * num.sum(xdm[:n-k] * xdm[k:])

    return r


def yulewalker(x, order):
    '''
    Compute autoregression coefficients using Yule-Walker method.

    :param x: input array
    :param order: number of coefficients to produce

    A biased estimate of the autocorrelation is used. The Yule-Walker
    equations are solved by :py:func:`numpy.linalg.inv` instead of the
    Levinson-Durbin recursion which is normally used.
    '''

    gamma = autocorr(x, order+1)
    d = gamma[1:1+order]
    a = num.zeros((order, order))
    gamma2 = num.concatenate((gamma[::-1], gamma[1:order]))
    for i in range(order):
        ioff = order-i
        a[i, :] = gamma2[ioff:ioff+order]

    return num.dot(num.linalg.inv(a), -d)

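
# Editor's demo of :py:func:`yulewalker`: fit an AR(2) model to a synthetic
# autoregressive sequence. With the sign convention of the Yule-Walker
# system as solved here, the result approximates the negated generating
# coefficients, up to estimation error.

def _example_yulewalker():
    rng = num.random.RandomState(42)
    n = 10000
    x = num.zeros(n)
    e = rng.normal(size=n)
    for i in range(2, n):
        x[i] = 0.75*x[i-1] - 0.5*x[i-2] + e[i]

    return yulewalker(x, order=2)  # roughly (-0.75, 0.5)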


def moving_avg(x, n):
    n = int(n)
    cx = x.cumsum()
    nn = len(x)
    y = num.zeros(nn, dtype=cx.dtype)
    y[n//2:n//2+(nn-n)] = (cx[n:]-cx[:-n])/n
    y[:n//2] = y[n//2]
    y[n//2+(nn-n):] = y[n//2+(nn-n)-1]
    return y


def moving_sum(x, n, mode='valid'):
    n = int(n)
    cx = x.cumsum()
    nn = len(x)

    if mode == 'valid':
        if nn-n+1 <= 0:
            return num.zeros(0, dtype=cx.dtype)
        y = num.zeros(nn-n+1, dtype=cx.dtype)
        y[0] = cx[n-1]
        y[1:nn-n+1] = cx[n:nn]-cx[0:nn-n]

    if mode == 'full':
        y = num.zeros(nn+n-1, dtype=cx.dtype)
        if n <= nn:
            y[0:n] = cx[0:n]
            y[n:nn] = cx[n:nn]-cx[0:nn-n]
            y[nn:nn+n-1] = cx[-1]-cx[nn-n:nn-1]
        else:
            y[0:nn] = cx[0:nn]
            y[nn:n] = cx[nn-1]
            y[n:nn+n-1] = cx[nn-1] - cx[0:nn-1]

    if mode == 'same':
        n1 = (n-1)//2
        y = num.zeros(nn, dtype=cx.dtype)
        if n <= nn:
            y[0:n-n1] = cx[n1:n]
            y[n-n1:nn-n1] = cx[n:nn]-cx[0:nn-n]
            y[nn-n1:nn] = cx[nn-1] - cx[nn-n:nn-n+n1]
        else:
            y[0:max(0, nn-n1)] = cx[min(n1, nn):nn]
            y[max(nn-n1, 0):min(n-n1, nn)] = cx[nn-1]
            y[min(n-n1, nn):nn] = cx[nn-1] - cx[0:max(0, nn-(n-n1))]

    return y

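
# Editor's note: the cumulative-sum implementation above is equivalent to a
# convolution with a boxcar of length ``n``, just cheaper. A quick check:

def _example_moving_sum():
    x = num.arange(10, dtype=float)

    y = moving_sum(x, 3, mode='valid')
    y_ref = num.convolve(x, num.ones(3), mode='valid')

    assert num.allclose(y, y_ref)
    return y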


def nextpow2(i):
    return 2**int(math.ceil(math.log(i)/math.log(2.)))


def snapper_w_offset(nmax, offset, delta, snapfun=math.ceil):
    def snap(x):
        return max(0, min(int(snapfun((x-offset)/delta)), nmax))
    return snap


def snapper(nmax, delta, snapfun=math.ceil):
    def snap(x):
        return max(0, min(int(snapfun(x/delta)), nmax))
    return snap


def apply_costaper(a, b, c, d, y, x0, dx):
    hi = snapper_w_offset(y.size, x0, dx)
    y[:hi(a)] = 0.
    y[hi(a):hi(b)] *= 0.5 \
        - 0.5*num.cos((dx*num.arange(hi(a), hi(b))-(a-x0))/(b-a)*num.pi)
    y[hi(c):hi(d)] *= 0.5 \
        + 0.5*num.cos((dx*num.arange(hi(c), hi(d))-(c-x0))/(d-c)*num.pi)
    y[hi(d):] = 0.


def span_costaper(a, b, c, d, y, x0, dx):
    hi = snapper_w_offset(y.size, x0, dx)
    return hi(a), hi(d) - hi(a)


def costaper(a, b, c, d, nfreqs, deltaf):
    hi = snapper(nfreqs, deltaf)
    tap = num.zeros(nfreqs)
    tap[hi(a):hi(b)] = 0.5 \
        - 0.5*num.cos((deltaf*num.arange(hi(a), hi(b))-a)/(b-a)*num.pi)
    tap[hi(b):hi(c)] = 1.
    tap[hi(c):hi(d)] = 0.5 \
        + 0.5*num.cos((deltaf*num.arange(hi(c), hi(d))-c)/(d-c)*num.pi)

    return tap


def t2ind(t, tdelta, snap=round):
    return int(snap(t/tdelta))


def hilbert(x, N=None):
    '''
    Return the analytic signal of ``x`` of length N; its imaginary part is
    the Hilbert transform of ``x``.

    (from scipy.signal, but changed to use fft and ifft from numpy.fft)
    '''

    x = num.asarray(x)
    if N is None:
        N = len(x)
    if N <= 0:
        raise ValueError('N must be positive.')
    if num.iscomplexobj(x):
        logger.warning('imaginary part of x ignored.')
        x = num.real(x)

    Xf = num.fft.fft(x, N, axis=0)
    h = num.zeros(N)
    if N % 2 == 0:
        h[0] = h[N//2] = 1
        h[1:N//2] = 2
    else:
        h[0] = 1
        h[1:(N+1)//2] = 2

    if len(x.shape) > 1:
        h = h[:, num.newaxis]
    x = num.fft.ifft(Xf*h)
    return x

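
# Editor's sketch: the analytic signal returned by :py:func:`hilbert` gives
# the instantaneous amplitude (envelope) of a narrow-band signal.

def _example_envelope():
    t = num.arange(0., 10., 0.01)
    y = num.sin(2.*num.pi*2.*t) * num.exp(-0.3*t)  # decaying 2 Hz oscillation

    envelope = num.abs(hilbert(y))

    # the envelope follows the exponential decay, not the oscillation:
    return t, envelope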


def near(a, b, eps):
    return abs(a-b) < eps


def coroutine(func):
    def wrapper(*args, **kwargs):
        gen = func(*args, **kwargs)
        next(gen)
        return gen

    wrapper.__name__ = func.__name__
    wrapper.__dict__ = func.__dict__
    wrapper.__doc__ = func.__doc__
    return wrapper


class States(object):
    '''
    Utility to store channel-specific state in coroutines.
    '''

    def __init__(self):
        self._states = {}

    def get(self, tr):
        k = tr.nslc_id
        if k in self._states:
            tmin, deltat, dtype, value = self._states[k]
            if (near(tmin, tr.tmin, deltat/100.)
                    and near(deltat, tr.deltat, deltat/10000.)
                    and dtype == tr.ydata.dtype):

                return value

        return None

    def set(self, tr, value):
        k = tr.nslc_id
        if k in self._states and self._states[k][-1] is not value:
            self.free(self._states[k][-1])

        self._states[k] = (tr.tmax+tr.deltat, tr.deltat, tr.ydata.dtype, value)

    def free(self, value):
        pass


@coroutine
def co_list_append(lst):
    while True:
        lst.append((yield))


class ScipyBug(Exception):
    pass


@coroutine
def co_lfilter(target, b, a):
    '''
    Successively filter broken continuous trace data (coroutine).

    Create coroutine which takes :py:class:`Trace` objects, filters their
    data through :py:func:`scipy.signal.lfilter` and sends new
    :py:class:`Trace` objects containing the filtered data to target. This
    is useful if one wants to filter a long continuous time series, which is
    split into many successive traces, without producing filter artifacts at
    trace boundaries.

    Filter states are kept *per channel*, specifically, for each (network,
    station, location, channel) combination occurring in the input traces, a
    separate state is created and maintained. This makes it possible to
    filter multichannel or multistation data with only one
    :py:func:`co_lfilter` instance.

    Filter state is reset, when gaps occur.

    Use it like this::

        from pyrocko.trace import co_lfilter, co_list_append

        filtered_traces = []
        pipe = co_lfilter(co_list_append(filtered_traces), b, a)
        for trace in traces:
            pipe.send(trace)

        pipe.close()

    '''

    try:
        states = States()
        output = None
        while True:
            input = (yield)

            zi = states.get(input)
            if zi is None:
                zi = num.zeros(max(len(a), len(b))-1, dtype=float)

            output = input.copy(data=False)
            try:
                ydata, zf = signal.lfilter(b, a, input.get_ydata(), zi=zi)
            except ValueError:
                raise ScipyBug(
                    'signal.lfilter failed: could be related to a bug '
                    'in some older scipy versions, e.g. on opensuse42.1')

            output.set_ydata(ydata)
            states.set(input, zf)
            target.send(output)

    except GeneratorExit:
        target.close()


def co_antialias(target, q, n=None, ftype='fir'):
    b, a, n = util.decimate_coeffs(q, n, ftype)
    anti = co_lfilter(target, b, a)
    return anti


@coroutine
def co_dropsamples(target, q, nfir):
    try:
        states = States()
        while True:
            tr = (yield)
            newdeltat = q * tr.deltat
            ioffset = states.get(tr)
            if ioffset is None:
                # For FIR filters, the first nfir samples are polluted by
                # boundary effects; cut them off. For IIR this may be (much)
                # more; we do not correct for that. Place the output samples
                # at times which are multiples of the new sampling interval.
                newtmin_want = math.ceil(
                    (tr.tmin+(nfir+1)*tr.deltat) / newdeltat) * newdeltat \
                    - (nfir/2*tr.deltat)
                ioffset = int(round((newtmin_want - tr.tmin)/tr.deltat))
                if ioffset < 0:
                    ioffset = ioffset % q

            newtmin_have = tr.tmin + ioffset * tr.deltat
            newtr = tr.copy(data=False)
            newtr.deltat = newdeltat
            # because the fir kernel shifts data by nfir/2 samples:
            newtr.tmin = newtmin_have - (nfir/2*tr.deltat)
            newtr.set_ydata(tr.get_ydata()[ioffset::q].copy())
            states.set(tr, (ioffset % q - tr.data_len() % q) % q)
            target.send(newtr)

    except GeneratorExit:
        target.close()


def co_downsample(target, q, n=None, ftype='fir'):
    '''
    Successively downsample broken continuous trace data (coroutine).

    Create coroutine which takes :py:class:`Trace` objects, downsamples
    their data and sends new :py:class:`Trace` objects containing the
    downsampled data to target. This is useful if one wants to downsample a
    long continuous time series, which is split into many successive traces,
    without producing filter artifacts and gaps at trace boundaries.

    Filter states are kept *per channel*, specifically, for each (network,
    station, location, channel) combination occurring in the input traces, a
    separate state is created and maintained. This makes it possible to
    filter multichannel or multistation data with only one
    :py:func:`co_lfilter` instance.

    Filter state is reset, when gaps occur. The sampling instants are chosen
    so that they occur at (or as close as possible to) integer multiples of
    the sampling interval of the downsampled trace (based on system time).
    '''

    b, a, n = util.decimate_coeffs(q, n, ftype)
    return co_antialias(co_dropsamples(target, q, n), q, n, ftype)


@coroutine
def co_downsample_to(target, deltat):

    decimators = {}
    try:
        while True:
            tr = (yield)
            ratio = deltat / tr.deltat
            rratio = round(ratio)
            if abs(rratio - ratio)/ratio > 0.0001:
                raise util.UnavailableDecimation('ratio = %g' % ratio)

            deci_seq = tuple(x for x in util.decitab(int(rratio)) if x != 1)
            if deci_seq not in decimators:
                pipe = target
                for q in deci_seq[::-1]:
                    pipe = co_downsample(pipe, q)

                decimators[deci_seq] = pipe

            decimators[deci_seq].send(tr)

    except GeneratorExit:
        for g in decimators.values():
            g.close()

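
# Editor's sketch for :py:func:`co_downsample_to`: downsample a stream of
# gappy traces to a common 1 s sampling interval. Mirrors the usage pattern
# shown in the :py:func:`co_lfilter` docstring; ``traces`` is assumed given
# and to have sampling intervals which evenly divide 1 s.

def _example_co_downsample_to(traces):
    downsampled = []
    pipe = co_downsample_to(co_list_append(downsampled), 1.0)
    for tr in traces:
        pipe.send(tr)

    pipe.close()
    return downsampled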


class DomainChoice(StringChoice):
    choices = [
        'time_domain',
        'frequency_domain',
        'envelope',
        'absolute',
        'cc_max_norm']


class MisfitSetup(Object):
    '''
    Contains misfit setup to be used in :py:meth:`Trace.misfit`.

    :param description: Description of the setup
    :param norm: L-norm classifier
    :param taper: Object of :py:class:`Taper`
    :param filter: Object of :py:class:`FrequencyResponse`
    :param domain: ['time_domain', 'frequency_domain', 'envelope',
        'absolute', 'cc_max_norm']

    Can be dumped to a YAML file.
    '''

    xmltagname = 'misfitsetup'
    description = String.T(optional=True)
    norm = Int.T(optional=False)
    taper = Taper.T(optional=False)
    filter = FrequencyResponse.T(optional=True)
    domain = DomainChoice.T(default='time_domain')

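
# Editor's sketch: building a :py:class:`MisfitSetup` and evaluating
# :py:meth:`Trace.misfit` against a candidate trace, as referenced in the
# docstrings above. ``obs`` and ``syn`` are assumed to be Trace objects with
# overlapping time spans; the taper corners and filter parameters are
# placeholders, not recommended values.

def _example_misfit(obs, syn):
    setup = MisfitSetup(
        description='L2 misfit, low-passed at 1 Hz',
        norm=2,
        taper=CosTaper(obs.tmin, obs.tmin+5., obs.tmax-5., obs.tmax),
        filter=ButterworthResponse(corner=1.0, order=4, type='low'),
        domain='time_domain')

    m, n = obs.misfit(candidate=syn, setup=setup)
    return m/n  # normalized misfit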


def equalize_sampling_rates(trace_1, trace_2):
    '''
    Equalize sampling rates of two traces (reduce higher sampling rate to
    lower).

    :param trace_1: :py:class:`Trace` object
    :param trace_2: :py:class:`Trace` object

    :returns: the two traces; where resampling was needed, the affected
        trace is replaced by a downsampled copy.
    '''

    if same_sampling_rate(trace_1, trace_2):
        return trace_1, trace_2

    if trace_1.deltat < trace_2.deltat:
        t1_out = trace_1.copy()
        t1_out.downsample_to(deltat=trace_2.deltat, snap=True)
        logger.debug('Trace downsampled (return copy of trace): %s'
                     % '.'.join(t1_out.nslc_id))
        return t1_out, trace_2

    elif trace_1.deltat > trace_2.deltat:
        t2_out = trace_2.copy()
        t2_out.downsample_to(deltat=trace_1.deltat, snap=True)
        logger.debug('Trace downsampled (return copy of trace): %s'
                     % '.'.join(t2_out.nslc_id))
        return trace_1, t2_out


def Lx_norm(u, v, norm=2):
    '''
    Calculate the misfit value *m* and the normalization divisor *n*
    according to norm.

    The normalization divisor *n* is calculated from ``v``.

    :param u: :py:class:`numpy.ndarray`
    :param v: :py:class:`numpy.ndarray`
    :param norm: (default = 2)

    ``u`` and ``v`` must be of same size.
    '''

    if norm == 1:
        return (
            num.sum(num.abs(v-u)),
            num.sum(num.abs(v)))

    elif norm == 2:
        return (
            num.sqrt(num.sum((v-u)**2)),
            num.sqrt(num.sum(v**2)))

    else:
        return (
            num.power(num.sum(num.abs(num.power(v - u, norm))), 1./norm),
            num.power(num.sum(num.abs(num.power(v, norm))), 1./norm))

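
# Worked example (editor's addition): for u = [1, 2, 3] and v = [1, 1, 1],
# the L1 norm gives m = |0| + |-1| + |-2| = 3 and n = |1| + |1| + |1| = 3,
# while the L2 norm gives m = sqrt(0 + 1 + 4) ~ 2.236 and n = sqrt(3):
#
#     >>> Lx_norm(num.array([1., 2., 3.]), num.array([1., 1., 1.]), norm=1)
#     (3.0, 3.0)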


def do_downsample(tr, deltat):
    if abs(tr.deltat - deltat) / tr.deltat > 1e-6:
        tr = tr.copy()
        tr.downsample_to(deltat, snap=True, demean=False)
    else:
        if tr.tmin/tr.deltat > 1e-6 or tr.tmax/tr.deltat > 1e-6:
            tr = tr.copy()
            tr.snap()
    return tr


def do_extend(tr, tmin, tmax):
    if tmin < tr.tmin or tmax > tr.tmax:
        tr = tr.copy()
        tr.extend(tmin=tmin, tmax=tmax, fillmethod='repeat')

    return tr


def do_pre_taper(tr, taper):
    return tr.taper(taper, inplace=False, chop=True)


def do_fft(tr, filter):
    if filter is None:
        return tr
    else:
        ndata = tr.ydata.size
        nfft = nextpow2(ndata)
        padded = num.zeros(nfft, dtype=float)
        padded[:ndata] = tr.ydata
        spectrum = num.fft.rfft(padded)
        df = 1.0 / (tr.deltat * nfft)
        frequencies = num.arange(spectrum.size)*df
        return [tr, frequencies, spectrum]


def do_filter(inp, filter):
    if filter is None:
        return inp
    else:
        tr, frequencies, spectrum = inp
        spectrum *= filter.evaluate(frequencies)
        return [tr, frequencies, spectrum]


def do_ifft(inp):
    if isinstance(inp, Trace):
        return inp
    else:
        tr, _, spectrum = inp
        ndata = tr.ydata.size
        tr = tr.copy(data=False)
        tr.set_ydata(num.fft.irfft(spectrum)[:ndata])
        return tr


def check_alignment(t1, t2):
    if abs(t1.tmin-t2.tmin) > t1.deltat * 1e-4 or \
            abs(t1.tmax - t2.tmax) > t1.deltat * 1e-4 or \
            t1.ydata.shape != t2.ydata.shape:
        raise MisalignedTraces(
            'Cannot calculate misfit of %s and %s due to misaligned '
            'traces.' % ('.'.join(t1.nslc_id), '.'.join(t2.nslc_id)))