# http://pyrocko.org - GPLv3
#
# The Pyrocko Developers, 21st Century
# ---|P------/S----------~Lg----------
'''
This module provides basic signal processing for seismic traces.
'''

from __future__ import division, absolute_import

import time
import math
import copy
import logging
import hashlib

import numpy as num
from scipy import signal

from pyrocko import util, orthodrome, pchain, model
from pyrocko.util import reuse
from pyrocko.guts import Object, Float, Int, String, List, \
    StringChoice, Timestamp
from pyrocko.guts_array import Array
from pyrocko.model import squirrel_content

# backward compatibility
from pyrocko.util import UnavailableDecimation  # noqa
from pyrocko.response import (  # noqa
    FrequencyResponse, Evalresp, InverseEvalresp, PoleZeroResponse,
    ButterworthResponse, SampledResponse, IntegrationResponse,
    DifferentiationResponse, MultiplyResponse)


guts_prefix = 'pf'

logger = logging.getLogger('pyrocko.trace')


@squirrel_content
class Trace(Object):

    '''
    Create new trace object.

    A ``Trace`` object represents a single continuous strip of evenly sampled
    time series data. It is built from a 1D NumPy array containing the data
    samples and some attributes describing its beginning and ending time, its
    sampling rate and four string identifiers (its network, station, location
    and channel code).

    :param network: network code
    :param station: station code
    :param location: location code
    :param channel: channel code
    :param extra: extra code
    :param tmin: system time of first sample in [s]
    :param tmax: system time of last sample in [s] (if set to ``None`` it is
        computed from length of ``ydata``)
    :param deltat: sampling interval in [s]
    :param ydata: 1D numpy array with data samples (can be ``None`` when
        ``tmax`` is not ``None``)
    :param mtime: optional modification time

    :param meta: additional meta information (not used, but maintained by the
        library)

    The length of the network, station, location and channel codes is not
    restricted by this software, but data formats like SAC, Mini-SEED or GSE
    have different limits on the lengths of these codes. The codes set here
    are silently truncated when the trace is stored.
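
    Example (a sketch; creates a one-minute trace of random noise sampled at
    100 Hz, with hypothetical network and station codes, assuming NumPy is
    imported as ``num``)::

        tr = Trace(
            network='XX', station='TEST', channel='BHZ',
            tmin=0.0, deltat=0.01, ydata=num.random.randn(6000))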

    '''

    network = String.T(default='', help='Deployment/network code (1-8)')
    station = String.T(default='STA', help='Station code (1-5)')
    location = String.T(default='', help='Location code (0-2)')
    channel = String.T(default='', help='Channel code (3)')
    extra = String.T(default='', help='Extra/custom code')

    tmin = Timestamp.T(default=Timestamp.D('1970-01-01 00:00:00'))
    tmax = Timestamp.T()

    deltat = Float.T(default=1.0)
    ydata = Array.T(optional=True, shape=(None,), serialize_as='base64+meta')

    mtime = Timestamp.T(optional=True)

    cached_frequencies = {}

    def __init__(self, network='', station='STA', location='', channel='',
                 tmin=0., tmax=None, deltat=1., ydata=None, mtime=None,
                 meta=None, extra=''):

        Object.__init__(self, init_props=False)

        time_float = util.get_time_float()

        if not isinstance(tmin, time_float):
            tmin = Trace.tmin.regularize_extra(tmin)

        if tmax is not None and not isinstance(tmax, time_float):
            tmax = Trace.tmax.regularize_extra(tmax)

        if mtime is not None and not isinstance(mtime, time_float):
            mtime = Trace.mtime.regularize_extra(mtime)

        self._growbuffer = None

        tmin = time_float(tmin)
        if tmax is not None:
            tmax = time_float(tmax)

        if mtime is None:
            mtime = time_float(time.time())

        self.network, self.station, self.location, self.channel, \
            self.extra = [
                reuse(x) for x in (
                    network, station, location, channel, extra)]

        self.tmin = tmin
        self.deltat = deltat

        if tmax is None:
            if ydata is not None:
                self.tmax = self.tmin + (ydata.size-1)*self.deltat
            else:
                raise Exception(
                    'fixme: trace must be created with tmax or ydata')
        else:
            n = int(round((tmax - self.tmin) / self.deltat)) + 1
            self.tmax = self.tmin + (n - 1) * self.deltat

        self.meta = meta
        self.ydata = ydata
        self.mtime = mtime
        self._update_ids()
        self.file = None
        self._pchain = None

    def __str__(self):
        fmt = min(9, max(0, -int(math.floor(math.log10(self.deltat)))))
        s = 'Trace (%s, %s, %s, %s)\n' % self.nslc_id
        s += '  timerange: %s - %s\n' % (
            util.time_to_str(self.tmin, format=fmt),
            util.time_to_str(self.tmax, format=fmt))

        s += '  delta t: %g\n' % self.deltat
        if self.meta:
            for k in sorted(self.meta.keys()):
                s += '  %s: %s\n' % (k, self.meta[k])
        return s

    @property
    def codes(self):
        from pyrocko.squirrel import model
        return model.CodesNSLCE(
            self.network, self.station, self.location, self.channel,
            self.extra)

    @property
    def time_span(self):
        return self.tmin, self.tmax

    @property
    def summary(self):
        return '%s %-16s %s %g' % (
            self.__class__.__name__, self.str_codes, self.str_time_span,
            self.deltat)

    def __getstate__(self):
        return (self.network, self.station, self.location, self.channel,
                self.tmin, self.tmax, self.deltat, self.mtime,
                self.ydata, self.meta, self.extra)

    def __setstate__(self, state):
        if len(state) == 11:
            self.network, self.station, self.location, self.channel, \
                self.tmin, self.tmax, self.deltat, self.mtime, \
                self.ydata, self.meta, self.extra = state

        elif len(state) == 12:
            # backward compatibility 0
            self.network, self.station, self.location, self.channel, \
                self.tmin, self.tmax, self.deltat, self.mtime, \
                self.ydata, self.meta, _, self.extra = state

        elif len(state) == 10:
            # backward compatibility 1
            self.network, self.station, self.location, self.channel, \
                self.tmin, self.tmax, self.deltat, self.mtime, \
                self.ydata, self.meta = state

            self.extra = ''

        else:
            # backward compatibility 2
            self.network, self.station, self.location, self.channel, \
                self.tmin, self.tmax, self.deltat, self.mtime = state
            self.ydata = None
            self.meta = None
            self.extra = ''

        self._growbuffer = None
        self._update_ids()

    def hash(self, unsafe=False):
        sha1 = hashlib.sha1()
        for code in self.nslc_id:
            sha1.update(code.encode())
        sha1.update(self.tmin.hex().encode())
        sha1.update(self.tmax.hex().encode())
        sha1.update(self.deltat.hex().encode())

        if self.ydata is not None:
            if not unsafe:
                sha1.update(memoryview(self.ydata))
            else:
                sha1.update(memoryview(self.ydata[:32]))
                sha1.update(memoryview(self.ydata[-32:]))

        return sha1.hexdigest()

    def name(self):
        '''
        Get a short string description.
        '''

        s = '%s.%s.%s.%s, %s, %s' % (self.nslc_id + (
            util.time_to_str(self.tmin),
            util.time_to_str(self.tmax)))

        return s

    def __eq__(self, other):
        return (
            isinstance(other, Trace)
            and self.network == other.network
            and self.station == other.station
            and self.location == other.location
            and self.channel == other.channel
            and (abs(self.deltat - other.deltat)
                 < (self.deltat + other.deltat)*1e-6)
            and abs(self.tmin-other.tmin) < self.deltat*0.01
            and abs(self.tmax-other.tmax) < self.deltat*0.01
            and num.all(self.ydata == other.ydata))

    def almost_equal(self, other, rtol=1e-5, atol=0.0):
        return (
            self.network == other.network
            and self.station == other.station
            and self.location == other.location
            and self.channel == other.channel
            and (abs(self.deltat - other.deltat)
                 < (self.deltat + other.deltat)*1e-6)
            and abs(self.tmin-other.tmin) < self.deltat*0.01
            and abs(self.tmax-other.tmax) < self.deltat*0.01
            and num.allclose(self.ydata, other.ydata, rtol=rtol, atol=atol))

    def assert_almost_equal(self, other, rtol=1e-5, atol=0.0):

        assert self.network == other.network, \
            'network codes differ: %s, %s' % (self.network, other.network)
        assert self.station == other.station, \
            'station codes differ: %s, %s' % (self.station, other.station)
        assert self.location == other.location, \
            'location codes differ: %s, %s' % (self.location, other.location)
        assert self.channel == other.channel, 'channel codes differ'
        assert (abs(self.deltat - other.deltat)
                < (self.deltat + other.deltat)*1e-6), \
            'sampling intervals differ %g, %g' % (self.deltat, other.deltat)
        assert abs(self.tmin-other.tmin) < self.deltat*0.01, \
            'start times differ: %s, %s' % (
                util.time_to_str(self.tmin), util.time_to_str(other.tmin))
        assert abs(self.tmax-other.tmax) < self.deltat*0.01, \
            'end times differ: %s, %s' % (
                util.time_to_str(self.tmax), util.time_to_str(other.tmax))

        assert num.allclose(self.ydata, other.ydata, rtol=rtol, atol=atol), \
            'trace values differ'

    def __hash__(self):
        return id(self)

    def __call__(self, t, clip=False, snap=round):
        it = int(snap((t-self.tmin)/self.deltat))
        if clip:
            it = max(0, min(it, self.ydata.size-1))
        else:
            if it < 0 or self.ydata.size <= it:
                raise IndexError()

        return self.tmin+it*self.deltat, self.ydata[it]

    def interpolate(self, t, clip=False):
        '''
        Value of trace between supporting points through linear interpolation.

        :param t: time instant
        :param clip: whether to clip indices to trace ends
        '''

        t0, y0 = self(t, clip=clip, snap=math.floor)
        t1, y1 = self(t, clip=clip, snap=math.ceil)
        if t0 == t1:
            return y0
        else:
            return y0+(t-t0)/(t1-t0)*(y1-y0)

    def index_clip(self, i):
        '''
        Clip index to valid range.
        '''

        return min(max(0, i), self.ydata.size)

    def add(self, other, interpolate=True, left=0., right=0.):
        '''
        Add values of other trace (self += other).

        Add values of ``other`` trace to the values of ``self``, where it
        intersects with ``other``. This method does not change the extent of
        ``self``. If ``interpolate`` is ``True`` (the default), the values of
        ``other`` to be added are interpolated at sampling instants of
        ``self``. Linear interpolation is performed. In this case the
        sampling rate of ``other`` must be equal to or lower than that of
        ``self``. If ``interpolate`` is ``False``, the sampling rates of the
        two traces must match.
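
        Example (a sketch; ``tr1`` and ``tr2`` are assumed to be existing,
        overlapping :py:class:`Trace` objects)::

            tr1.add(tr2)  # tr1 now holds tr1 + tr2 where the traces overlap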

        '''

        if interpolate:
            assert self.deltat <= other.deltat \
                or same_sampling_rate(self, other)

            other_xdata = other.get_xdata()
            xdata = self.get_xdata()
            self.ydata += num.interp(
                xdata, other_xdata, other.ydata, left=left, right=right)
        else:
            assert self.deltat == other.deltat
            ioff = int(round((other.tmin-self.tmin)/self.deltat))
            ibeg = max(0, ioff)
            iend = min(self.data_len(), ioff+other.data_len())
            self.ydata[ibeg:iend] += other.ydata[ibeg-ioff:iend-ioff]

    def mult(self, other, interpolate=True):
        '''
        Multiply with values of other trace ``(self *= other)``.

        Multiply values of ``other`` trace with the values of ``self``, where
        it intersects with ``other``. This method does not change the extent
        of ``self``. If ``interpolate`` is ``True`` (the default), the values
        of ``other`` to be multiplied are interpolated at sampling instants
        of ``self``. Linear interpolation is performed. In this case the
        sampling rate of ``other`` must be equal to or lower than that of
        ``self``. If ``interpolate`` is ``False``, the sampling rates of the
        two traces must match.
        '''

        if interpolate:
            assert self.deltat <= other.deltat or \
                same_sampling_rate(self, other)

            other_xdata = other.get_xdata()
            xdata = self.get_xdata()
            self.ydata *= num.interp(
                xdata, other_xdata, other.ydata, left=0., right=0.)
        else:
            assert self.deltat == other.deltat
            ibeg1 = int(round((other.tmin-self.tmin)/self.deltat))
            ibeg2 = int(round((self.tmin-other.tmin)/self.deltat))
            iend1 = int(round((other.tmax-self.tmin)/self.deltat))+1
            iend2 = int(round((self.tmax-other.tmin)/self.deltat))+1

            ibeg1 = self.index_clip(ibeg1)
            iend1 = self.index_clip(iend1)
            ibeg2 = self.index_clip(ibeg2)
            iend2 = self.index_clip(iend2)

            self.ydata[ibeg1:iend1] *= other.ydata[ibeg2:iend2]

    def max(self):
        '''
        Get time and value of data maximum.
        '''

        i = num.argmax(self.ydata)
        return self.tmin + i*self.deltat, self.ydata[i]

    def min(self):
        '''
        Get time and value of data minimum.
        '''

        i = num.argmin(self.ydata)
        return self.tmin + i*self.deltat, self.ydata[i]

    def absmax(self):
        '''
        Get time and value of maximum of the absolute of data.
        '''

        tmi, mi = self.min()
        tma, ma = self.max()
        if abs(mi) > abs(ma):
            return tmi, abs(mi)
        else:
            return tma, abs(ma)

    def set_codes(
            self, network=None, station=None, location=None, channel=None,
            extra=None):

        '''
        Set network, station, location, and channel codes.
        '''

        if network is not None:
            self.network = network
        if station is not None:
            self.station = station
        if location is not None:
            self.location = location
        if channel is not None:
            self.channel = channel
        if extra is not None:
            self.extra = extra

        self._update_ids()

    def set_network(self, network):
        self.network = network
        self._update_ids()

    def set_station(self, station):
        self.station = station
        self._update_ids()

    def set_location(self, location):
        self.location = location
        self._update_ids()

    def set_channel(self, channel):
        self.channel = channel
        self._update_ids()

    def overlaps(self, tmin, tmax):
        '''
        Check if trace has overlap with a given time span.
        '''
        return not (tmax < self.tmin or self.tmax < tmin)

    def is_relevant(self, tmin, tmax, selector=None):
        '''
        Check if trace has overlap with a given time span and matches a
        condition callback. (internal use)
        '''

        return not (tmax <= self.tmin or self.tmax < tmin) \
            and (selector is None or selector(self))

    def _update_ids(self):
        '''
        Update dependent ids.
        '''

        self.full_id = (
            self.network, self.station, self.location, self.channel,
            self.tmin)
        self.nslc_id = reuse(
            (self.network, self.station, self.location, self.channel))

    def prune_from_reuse_cache(self):
        util.deuse(self.nslc_id)
        util.deuse(self.network)
        util.deuse(self.station)
        util.deuse(self.location)
        util.deuse(self.channel)

    def set_mtime(self, mtime):
        '''
        Set modification time of the trace.
        '''

        self.mtime = mtime

    def get_xdata(self):
        '''
        Create array for time axis.
        '''

        if self.ydata is None:
            raise NoData()

        return self.tmin \
            + num.arange(len(self.ydata), dtype=num.float64) * self.deltat

    def get_ydata(self):
        '''
        Get data array.
        '''

        if self.ydata is None:
            raise NoData()

        return self.ydata

    def set_ydata(self, new_ydata):
        '''
        Replace data array.
        '''

        self.drop_growbuffer()
        self.ydata = new_ydata
        self.tmax = self.tmin+(len(self.ydata)-1)*self.deltat

    def data_len(self):
        if self.ydata is not None:
            return self.ydata.size
        else:
            return int(round((self.tmax-self.tmin)/self.deltat)) + 1

    def drop_data(self):
        '''
        Forget data, make dataless trace.
        '''

        self.drop_growbuffer()
        self.ydata = None

    def drop_growbuffer(self):
        '''
        Detach the trace's grow buffer.
        '''

        self._growbuffer = None
        self._pchain = None

    def copy(self, data=True):
        '''
        Make a deep copy of the trace.
        '''

        tracecopy = copy.copy(self)
        tracecopy.drop_growbuffer()
        if data:
            tracecopy.ydata = self.ydata.copy()
        tracecopy.meta = copy.deepcopy(self.meta)
        return tracecopy

    def crop_zeros(self):
        '''
        Remove any zeros at beginning and end.
        '''

        indices = num.where(self.ydata != 0.0)[0]
        if indices.size == 0:
            raise NoData()

        ibeg = indices[0]
        iend = indices[-1]+1
        if ibeg == 0 and iend == self.ydata.size:
            return

        self.drop_growbuffer()
        self.ydata = self.ydata[ibeg:iend].copy()
        self.tmin = self.tmin+ibeg*self.deltat
        self.tmax = self.tmin+(len(self.ydata)-1)*self.deltat
        self._update_ids()

    def append(self, data):
        '''
        Append data to the end of the trace.

        To make this method efficient when successively very few or even
        single samples are appended, a larger grow buffer is allocated upon
        first invocation. The trace's data is then changed to be a view into
        the currently filled portion of the grow buffer array.
        '''

        assert self.ydata.dtype == data.dtype
        newlen = data.size + self.ydata.size
        if self._growbuffer is None or self._growbuffer.size < newlen:
            self._growbuffer = num.empty(newlen*2, dtype=self.ydata.dtype)
            self._growbuffer[:self.ydata.size] = self.ydata
        self._growbuffer[self.ydata.size:newlen] = data
        self.ydata = self._growbuffer[:newlen]
        self.tmax = self.tmin + (newlen-1)*self.deltat

    def chop(
            self, tmin, tmax, inplace=True, include_last=False,
            snap=(round, round), want_incomplete=True):

        '''
        Cut the trace to given time span.

        If the ``inplace`` argument is True (the default) the trace is cut in
        place, otherwise a new trace with the cut part is returned. By
        default, the indices where to start and end the trace data array are
        determined by rounding of ``tmin`` and ``tmax`` to sampling instances
        using Python's :py:func:`round` function. This behaviour can be
        changed with the ``snap`` argument, which takes a tuple of two
        functions (one for the lower and one for the upper end) to be used
        instead of :py:func:`round`. The last sample is by default not
        included unless ``include_last`` is set to True. If the given time
        span exceeds the available time span of the trace, the available part
        is returned, unless ``want_incomplete`` is set to False - in that
        case, a :py:exc:`NoData` exception is raised. This exception is
        always raised, when the requested time span does not overlap with the
        trace's time span.
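
        Example (a sketch; cut out a one-minute window, keeping ``tr``
        untouched)::

            tr_cut = tr.chop(tr.tmin + 10., tr.tmin + 70., inplace=False)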

        '''

        if want_incomplete:
            if tmax <= self.tmin-self.deltat or self.tmax+self.deltat < tmin:
                raise NoData()
        else:
            if tmin < self.tmin or self.tmax < tmax:
                raise NoData()

        ibeg = max(0, t2ind(tmin-self.tmin, self.deltat, snap[0]))
        iplus = 0
        if include_last:
            iplus = 1

        iend = min(
            self.data_len(),
            t2ind(tmax-self.tmin, self.deltat, snap[1])+iplus)

        if ibeg >= iend:
            raise NoData()

        obj = self
        if not inplace:
            obj = self.copy(data=False)

        self.drop_growbuffer()
        if self.ydata is not None:
            obj.ydata = self.ydata[ibeg:iend].copy()
        else:
            obj.ydata = None

        obj.tmin = obj.tmin+ibeg*obj.deltat
        obj.tmax = obj.tmin+((iend-ibeg)-1)*obj.deltat

        obj._update_ids()

        return obj

    def downsample(self, ndecimate, snap=False, initials=None, demean=False,
                   ftype='fir-remez'):
        '''
        Downsample trace by a given integer factor.

        A comparison of the available FIR filters ``fir`` and ``fir-remez``
        can be found in the Pyrocko documentation.

        :param ndecimate: decimation factor, avoid values larger than 8
        :param snap: whether to put the new sampling instances closest to
            multiples of the sampling rate.
        :param initials: ``None``, ``True``, or initial conditions for the
            anti-aliasing filter, obtained from a previous run. In the latter
            two cases the final state of the filter is returned instead of
            ``None``.
        :param demean: whether to demean the signal before filtering.
        :param ftype: which FIR filter to use, choose from ``fir`` and
            ``fir-remez``. Default is ``fir-remez``.
        '''
        newdeltat = self.deltat*ndecimate
        if snap:
            ilag = int(round(
                (math.ceil(self.tmin / newdeltat) * newdeltat - self.tmin)
                / self.deltat))
        else:
            ilag = 0

        if snap and ilag > 0 and ilag < self.ydata.size:
            data = self.ydata.astype(num.float64)
            self.tmin += ilag*self.deltat
        else:
            data = self.ydata.astype(num.float64)

        if demean:
            data -= num.mean(data)

        if data.size != 0:
            result = util.decimate(
                data, ndecimate, ftype=ftype, zi=initials, ioff=ilag)
        else:
            result = data

        if initials is None:
            self.ydata, finals = result, None
        else:
            self.ydata, finals = result

        self.deltat = reuse(self.deltat*ndecimate)
        self.tmax = self.tmin+(len(self.ydata)-1)*self.deltat
        self._update_ids()

        return finals

    def downsample_to(self, deltat, snap=False, allow_upsample_max=1,
                      initials=None, demean=False, ftype='fir-remez'):

        '''
        Downsample to given sampling rate.

        Tries to downsample the trace to a target sampling interval of
        ``deltat``. This runs the :py:meth:`Trace.downsample` one or several
        times. If ``allow_upsample_max`` is set to a value larger than 1,
        intermediate upsampling steps are allowed, in order to increase the
        number of possible downsampling ratios.

        If the requested ratio is not supported, an exception of type
        :py:exc:`pyrocko.util.UnavailableDecimation` is raised.

        :param deltat: desired deltat in [s]
        :param allow_upsample_max: maximum upsampling of the signal to
            achieve the desired deltat. Default is ``1``.
        :param snap: whether to put the new sampling instances closest to
            multiples of the sampling rate.
        :param initials: ``None``, ``True``, or initial conditions for the
            anti-aliasing filter, obtained from a previous run. In the latter
            two cases the final state of the filter is returned instead of
            ``None``.
        :param demean: whether to demean the signal before filtering.
        :param ftype: which FIR filter to use, choose from ``fir`` and
            ``fir-remez``. Default is ``fir-remez``.
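
        Example (a sketch; assumes ``tr`` is sampled at 100 Hz, i.e.
        ``tr.deltat == 0.01``)::

            tr.downsample_to(0.1)  # decimate from 100 Hz to 10 Hz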

        '''

        ratio = deltat/self.deltat
        rratio = round(ratio)

        ok = False
        for upsratio in range(1, allow_upsample_max+1):
            dratio = (upsratio/self.deltat) / (1./deltat)
            if abs(dratio - round(dratio)) / dratio < 0.0001 and \
                    util.decitab(int(round(dratio))):

                ok = True
                break

        if not ok:
            raise util.UnavailableDecimation('ratio = %g' % ratio)

        if upsratio > 1:
            self.drop_growbuffer()
            ydata = self.ydata
            self.ydata = num.zeros(
                ydata.size*upsratio-(upsratio-1), ydata.dtype)
            self.ydata[::upsratio] = ydata
            for i in range(1, upsratio):
                self.ydata[i::upsratio] = \
                    float(i)/upsratio * ydata[:-1] \
                    + float(upsratio-i)/upsratio * ydata[1:]
            self.deltat = self.deltat/upsratio

            ratio = deltat/self.deltat
            rratio = round(ratio)

        deci_seq = util.decitab(int(rratio))
        finals = []
        for i, ndecimate in enumerate(deci_seq):
            if ndecimate != 1:
                xinitials = None
                if initials is not None:
                    xinitials = initials[i]
                finals.append(self.downsample(
                    ndecimate, snap=snap, initials=xinitials, demean=demean,
                    ftype=ftype))

        if initials is not None:
            return finals

    def resample(self, deltat):
        '''
        Resample to given sampling rate ``deltat``.

        Resampling is performed in the frequency domain.
        '''

        ndata = self.ydata.size
        ntrans = nextpow2(ndata)
        fntrans2 = ntrans * self.deltat/deltat
        ntrans2 = int(round(fntrans2))
        deltat2 = self.deltat * float(ntrans)/float(ntrans2)
        ndata2 = int(round(ndata*self.deltat/deltat2))
        if abs(fntrans2 - ntrans2) > 1e-7:
            logger.warning(
                'resample: requested deltat %g could not be matched exactly: '
                '%g' % (deltat, deltat2))

        data = self.ydata
        data_pad = num.zeros(ntrans, dtype=float)
        data_pad[:ndata] = data
        fdata = num.fft.rfft(data_pad)
        fdata2 = num.zeros((ntrans2+1)//2, dtype=fdata.dtype)
        n = min(fdata.size, fdata2.size)
        fdata2[:n] = fdata[:n]
        data2 = num.fft.irfft(fdata2)
        data2 = data2[:ndata2]
        data2 *= float(ntrans2) / float(ntrans)
        self.deltat = deltat2
        self.set_ydata(data2)

    def resample_simple(self, deltat):
        tyear = 3600*24*365.

        if deltat == self.deltat:
            return

        if abs(self.deltat - deltat) * tyear/deltat < deltat:
            logger.warning(
                'resample_simple: less than one sample would have to be '
                'inserted/deleted per year. Doing nothing.')
            return

        ninterval = int(round(deltat / (self.deltat - deltat)))
        if abs(ninterval) < 20:
            logger.error(
                'resample_simple: sample insertion/deletion interval less '
                'than 20. results would be erroneous.')
            raise ResamplingFailed()

        delete = False
        if ninterval < 0:
            ninterval = - ninterval
            delete = True

        tyearbegin = util.year_start(self.tmin)

        nmin = int(round((self.tmin - tyearbegin)/deltat))

        ibegin = (((nmin-1)//ninterval)+1) * ninterval - nmin
        nindices = (len(self.ydata) - ibegin - 1) // ninterval + 1
        if nindices > 0:
            indices = ibegin + num.arange(nindices) * ninterval
            data_split = num.split(self.ydata, indices)
            data = []
            for ln, h in zip(data_split[:-1], data_split[1:]):
                if delete:
                    ln = ln[:-1]

                data.append(ln)
                if not delete:
                    if ln.size == 0:
                        v = h[0]
                    else:
                        v = 0.5*(ln[-1] + h[0])
                    data.append(num.array([v], dtype=ln.dtype))

            data.append(data_split[-1])

            ydata_new = num.concatenate(data)

            self.tmin = tyearbegin + nmin * deltat
            self.deltat = deltat
            self.set_ydata(ydata_new)

    def stretch(self, tmin_new, tmax_new):
        '''
        Stretch signal while preserving sample rate using sinc interpolation.

        :param tmin_new: new time of first sample
        :param tmax_new: new time of last sample

        This method can be used to correct for a small linear time drift or
        to introduce sub-sample time shifts. The amount of stretching is
        limited to 10% by the implementation and is expected to be much
        smaller than that by the approximations used.
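
        Example (a sketch; applies a sub-sample shift of half a sample to
        ``tr``)::

            tr.stretch(tr.tmin + 0.5*tr.deltat, tr.tmax + 0.5*tr.deltat)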

        '''

        from pyrocko import signal_ext

        i_control = num.array([0, self.ydata.size-1], dtype=num.int64)
        t_control = num.array([tmin_new, tmax_new], dtype=float)

        r = (tmax_new - tmin_new) / self.deltat + 1.0
        n_new = int(round(r))
        if abs(n_new - r) > 0.001:
            n_new = int(math.floor(r))

        assert n_new >= 2

        tmax_new = tmin_new + (n_new-1) * self.deltat

        ydata_new = num.empty(n_new, dtype=float)
        signal_ext.antidrift(i_control, t_control,
                             self.ydata.astype(float),
                             tmin_new, self.deltat, ydata_new)

        self.tmin = tmin_new
        self.set_ydata(ydata_new)
        self._update_ids()

    def nyquist_check(self, frequency, intro='Corner frequency', warn=True,
                      raise_exception=False):

        '''
        Check if a given frequency is above the Nyquist frequency of the
        trace.

        :param intro: string used to introduce the warning/error message
        :param warn: whether to emit a warning
        :param raise_exception: whether to raise an :py:exc:`AboveNyquist`
            exception.
        '''

        if frequency >= 0.5/self.deltat:
            message = '%s (%g Hz) is equal to or higher than Nyquist ' \
                'frequency (%g Hz). (Trace %s)' \
                % (intro, frequency, 0.5/self.deltat, self.name())
            if warn:
                logger.warning(message)
            if raise_exception:
                raise AboveNyquist(message)

    def lowpass(self, order, corner, nyquist_warn=True,
                nyquist_exception=False, demean=True):

        '''
        Apply Butterworth lowpass to the trace.

        :param order: order of the filter
        :param corner: corner frequency of the filter

        Mean is removed before filtering.
        '''

        self.nyquist_check(
            corner, 'Corner frequency of lowpass', nyquist_warn,
            nyquist_exception)

        (b, a) = _get_cached_filter_coefs(
            order, [corner*2.0*self.deltat], btype='low')

        if len(a) != order+1 or len(b) != order+1:
            logger.warning(
                'Erroneous filter coefficients returned by '
                'scipy.signal.butter(). You may need to downsample the '
                'signal before filtering.')

        data = self.ydata.astype(num.float64)
        if demean:
            data -= num.mean(data)
        self.drop_growbuffer()
        self.ydata = signal.lfilter(b, a, data)

    def highpass(self, order, corner, nyquist_warn=True,
                 nyquist_exception=False, demean=True):

        '''
        Apply Butterworth highpass to the trace.

        :param order: order of the filter
        :param corner: corner frequency of the filter

        Mean is removed before filtering.
        '''

        self.nyquist_check(
            corner, 'Corner frequency of highpass', nyquist_warn,
            nyquist_exception)

        (b, a) = _get_cached_filter_coefs(
            order, [corner*2.0*self.deltat], btype='high')

        data = self.ydata.astype(num.float64)
        if len(a) != order+1 or len(b) != order+1:
            logger.warning(
                'Erroneous filter coefficients returned by '
                'scipy.signal.butter(). You may need to downsample the '
                'signal before filtering.')
        if demean:
            data -= num.mean(data)
        self.drop_growbuffer()
        self.ydata = signal.lfilter(b, a, data)

    def bandpass(self, order, corner_hp, corner_lp, demean=True):
        '''
        Apply Butterworth bandpass to the trace.

        :param order: order of the filter
        :param corner_hp: lower corner frequency of the filter
        :param corner_lp: upper corner frequency of the filter

        Mean is removed before filtering.
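
        Example (a sketch; 4th order bandpass between 0.1 Hz and 2.0 Hz,
        assuming both corners are below the trace's Nyquist frequency)::

            tr.bandpass(4, 0.1, 2.0)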

        '''

        self.nyquist_check(corner_hp, 'Lower corner frequency of bandpass')
        self.nyquist_check(corner_lp, 'Higher corner frequency of bandpass')
        (b, a) = _get_cached_filter_coefs(
            order,
            [corner*2.0*self.deltat for corner in (corner_hp, corner_lp)],
            btype='band')
        data = self.ydata.astype(num.float64)
        if demean:
            data -= num.mean(data)
        self.drop_growbuffer()
        self.ydata = signal.lfilter(b, a, data)

    def bandstop(self, order, corner_hp, corner_lp, demean=True):
        '''
        Apply bandstop (attenuates frequencies in band) to the trace.

        :param order: order of the filter
        :param corner_hp: lower corner frequency of the filter
        :param corner_lp: upper corner frequency of the filter

        Mean is removed before filtering.
        '''

        self.nyquist_check(corner_hp, 'Lower corner frequency of bandstop')
        self.nyquist_check(corner_lp, 'Higher corner frequency of bandstop')
        (b, a) = _get_cached_filter_coefs(
            order,
            [corner*2.0*self.deltat for corner in (corner_hp, corner_lp)],
            btype='bandstop')
        data = self.ydata.astype(num.float64)
        if demean:
            data -= num.mean(data)
        self.drop_growbuffer()
        self.ydata = signal.lfilter(b, a, data)

    def envelope(self, inplace=True):
        '''
        Calculate the envelope of the trace.

        :param inplace: calculate envelope in place

        The calculation follows:

        .. math::

            Y' = \\sqrt{Y^2+H(Y)^2}

        where H is the Hilbert-Transform of the signal Y.
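
        Example (a sketch)::

            tr_env = tr.envelope(inplace=False)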

        '''

        y = self.ydata.astype(float)
        env = num.abs(hilbert(y))
        if inplace:
            self.drop_growbuffer()
            self.ydata = env
        else:
            tr = self.copy(data=False)
            tr.ydata = env
            return tr

    def taper(self, taperer, inplace=True, chop=False):
        '''
        Apply a :py:class:`Taper` to the trace.

        :param taperer: instance of :py:class:`Taper` subclass
        :param inplace: apply taper inplace
        :param chop: if ``True``: exclude tapered parts from the resulting
            trace
        '''

        if not inplace:
            tr = self.copy()
        else:
            tr = self

        if chop:
            i, n = taperer.span(tr.ydata, tr.tmin, tr.deltat)
            tr.shift(i*tr.deltat)
            tr.set_ydata(tr.ydata[i:i+n])

        taperer(tr.ydata, tr.tmin, tr.deltat)

        if not inplace:
            return tr

    def whiten(self, order=6):
        '''
        Whiten signal in time domain using autoregression and recursive
        filter.

        :param order: order of the autoregression process
        '''

        b, a = self.whitening_coefficients(order)
        self.drop_growbuffer()
        self.ydata = signal.lfilter(b, a, self.ydata)

    def whitening_coefficients(self, order=6):
        ar = yulewalker(self.ydata, order)
        b, a = [1.] + ar.tolist(), [1.]
        return b, a

    def ampspec_whiten(
            self,
            width,
            td_taper='auto',
            fd_taper='auto',
            pad_to_pow2=True,
            demean=True):

        '''
        Whiten signal via frequency domain using moving average on amplitude
        spectra.

        :param width: width of smoothing kernel [Hz]
        :param td_taper: time domain taper, object of type :py:class:`Taper`
            or ``None`` or ``'auto'``.
        :param fd_taper: frequency domain taper, object of type
            :py:class:`Taper` or ``None`` or ``'auto'``.
        :param pad_to_pow2: whether to pad the signal with zeros up to a
            length of 2^n
        :param demean: whether to demean the signal before tapering

        The signal is first demeaned and then tapered using ``td_taper``.
        Then, the spectrum is calculated and inversely weighted with a
        smoothed version of its amplitude spectrum. A moving average is used
        for the smoothing. The smoothed spectrum is then tapered using
        ``fd_taper``. Finally, the smoothed and tapered spectrum is
        back-transformed into the time domain.

        If ``td_taper`` is set to ``'auto'``, ``CosFader(1.0/width)`` is
        used. If ``fd_taper`` is set to ``'auto'``, ``CosFader(width)`` is
        used.
        '''

        ndata = self.data_len()

        if pad_to_pow2:
            ntrans = nextpow2(ndata)
        else:
            ntrans = ndata

        df = 1./(ntrans*self.deltat)
        nw = int(round(width/df))
        if ndata//2+1 <= nw:
            raise TraceTooShort(
                'Samples in trace: %s, samples needed: %s' % (ndata, nw))

        if td_taper == 'auto':
            td_taper = CosFader(1./width)

        if fd_taper == 'auto':
            fd_taper = CosFader(width)

        if td_taper:
            self.taper(td_taper)

        ydata = self.get_ydata().astype(float)
        if demean:
            ydata -= ydata.mean()

        spec = num.fft.rfft(ydata, ntrans)

        amp = num.abs(spec)
        nspec = amp.size
        csamp = num.cumsum(amp)
        amp_smoothed = num.empty(nspec, dtype=csamp.dtype)
        n1, n2 = nw//2, nw//2 + nspec - nw
        amp_smoothed[n1:n2] = (csamp[nw:] - csamp[:-nw]) / nw
        amp_smoothed[:n1] = amp_smoothed[n1]
        amp_smoothed[n2:] = amp_smoothed[n2-1]

        denom = amp_smoothed * amp
        numer = amp
        eps = num.mean(denom) * 1e-9
        if eps == 0.0:
            eps = 1e-9

        numer += eps
        denom += eps
        spec *= numer/denom

        if fd_taper:
            fd_taper(spec, 0., df)

        ydata = num.fft.irfft(spec)
        self.set_ydata(ydata[:ndata])

    def _get_cached_freqs(self, nf, deltaf):
        ck = (nf, deltaf)
        if ck not in Trace.cached_frequencies:
            Trace.cached_frequencies[ck] = deltaf * num.arange(
                nf, dtype=float)

        return Trace.cached_frequencies[ck]

    def bandpass_fft(self, corner_hp, corner_lp):
        '''
        Apply boxcar bandpass to trace (in spectral domain).
        '''

        n = len(self.ydata)
        n2 = nextpow2(n)
        data = num.zeros(n2, dtype=num.float64)
        data[:n] = self.ydata
        fdata = num.fft.rfft(data)
        freqs = self._get_cached_freqs(len(fdata), 1./(self.deltat*n2))
        fdata[0] = 0.0
        fdata *= num.logical_and(corner_hp < freqs, freqs < corner_lp)
        data = num.fft.irfft(fdata)
        self.drop_growbuffer()
        self.ydata = data[:n]

    def shift(self, tshift):
        '''
        Time shift the trace.
        '''

        self.tmin += tshift
        self.tmax += tshift
        self._update_ids()

    def snap(self, inplace=True, interpolate=False):
        '''
        Shift trace samples to nearest even multiples of the sampling rate.

        :param inplace: (boolean) snap traces inplace

        If ``inplace`` is ``False`` and the shifts in ``tmin`` and ``tmax``
        between the snapped and the original trace are both smaller than
        0.01 x deltat, :py:meth:`snap` returns the unsnapped instance of the
        original trace.
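
        Example (a sketch; snap a copy of ``tr``, correcting sub-sample
        offsets by interpolation)::

            tr_snapped = tr.snap(inplace=False, interpolate=True)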

        '''

        tmin = round(self.tmin/self.deltat)*self.deltat
        tmax = tmin + (self.ydata.size-1)*self.deltat

        if inplace:
            xself = self
        else:
            if abs(self.tmin - tmin) < 1e-2*self.deltat and \
                    abs(self.tmax - tmax) < 1e-2*self.deltat:
                return self

            xself = self.copy()

        if interpolate:
            from pyrocko import signal_ext
            n = xself.data_len()
            ydata_new = num.empty(n, dtype=float)
            i_control = num.array([0, n-1], dtype=num.int64)
            tref = tmin
            t_control = num.array(
                [float(xself.tmin-tref), float(xself.tmax-tref)],
                dtype=float)

            signal_ext.antidrift(i_control, t_control,
                                 xself.ydata.astype(float),
                                 float(tmin-tref), xself.deltat, ydata_new)

            xself.ydata = ydata_new

        xself.tmin = tmin
        xself.tmax = tmax
        xself._update_ids()

        return xself

    def fix_deltat_rounding_errors(self):
        '''
        Try to undo sampling rate rounding errors.

        See :py:func:`fix_deltat_rounding_errors`.
        '''

        self.deltat = fix_deltat_rounding_errors(self.deltat)
        self.tmax = self.tmin + (self.data_len() - 1) * self.deltat

    def sta_lta_centered(self, tshort, tlong, quad=True, scalingmethod=1):
        '''
        Run special STA/LTA filter where the short time window is centered on
        the long time window.

        :param tshort: length of short time window in [s]
        :param tlong: length of long time window in [s]
        :param quad: whether to square the data prior to applying the STA/LTA
            filter
        :param scalingmethod: integer key to select how output values are
            scaled / normalized (``1``, ``2``, or ``3``)

        ============= ====================================== ===========
        Scalingmethod Implementation                         Range
        ============= ====================================== ===========
        ``1``         As/Al * Ts/Tl                          [0,1]
        ``2``         (As/Al - 1) / (Tl/Ts - 1)              [-Ts/Tl,1]
        ``3``         Like ``2`` but clipping range at zero  [0,1]
        ============= ====================================== ===========
        '''

        nshort = int(round(tshort/self.deltat))
        nlong = int(round(tlong/self.deltat))

        assert nshort < nlong
        if nlong > len(self.ydata):
            raise TraceTooShort(
                'Samples in trace: %s, samples needed: %s'
                % (len(self.ydata), nlong))

        if quad:
            sqrdata = self.ydata**2
        else:
            sqrdata = self.ydata

        mavg_short = moving_avg(sqrdata, nshort)
        mavg_long = moving_avg(sqrdata, nlong)

        self.drop_growbuffer()

        if scalingmethod not in (1, 2, 3):
            raise Exception('Invalid value for scalingmethod argument.')

        if scalingmethod == 1:
            self.ydata = mavg_short/mavg_long * float(nshort)/float(nlong)
        elif scalingmethod in (2, 3):
            self.ydata = (mavg_short/mavg_long - 1.) \
                / ((float(nlong)/float(nshort)) - 1)

            if scalingmethod == 3:
                self.ydata = num.maximum(self.ydata, 0.)

    def sta_lta_right(self, tshort, tlong, quad=True, scalingmethod=1):
        '''
        Run special STA/LTA filter where the short time window is overlapping
        with the last part of the long time window.

        :param tshort: length of short time window in [s]
        :param tlong: length of long time window in [s]
        :param quad: whether to square the data prior to applying the STA/LTA
            filter
        :param scalingmethod: integer key to select how output values are
            scaled / normalized (``1``, ``2``, or ``3``)

        ============= ====================================== ===========
        Scalingmethod Implementation                         Range
        ============= ====================================== ===========
        ``1``         As/Al * Ts/Tl                          [0,1]
        ``2``         (As/Al - 1) / (Tl/Ts - 1)              [-Ts/Tl,1]
        ``3``         Like ``2`` but clipping range at zero  [0,1]
        ============= ====================================== ===========

        With ``scalingmethod=1``, the values produced by this variant of the
        STA/LTA are equivalent to

        .. math::
            s_i = \\frac{s}{l} \\frac{\\frac{1}{s}\\sum_{j=i}^{i+s-1} f_j}
                                     {\\frac{1}{l}\\sum_{j=i+s-l}^{i+s-1} f_j}

        where :math:`f_j` are the input samples, :math:`s` is the number of
        samples in the short time window and :math:`l` is the number of
        samples in the long time window.
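
        Example (a sketch; 2 s short and 10 s long window, assuming ``tr``
        is well over 10 s long)::

            tr.sta_lta_right(2., 10.)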

        '''

        n = self.data_len()
        tmin = self.tmin

        nshort = max(1, int(round(tshort/self.deltat)))
        nlong = max(1, int(round(tlong/self.deltat)))

        assert nshort < nlong

        if nlong > len(self.ydata):
            raise TraceTooShort(
                'Samples in trace: %s, samples needed: %s'
                % (len(self.ydata), nlong))

        if quad:
            sqrdata = self.ydata**2
        else:
            sqrdata = self.ydata

        nshift = int(math.floor(0.5 * (nlong - nshort)))
        if nlong % 2 != 0 and nshort % 2 == 0:
            nshift += 1

        mavg_short = moving_avg(sqrdata, nshort)[nshift:]
        mavg_long = moving_avg(sqrdata, nlong)[:sqrdata.size-nshift]

        self.drop_growbuffer()

        if scalingmethod not in (1, 2, 3):
            raise Exception('Invalid value for scalingmethod argument.')

        if scalingmethod == 1:
            ydata = mavg_short/mavg_long * nshort/nlong
        elif scalingmethod in (2, 3):
            ydata = (mavg_short/mavg_long - 1.) \
                / ((float(nlong)/float(nshort)) - 1)

            if scalingmethod == 3:
                ydata = num.maximum(ydata, 0.)

        self.set_ydata(ydata)

        self.shift((math.ceil(0.5*nlong) - nshort + 1) * self.deltat)

        self.chop(
            tmin + (nlong - nshort) * self.deltat,
            tmin + (n - nshort) * self.deltat)

    def peaks(self, threshold, tsearch,
              deadtime=False,
              nblock_duration_detection=100):

        '''
        Detect peaks above a given threshold (method 1).

        From every instant, where the signal rises above ``threshold``, a
        time length of ``tsearch`` seconds is searched for a maximum. A list
        with tuples (time, value) for each detected peak is returned. The
        ``deadtime`` argument turns on a special deadtime duration detection
        algorithm useful in combination with recursive STA/LTA filters.
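
        Example (a sketch; run on a characteristic function, e.g. the output
        of an STA/LTA filter, with a hypothetical threshold of 0.5 and a 1 s
        search window)::

            tpeaks, apeaks = tr.peaks(0.5, 1.0)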

        '''

        y = self.ydata
        above = num.where(y > threshold, 1, 0)
        deriv = num.zeros(y.size, dtype=num.int8)
        deriv[1:] = above[1:]-above[:-1]
        itrig_positions = num.nonzero(deriv > 0)[0]
        tpeaks = []
        apeaks = []
        tzeros = []
        tzero = self.tmin

        for itrig_pos in itrig_positions:
            ibeg = itrig_pos
            iend = min(
                len(self.ydata),
                itrig_pos + int(math.ceil(tsearch/self.deltat)))
            ipeak = num.argmax(y[ibeg:iend])
            tpeak = self.tmin + (ipeak+ibeg)*self.deltat
            apeak = y[ibeg+ipeak]

            if tpeak < tzero:
                continue

            if deadtime:
                ibeg = itrig_pos
                iblock = 0
                nblock = nblock_duration_detection
                totalsum = 0.
                while True:
                    if ibeg+iblock*nblock >= len(y):
                        tzero = self.tmin + (len(y)-1) * self.deltat
                        break

                    logy = num.log(
                        y[ibeg+iblock*nblock:ibeg+(iblock+1)*nblock])
                    logy[0] += totalsum
                    ysum = num.cumsum(logy)
                    totalsum = ysum[-1]
                    below = num.where(ysum <= 0., 1, 0)
                    deriv = num.zeros(ysum.size, dtype=num.int8)
                    deriv[1:] = below[1:]-below[:-1]
                    izero_positions = num.nonzero(deriv > 0)[0] \
                        + iblock*nblock
                    if len(izero_positions) > 0:
                        tzero = self.tmin + self.deltat * (
                            ibeg + izero_positions[0])
                        break
                    iblock += 1
            else:
                tzero = ibeg*self.deltat + self.tmin + tsearch

            tpeaks.append(tpeak)
            apeaks.append(apeak)
            tzeros.append(tzero)

        if deadtime:
            return tpeaks, apeaks, tzeros
        else:
            return tpeaks, apeaks

    def peaks2(self, threshold, tsearch):

        '''
        Detect peaks above a given threshold (method 2).

        This variant of peak detection is a bit more robust (and slower) than
        the one implemented in :py:meth:`Trace.peaks`. First all samples with
        ``a[i-1] < a[i] > a[i+1]`` are masked as potential peaks. From these,
        iteratively the one with the maximum amplitude ``a[j]`` and time
        ``t[j]`` is chosen and potential peaks within
        ``t[j] - tsearch, t[j] + tsearch``
        are discarded. The algorithm stops when ``a[j] < threshold`` or when
        no more potential peaks are left.
        '''

        a = num.copy(self.ydata)

        amin = num.min(a)

        a[0] = amin
        a[1: -1][num.diff(a, 2) <= 0.] = amin
        a[-1] = amin

        data = []
        while True:
            imax = num.argmax(a)
            amax = a[imax]

            if amax < threshold or amax == amin:
                break

            data.append((self.tmin + imax * self.deltat, amax))

            ntsearch = int(round(tsearch / self.deltat))
            a[max(imax-ntsearch//2, 0):min(imax+ntsearch//2, a.size)] = amin

        if data:
            data.sort()
            tpeaks, apeaks = list(zip(*data))
        else:
            tpeaks, apeaks = [], []

        return tpeaks, apeaks

    def extend(self, tmin=None, tmax=None, fillmethod='zeros'):
        '''
        Extend trace to given span.

        :param tmin: begin time of new span
        :param tmax: end time of new span
        :param fillmethod: ``'zeros'``, ``'repeat'``, ``'mean'``, or
            ``'median'``
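
        Example (a sketch; pad 10 s of repeated edge samples to both sides)::

            tr.extend(tr.tmin - 10., tr.tmax + 10., fillmethod='repeat')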

        '''

        nold = self.ydata.size

        if tmin is not None:
            nl = min(0, int(round((tmin-self.tmin)/self.deltat)))
        else:
            nl = 0

        if tmax is not None:
            nh = max(nold - 1, int(round((tmax-self.tmin)/self.deltat)))
        else:
            nh = nold - 1

        n = nh - nl + 1
        data = num.zeros(n, dtype=self.ydata.dtype)
        data[-nl:-nl + nold] = self.ydata
        if self.ydata.size >= 1:
            if fillmethod == 'repeat':
                data[:-nl] = self.ydata[0]
                data[-nl + nold:] = self.ydata[-1]
            elif fillmethod == 'median':
                v = num.median(self.ydata)
                data[:-nl] = v
                data[-nl + nold:] = v
            elif fillmethod == 'mean':
                v = num.mean(self.ydata)
                data[:-nl] = v
                data[-nl + nold:] = v

        self.drop_growbuffer()
        self.ydata = data

        self.tmin += nl * self.deltat
        self.tmax = self.tmin + (self.ydata.size - 1) * self.deltat

        self._update_ids()

    def transfer(self,
                 tfade=0.,
                 freqlimits=None,
                 transfer_function=None,
                 cut_off_fading=True,
                 demean=True,
                 invert=False):

        '''
        Return new trace with transfer function applied (convolution).

        :param tfade: rise/fall time in seconds of taper applied in
            timedomain at both ends of trace.
        :param freqlimits: 4-tuple with corner frequencies in Hz.
        :param transfer_function: FrequencyResponse object; must provide a
            method 'evaluate(freqs)', which returns the transfer function
            coefficients at the frequencies 'freqs'.
        :param cut_off_fading: whether to cut off rise/fall interval in
            output trace.
        :param demean: remove mean before applying transfer function
        :param invert: set to True to do a deconvolution
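
        Example (a sketch; ``resp`` is assumed to be a frequency response
        object for the recording instrument and the corner frequencies are
        hypothetical)::

            tr_restituted = tr.transfer(
                tfade=10., freqlimits=(0.01, 0.02, 5., 10.),
                transfer_function=resp, invert=True)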

        '''

        if transfer_function is None:
            transfer_function = FrequencyResponse()

        if self.tmax - self.tmin <= tfade*2.:
            raise TraceTooShort(
                'Trace %s.%s.%s.%s too short for fading length setting. '
                'trace length = %g, fading length = %g'
                % (self.nslc_id + (self.tmax-self.tmin, tfade)))

        if freqlimits is None and (
                transfer_function is None or transfer_function.is_scalar()):

            # special case for flat responses

            output = self.copy()
            data = self.ydata
            ndata = data.size

            if transfer_function is not None:
                c = num.abs(transfer_function.evaluate(num.ones(1))[0])

                if invert:
                    c = 1.0/c

                data *= c

            if tfade != 0.0:
                data *= costaper(
                    0., tfade, self.deltat*(ndata-1)-tfade, self.deltat*ndata,
                    ndata, self.deltat)

            output.ydata = data

        else:
            ndata = self.ydata.size
            ntrans = nextpow2(ndata*1.2)
            coefs = self._get_tapered_coefs(
                ntrans, freqlimits, transfer_function, invert=invert)

            data = self.ydata

            data_pad = num.zeros(ntrans, dtype=float)
            data_pad[:ndata] = data
            if demean:
                data_pad[:ndata] -= data.mean()

            if tfade != 0.0:
                data_pad[:ndata] *= costaper(
                    0., tfade, self.deltat*(ndata-1)-tfade, self.deltat*ndata,
                    ndata, self.deltat)

            fdata = num.fft.rfft(data_pad)
            fdata *= coefs
            ddata = num.fft.irfft(fdata)
            output = self.copy()
            output.ydata = ddata[:ndata]

        if cut_off_fading and tfade != 0.0:
            try:
                output.chop(output.tmin+tfade, output.tmax-tfade, inplace=True)
            except NoData:
                raise TraceTooShort(
                    'Trace %s.%s.%s.%s too short for fading length setting. '
                    'trace length = %g, fading length = %g'
                    % (self.nslc_id + (self.tmax-self.tmin, tfade)))
        else:
            output.ydata = output.ydata.copy()

        return output

    def differentiate(self, n=1, order=4, inplace=True):
        '''
        Approximate first or second derivative of the trace.

        :param n: 1 for first derivative, 2 for second
        :param order: order of the approximation, 2 and 4 are supported
        :param inplace: if ``True`` the trace is differentiated in place,
            otherwise a new trace object with the derivative is returned.

        Raises :py:exc:`ValueError` for unsupported ``n`` or ``order``.

        See :py:func:`~pyrocko.util.diff_fd` for implementation details.
        '''

        ddata = util.diff_fd(n, order, self.deltat, self.ydata)

        if inplace:
            self.ydata = ddata
        else:
            output = self.copy(data=False)
            output.set_ydata(ddata)
            return output

    def drop_chain_cache(self):
        if self._pchain:
            self._pchain.clear()

    def init_chain(self):
        self._pchain = pchain.Chain(
            do_downsample,
            do_extend,
            do_pre_taper,
            do_fft,
            do_filter,
            do_ifft)

    def run_chain(self, tmin, tmax, deltat, setup, nocache):
        if setup.domain == 'frequency_domain':
            _, _, data = self._pchain(
                (self, deltat),
                (tmin, tmax),
                (setup.taper,),
                (setup.filter,),
                (setup.filter,),
                nocache=nocache)

            return num.abs(data), num.abs(data)

        else:
            processed = self._pchain(
                (self, deltat),
                (tmin, tmax),
                (setup.taper,),
                (setup.filter,),
                (setup.filter,),
                (),
                nocache=nocache)

            if setup.domain == 'time_domain':
                data = processed.get_ydata()

            elif setup.domain == 'envelope':
                processed = processed.envelope(inplace=False)

            elif setup.domain == 'absolute':
                processed.set_ydata(num.abs(processed.get_ydata()))

            return processed.get_ydata(), processed

    def misfit(self, candidate, setup, nocache=False, debug=False):
        '''
        Calculate misfit and normalization factor against candidate trace.

        :param candidate: :py:class:`Trace` object
        :param setup: :py:class:`MisfitSetup` object
        :returns: tuple ``(m, n)``, where m is the misfit value and n is the
            normalization divisor

        If the sampling rates of ``self`` and ``candidate`` differ, the trace
        with the higher sampling rate will be downsampled.
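
        Example (a sketch; ``setup`` is assumed to be a configured
        :py:class:`MisfitSetup` object, ``tr_obs`` and ``tr_syn`` observed
        and synthetic traces)::

            m, n = tr_obs.misfit(candidate=tr_syn, setup=setup)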

        '''

        a = self
        b = candidate

        for tr in (a, b):
            if not tr._pchain:
                tr.init_chain()

        deltat = max(a.deltat, b.deltat)
        tmin = min(a.tmin, b.tmin) - deltat
        tmax = max(a.tmax, b.tmax) + deltat

        adata, aproc = a.run_chain(tmin, tmax, deltat, setup, nocache)
        bdata, bproc = b.run_chain(tmin, tmax, deltat, setup, nocache)

        if setup.domain != 'cc_max_norm':
            m, n = Lx_norm(bdata, adata, norm=setup.norm)
        else:
            ctr = correlate(aproc, bproc, mode='full', normalization='normal')
            ccmax = ctr.max()[1]
            m = 0.5 - 0.5 * ccmax
            n = 0.5

        if debug:
            return m, n, aproc, bproc
        else:
            return m, n

    def spectrum(self, pad_to_pow2=False, tfade=None):
        '''
        Get FFT spectrum of trace.

        :param pad_to_pow2: whether to zero-pad the data to next larger
            power-of-two length
        :param tfade: ``None`` or a time length in seconds, to apply cosine
            shaped tapers to both ends of the trace

        :returns: a tuple with (frequencies, values)
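
        Example (a sketch; compute the amplitude spectrum)::

            freqs, spec = tr.spectrum(pad_to_pow2=True)
            amps = num.abs(spec)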

        '''

        ndata = self.ydata.size

        if pad_to_pow2:
            ntrans = nextpow2(ndata)
        else:
            ntrans = ndata

        if tfade is None:
            ydata = self.ydata
        else:
            ydata = self.ydata * costaper(
                0., tfade, self.deltat*(ndata-1)-tfade, self.deltat*ndata,
                ndata, self.deltat)

        fydata = num.fft.rfft(ydata, ntrans)
        df = 1./(ntrans*self.deltat)
        fxdata = num.arange(len(fydata))*df
        return fxdata, fydata

    def multi_filter(self, filter_freqs, bandwidth):

        class Gauss(FrequencyResponse):
            f0 = Float.T()
            a = Float.T(default=1.0)

            def __init__(self, f0, a=1.0, **kwargs):
                FrequencyResponse.__init__(self, f0=f0, a=a, **kwargs)

            def evaluate(self, freqs):
                omega0 = 2.*math.pi*self.f0
                omega = 2.*math.pi*freqs
                return num.exp(-((omega-omega0)
                                 / (self.a*omega0))**2)

        freqs, coefs = self.spectrum()
        n = self.data_len()
        nfilt = len(filter_freqs)
        signal_tf = num.zeros((nfilt, n))
        centroid_freqs = num.zeros(nfilt)
        for ifilt, f0 in enumerate(filter_freqs):
            taper = Gauss(f0, a=bandwidth)
            weights = taper.evaluate(freqs)
            nhalf = freqs.size
            analytic_spec = num.zeros(n, dtype=complex)
            analytic_spec[:nhalf] = coefs*weights

            if n % 2 == 0:
                analytic_spec[1:nhalf-1] *= 2.
            else:
                analytic_spec[1:nhalf] *= 2.

            analytic = num.fft.ifft(analytic_spec)
            signal_tf[ifilt, :] = num.abs(analytic)

            enorm = num.abs(analytic_spec[:nhalf])**2
            enorm /= num.sum(enorm)
            centroid_freqs[ifilt] = num.sum(freqs*enorm)

        return centroid_freqs, signal_tf

    def _get_tapered_coefs(
            self, ntrans, freqlimits, transfer_function, invert=False):

        deltaf = 1./(self.deltat*ntrans)
        nfreqs = ntrans//2 + 1
        transfer = num.ones(nfreqs, dtype=complex)
        hi = snapper(nfreqs, deltaf)
        if freqlimits is not None:
            a, b, c, d = freqlimits
            freqs = num.arange(hi(d)-hi(a), dtype=float)*deltaf \
                + hi(a)*deltaf

            if invert:
                coeffs = transfer_function.evaluate(freqs)
                if num.any(coeffs == 0.0):
                    raise InfiniteResponse('%s.%s.%s.%s' % self.nslc_id)

                transfer[hi(a):hi(d)] = 1.0 / coeffs
            else:
                transfer[hi(a):hi(d)] = transfer_function.evaluate(freqs)

            tapered_transfer = costaper(a, b, c, d, nfreqs, deltaf)*transfer
        else:
            if invert:
                raise Exception(
                    'transfer: `freqlimits` must be given when `invert` is '
                    'set to `True`')

            freqs = num.arange(nfreqs) * deltaf
            tapered_transfer = transfer_function.evaluate(freqs)

        tapered_transfer[0] = 0.0  # don't introduce static offsets
        return tapered_transfer

    def fill_template(self, template, **additional):
        '''
        Fill string template with trace metadata.

        Uses normal python '%(placeholder)s' string templates. The following
        placeholders are considered: ``network``, ``station``, ``location``,
        ``channel``, ``tmin`` (time of first sample), ``tmax`` (time of last
        sample), ``tmin_ms``, ``tmax_ms``, ``tmin_us``, ``tmax_us``,
        ``tmin_year``, ``tmax_year``, ``tmin_month``, ``tmax_month``,
        ``tmin_day``, ``tmax_day``, ``julianday``. The variants ending with
        ``'_ms'`` include milliseconds, those with ``'_us'`` include
        microseconds, those with ``'_year'`` contain only the year.
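        Example (a sketch; assumes ``tr`` is a :py:class:`Trace`)::

            fn = tr.fill_template(
                'data/%(network)s.%(station)s.%(channel)s_%(tmin)s.mseed')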

        '''

        template = template.replace('%n', '%(network)s')\
            .replace('%s', '%(station)s')\
            .replace('%l', '%(location)s')\
            .replace('%c', '%(channel)s')\
            .replace('%b', '%(tmin)s')\
            .replace('%e', '%(tmax)s')\
            .replace('%j', '%(julianday)s')

        params = dict(
            zip(('network', 'station', 'location', 'channel'), self.nslc_id))
        params['tmin'] = util.time_to_str(
            self.tmin, format='%Y-%m-%d_%H-%M-%S')
        params['tmax'] = util.time_to_str(
            self.tmax, format='%Y-%m-%d_%H-%M-%S')
        params['tmin_ms'] = util.time_to_str(
            self.tmin, format='%Y-%m-%d_%H-%M-%S.3FRAC')
        params['tmax_ms'] = util.time_to_str(
            self.tmax, format='%Y-%m-%d_%H-%M-%S.3FRAC')
        params['tmin_us'] = util.time_to_str(
            self.tmin, format='%Y-%m-%d_%H-%M-%S.6FRAC')
        params['tmax_us'] = util.time_to_str(
            self.tmax, format='%Y-%m-%d_%H-%M-%S.6FRAC')
        params['tmin_year'], params['tmin_month'], params['tmin_day'] \
            = util.time_to_str(self.tmin, format='%Y-%m-%d').split('-')
        params['tmax_year'], params['tmax_month'], params['tmax_day'] \
            = util.time_to_str(self.tmax, format='%Y-%m-%d').split('-')
        params['julianday'] = util.julian_day_of_year(self.tmin)
        params.update(additional)
        return template % params

    def plot(self):
        '''
        Show trace with matplotlib.

        See also: :py:meth:`Trace.snuffle`.
        '''

        from matplotlib import pyplot as plt
        plt.plot(self.get_xdata(), self.get_ydata())
        name = '%s %s %s - %s' % (
            self.channel,
            self.station,
            time.strftime("%d-%m-%y %H:%M:%S", time.gmtime(self.tmin)),
            time.strftime("%d-%m-%y %H:%M:%S", time.gmtime(self.tmax)))

        plt.title(name)
        plt.show()

    def snuffle(self, **kwargs):
        '''
        Show trace in a snuffler window.

        :param stations: list of `pyrocko.model.Station` objects or ``None``
        :param events: list of `pyrocko.model.Event` objects or ``None``
        :param markers: list of `pyrocko.gui.util.Marker` objects or ``None``
        :param ntracks: int, number of tracks to be shown initially
            (default: 12)
        :param follow: time interval (in seconds) for real time follow mode or
            ``None``
        :param controls: bool, whether to show the main controls (default:
            ``True``)
        :param opengl: bool, whether to use opengl (default: ``False``)
        '''

        return snuffle([self], **kwargs)


def snuffle(traces, **kwargs):
    '''
    Show traces in a snuffler window.

    :param stations: list of `pyrocko.model.Station` objects or ``None``
    :param events: list of `pyrocko.model.Event` objects or ``None``
    :param markers: list of `pyrocko.gui.util.Marker` objects or ``None``
    :param ntracks: int, number of tracks to be shown initially (default: 12)
    :param follow: time interval (in seconds) for real time follow mode or
        ``None``
    :param controls: bool, whether to show the main controls (default:
        ``True``)
    :param opengl: bool, whether to use opengl (default: ``False``)
    '''

    from pyrocko import pile
    from pyrocko.gui import snuffler
    p = pile.Pile()
    if traces:
        trf = pile.MemTracesFile(None, traces)
        p.add_file(trf)
    return snuffler.snuffle(p, **kwargs)


class InfiniteResponse(Exception):
    '''
    This exception is raised by :py:class:`Trace` operations when
    deconvolution of a frequency response (instrument response transfer
    function) would result in a division by zero.
    '''


class MisalignedTraces(Exception):
    '''
    This exception is raised by some :py:class:`Trace` operations when tmin,
    tmax or number of samples do not match.
    '''

    pass


class NoData(Exception):
    '''
    This exception is raised by some :py:class:`Trace` operations when no or
    not enough data is available.
    '''

    pass


class AboveNyquist(Exception):
    '''
    This exception is raised by some :py:class:`Trace` operations when given
    frequencies are above the Nyquist frequency.
    '''

    pass


class TraceTooShort(Exception):
    '''
    This exception is raised by some :py:class:`Trace` operations when the
    trace is too short.
    '''

    pass


class ResamplingFailed(Exception):
    pass


def minmax(traces, key=None, mode='minmax'):
    '''
    Get data range given traces grouped by selected pattern.

    :param key: a callable which takes as single argument a trace and returns
        a key for the grouping of the results. If this is ``None``, the
        default, ``lambda tr: (tr.network, tr.station, tr.location,
        tr.channel)`` is used.
    :param mode: ``'minmax'`` or floating point number. If this is
        ``'minmax'``, minimum and maximum of the traces are used, if it is a
        number, mean +- standard deviation times ``mode`` is used.

    :returns: a dict with the combined data ranges.

    Examples::

        ranges = minmax(traces, lambda tr: tr.channel)
        print(ranges['N'])  # min & max of all traces with channel == 'N'
        print(ranges['E'])  # min & max of all traces with channel == 'E'

        ranges = minmax(traces, lambda tr: (tr.network, tr.station))
        print(ranges['GR', 'HAM3'])  # min & max of all traces with
                                     # network == 'GR' and station == 'HAM3'

        ranges = minmax(traces, lambda tr: None)
        print(ranges[None])  # min & max of all traces
    '''

    if key is None:
        key = _default_key

    ranges = {}
    for trace in traces:
        if isinstance(mode, str) and mode == 'minmax':
            mi, ma = trace.ydata.min(), trace.ydata.max()
        else:
            mean = trace.ydata.mean()
            std = trace.ydata.std()
            mi, ma = mean-std*mode, mean+std*mode

        k = key(trace)
        if k not in ranges:
            ranges[k] = mi, ma
        else:
            tmi, tma = ranges[k]
            ranges[k] = min(tmi, mi), max(tma, ma)

    return ranges


def minmaxtime(traces, key=None):
    '''
    Get time range given traces grouped by selected pattern.

    :param key: a callable which takes as single argument a trace and returns
        a key for the grouping of the results. If this is ``None``, the
        default, ``lambda tr: (tr.network, tr.station, tr.location,
        tr.channel)`` is used.

    :returns: a dict with the combined time ranges.
    '''

    if key is None:
        key = _default_key

    ranges = {}
    for trace in traces:
        mi, ma = trace.tmin, trace.tmax
        k = key(trace)
        if k not in ranges:
            ranges[k] = mi, ma
        else:
            tmi, tma = ranges[k]
            ranges[k] = min(tmi, mi), max(tma, ma)

    return ranges


def degapper(
        traces,
        maxgap=5,
        fillmethod='interpolate',
        deoverlap='use_second',
        maxlap=None):

    '''
    Try to connect traces and remove gaps.

    This method will combine adjacent traces, which match in their network,
    station, location and channel attributes. Overlapping parts are handled
    according to the ``deoverlap`` argument.

    :param traces: input traces, must be sorted by their full_id attribute.
    :param maxgap: maximum number of samples to interpolate.
    :param fillmethod: what to put into the gaps: 'interpolate' or 'zeros'.
    :param deoverlap: how to handle overlaps: 'use_second' to use data from
        second trace (default), 'use_first' to use data from first trace,
        'crossfade_cos' to crossfade with cosine taper, 'add' to add amplitude
        values.
    :param maxlap: maximum number of samples of overlap which are removed

    :returns: list of traces
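    Example (a sketch; assumes ``traces`` is a list of possibly gappy
    traces)::

        traces.sort(key=lambda tr: tr.full_id)
        continuous = degapper(traces)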

    '''

    in_traces = traces
    out_traces = []
    if not in_traces:
        return out_traces
    out_traces.append(in_traces.pop(0))
    while in_traces:

        a = out_traces[-1]
        b = in_traces.pop(0)

        avirt, bvirt = a.ydata is None, b.ydata is None
        assert avirt == bvirt, \
            'traces given to degapper() must either all have data or have ' \
            'no data.'

        virtual = avirt and bvirt

        if (a.nslc_id == b.nslc_id and a.deltat == b.deltat
                and a.data_len() >= 1 and b.data_len() >= 1
                and (virtual or a.ydata.dtype == b.ydata.dtype)):

            dist = (b.tmin-(a.tmin+(a.data_len()-1)*a.deltat))/a.deltat
            idist = int(round(dist))
            if abs(dist - idist) > 0.05 and idist <= maxgap:
                # logger.warning('Cannot degap traces with displaced sampling '
                #                '(%s, %s, %s, %s)' % a.nslc_id)
                pass
            else:
                if 1 < idist <= maxgap:
                    if not virtual:
                        if fillmethod == 'interpolate':
                            filler = a.ydata[-1] + (
                                ((1.0 + num.arange(idist-1, dtype=float))
                                 / idist) * (b.ydata[0]-a.ydata[-1])
                            ).astype(a.ydata.dtype)
                        elif fillmethod == 'zeros':
                            filler = num.zeros(idist-1, dtype=a.ydata.dtype)
                        a.ydata = num.concatenate((a.ydata, filler, b.ydata))
                    a.tmax = b.tmax
                    if a.mtime and b.mtime:
                        a.mtime = max(a.mtime, b.mtime)
                    continue

                elif idist == 1:
                    if not virtual:
                        a.ydata = num.concatenate((a.ydata, b.ydata))
                    a.tmax = b.tmax
                    if a.mtime and b.mtime:
                        a.mtime = max(a.mtime, b.mtime)
                    continue

                elif idist <= 0 and (maxlap is None or -maxlap < idist):
                    if b.tmax > a.tmax:
                        if not virtual:
                            na = a.ydata.size
                            n = -idist+1
                            if deoverlap == 'use_second':
                                a.ydata = num.concatenate(
                                    (a.ydata[:-n], b.ydata))
                            elif deoverlap in ('use_first', 'crossfade_cos'):
                                a.ydata = num.concatenate(
                                    (a.ydata, b.ydata[n:]))
                            elif deoverlap == 'add':
                                a.ydata[-n:] += b.ydata[:n]
                                a.ydata = num.concatenate(
                                    (a.ydata, b.ydata[n:]))
                            else:
                                assert False, 'unknown deoverlap method'

                            if deoverlap == 'crossfade_cos':
                                n = -idist+1
                                taper = 0.5-0.5*num.cos(
                                    (1.+num.arange(n))/(1.+n)*num.pi)
                                a.ydata[na-n:na] *= 1.-taper
                                a.ydata[na-n:na] += b.ydata[:n] * taper

                        a.tmax = b.tmax
                        if a.mtime and b.mtime:
                            a.mtime = max(a.mtime, b.mtime)
                        continue
                    else:
                        # make short second trace vanish
                        continue

        if b.data_len() >= 1:
            out_traces.append(b)

    for tr in out_traces:
        tr._update_ids()

    return out_traces


def rotate(traces, azimuth, in_channels, out_channels):
    '''
    2D rotation of traces.

    :param traces: list of input traces
    :param azimuth: difference of the azimuths of the component directions
        (azimuth of out_channels[0]) - (azimuth of in_channels[0])
    :param in_channels: names of the input channels (e.g. 'N', 'E')
    :param out_channels: names of the output channels (e.g. 'R', 'T')
    :returns: list of rotated traces
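    Example (a sketch; assumes ``tn`` and ``te`` are matching north and east
    component traces)::

        rotated = rotate(
            [tn, te], azimuth=30.,
            in_channels=['N', 'E'], out_channels=['R', 'T'])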

    '''

    phi = azimuth/180.*math.pi
    cphi = math.cos(phi)
    sphi = math.sin(phi)
    rotated = []
    in_channels = tuple(_channels_to_names(in_channels))
    out_channels = tuple(_channels_to_names(out_channels))
    for a in traces:
        for b in traces:
            if ((a.channel, b.channel) == in_channels and
                    a.nslc_id[:3] == b.nslc_id[:3] and
                    abs(a.deltat-b.deltat) < a.deltat*0.001):

                tmin = max(a.tmin, b.tmin)
                tmax = min(a.tmax, b.tmax)

                if tmin < tmax:
                    ac = a.chop(tmin, tmax, inplace=False, include_last=True)
                    bc = b.chop(tmin, tmax, inplace=False, include_last=True)
                    if abs(ac.tmin - bc.tmin) > ac.deltat*0.01:
                        logger.warning(
                            'Cannot rotate traces with displaced sampling '
                            '(%s, %s, %s, %s)' % a.nslc_id)
                        continue

                    acydata = ac.get_ydata()*cphi+bc.get_ydata()*sphi
                    bcydata = -ac.get_ydata()*sphi+bc.get_ydata()*cphi
                    ac.set_ydata(acydata)
                    bc.set_ydata(bcydata)

                    ac.set_codes(channel=out_channels[0])
                    bc.set_codes(channel=out_channels[1])
                    rotated.append(ac)
                    rotated.append(bc)

    return rotated


def rotate_to_rt(n, e, source, receiver, out_channels=('R', 'T')):
    azimuth = orthodrome.azimuth(receiver, source) + 180.
    in_channels = n.channel, e.channel
    out = rotate(
        [n, e], azimuth,
        in_channels=in_channels,
        out_channels=out_channels)

    assert len(out) == 2
    for tr in out:
        if tr.channel == out_channels[0]:
            r = tr
        elif tr.channel == out_channels[1]:
            t = tr

    return r, t


def rotate_to_lqt(traces, backazimuth, incidence, in_channels,
                  out_channels=('L', 'Q', 'T')):
    '''
    Rotate traces from ZNE to LQT system.

    :param traces: list of traces in arbitrary order
    :param backazimuth: backazimuth in degrees clockwise from north
    :param incidence: incidence angle in degrees from vertical
    :param in_channels: input channel names
    :param out_channels: output channel names (default: ('L', 'Q', 'T'))
    :returns: list of transformed traces
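    Example (a sketch; assumes ``tz``, ``tn`` and ``te`` are matching Z, N
    and E traces)::

        lqt = rotate_to_lqt(
            [tz, tn, te], backazimuth=45., incidence=10.,
            in_channels=['Z', 'N', 'E'])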

    '''
    i = incidence/180.*num.pi
    b = backazimuth/180.*num.pi

    ci = num.cos(i)
    cb = num.cos(b)
    si = num.sin(i)
    sb = num.sin(b)

    rotmat = num.array(
        [[ci, -cb*si, -sb*si], [si, cb*ci, sb*ci], [0., sb, -cb]])
    return project(traces, rotmat, in_channels, out_channels)


def _decompose(a):
    '''
    Decompose matrix into independent submatrices.
    '''

    def depends(iout, a):
        row = a[iout, :]
        return set(num.arange(row.size).compress(row != 0.0))

    def provides(iin, a):
        col = a[:, iin]
        return set(num.arange(col.size).compress(col != 0.0))

    a = num.asarray(a)
    outs = set(range(a.shape[0]))
    systems = []
    while outs:
        iout = outs.pop()

        gout = set()
        for iin in depends(iout, a):
            gout.update(provides(iin, a))

        if not gout:
            continue

        gin = set()
        for iout2 in gout:
            gin.update(depends(iout2, a))

        if not gin:
            continue

        for iout2 in gout:
            if iout2 in outs:
                outs.remove(iout2)

        gin = list(gin)
        gin.sort()
        gout = list(gout)
        gout.sort()

        systems.append((gin, gout, a[gout, :][:, gin]))

    return systems


def _channels_to_names(channels):
    names = []
    for ch in channels:
        if isinstance(ch, model.Channel):
            names.append(ch.name)
        else:
            names.append(ch)
    return names


def project(traces, matrix, in_channels, out_channels):
    '''
    Affine transform of three-component traces.

    Compute matrix-vector product of three-component traces, to e.g. rotate
    traces into a different basis. The traces are distinguished and ordered
    by their channel attribute. The transform is applied to overlapping parts
    of any appropriate combinations of the input traces, so that the function
    is robust in the presence of data gaps. It also tries to apply the
    transformation to subsets of the channels if possible, so that, for
    example, if a vertical component is missing, the horizontal components
    can still be rotated.

    :param traces: list of traces in arbitrary order
    :param matrix: transformation matrix
    :param in_channels: input channel names
    :param out_channels: output channel names
    :returns: list of transformed traces
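    Example (a sketch; flips the polarity of the vertical component and
    passes the horizontals through unchanged, ``num`` being ``numpy``)::

        out = project(
            traces, num.diag([-1., 1., 1.]),
            in_channels=['Z', 'N', 'E'], out_channels=['Z', 'N', 'E'])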

    '''

    in_channels = tuple(_channels_to_names(in_channels))
    out_channels = tuple(_channels_to_names(out_channels))
    systems = _decompose(matrix)

    # fall back to the full matrix if some submatrices are not quadratic
    for iins, iouts, submatrix in systems:
        if submatrix.shape[0] != submatrix.shape[1]:
            return _project3(traces, matrix, in_channels, out_channels)

    projected = []
    for iins, iouts, submatrix in systems:
        in_cha = tuple([in_channels[iin] for iin in iins])
        out_cha = tuple([out_channels[iout] for iout in iouts])
        if submatrix.shape[0] == 1:
            projected.extend(_project1(traces, submatrix, in_cha, out_cha))
        elif submatrix.shape[0] == 2:
            projected.extend(_project2(traces, submatrix, in_cha, out_cha))
        else:
            projected.extend(_project3(traces, submatrix, in_cha, out_cha))

    return projected


def project_dependencies(matrix, in_channels, out_channels):
    '''
    Figure out what dependencies :py:func:`project` would produce.
    '''

    in_channels = tuple(_channels_to_names(in_channels))
    out_channels = tuple(_channels_to_names(out_channels))
    systems = _decompose(matrix)

    subpro = []
    for iins, iouts, submatrix in systems:
        if submatrix.shape[0] != submatrix.shape[1]:
            subpro.append((matrix, in_channels, out_channels))

    if not subpro:
        for iins, iouts, submatrix in systems:
            in_cha = tuple([in_channels[iin] for iin in iins])
            out_cha = tuple([out_channels[iout] for iout in iouts])
            subpro.append((submatrix, in_cha, out_cha))

    deps = {}
    for mat, in_cha, out_cha in subpro:
        for oc in out_cha:
            if oc not in deps:
                deps[oc] = []

            for ic in in_cha:
                deps[oc].append(ic)

    return deps


def _project1(traces, matrix, in_channels, out_channels):
    assert len(in_channels) == 1
    assert len(out_channels) == 1
    assert matrix.shape == (1, 1)

    projected = []
    for a in traces:
        if not (a.channel,) == in_channels:
            continue

        ac = a.copy()
        ac.set_ydata(matrix[0, 0]*a.get_ydata())
        ac.set_codes(channel=out_channels[0])
        projected.append(ac)

    return projected


def _project2(traces, matrix, in_channels, out_channels):
    assert len(in_channels) == 2
    assert len(out_channels) == 2
    assert matrix.shape == (2, 2)
    projected = []
    for a in traces:
        for b in traces:
            if not ((a.channel, b.channel) == in_channels and
                    a.nslc_id[:3] == b.nslc_id[:3] and
                    abs(a.deltat-b.deltat) < a.deltat*0.001):
                continue

            tmin = max(a.tmin, b.tmin)
            tmax = min(a.tmax, b.tmax)

            if tmin > tmax:
                continue

            ac = a.chop(tmin, tmax, inplace=False, include_last=True)
            bc = b.chop(tmin, tmax, inplace=False, include_last=True)
            if abs(ac.tmin - bc.tmin) > ac.deltat*0.01:
                logger.warning(
                    'Cannot project traces with displaced sampling '
                    '(%s, %s, %s, %s)' % a.nslc_id)
                continue

            acydata = num.dot(matrix[0], (ac.get_ydata(), bc.get_ydata()))
            bcydata = num.dot(matrix[1], (ac.get_ydata(), bc.get_ydata()))

            ac.set_ydata(acydata)
            bc.set_ydata(bcydata)

            ac.set_codes(channel=out_channels[0])
            bc.set_codes(channel=out_channels[1])

            projected.append(ac)
            projected.append(bc)

    return projected


def _project3(traces, matrix, in_channels, out_channels):
    assert len(in_channels) == 3
    assert len(out_channels) == 3
    assert matrix.shape == (3, 3)
    projected = []
    for a in traces:
        for b in traces:
            for c in traces:
                if not ((a.channel, b.channel, c.channel) == in_channels
                        and a.nslc_id[:3] == b.nslc_id[:3]
                        and b.nslc_id[:3] == c.nslc_id[:3]
                        and abs(a.deltat-b.deltat) < a.deltat*0.001
                        and abs(b.deltat-c.deltat) < b.deltat*0.001):

                    continue

                tmin = max(a.tmin, b.tmin, c.tmin)
                tmax = min(a.tmax, b.tmax, c.tmax)

                if tmin >= tmax:
                    continue

                ac = a.chop(tmin, tmax, inplace=False, include_last=True)
                bc = b.chop(tmin, tmax, inplace=False, include_last=True)
                cc = c.chop(tmin, tmax, inplace=False, include_last=True)
                if (abs(ac.tmin - bc.tmin) > ac.deltat*0.01
                        or abs(bc.tmin - cc.tmin) > bc.deltat*0.01):

                    logger.warning(
                        'Cannot project traces with displaced sampling '
                        '(%s, %s, %s, %s)' % a.nslc_id)
                    continue

                acydata = num.dot(
                    matrix[0],
                    (ac.get_ydata(), bc.get_ydata(), cc.get_ydata()))
                bcydata = num.dot(
                    matrix[1],
                    (ac.get_ydata(), bc.get_ydata(), cc.get_ydata()))
                ccydata = num.dot(
                    matrix[2],
                    (ac.get_ydata(), bc.get_ydata(), cc.get_ydata()))

                ac.set_ydata(acydata)
                bc.set_ydata(bcydata)
                cc.set_ydata(ccydata)

                ac.set_codes(channel=out_channels[0])
                bc.set_codes(channel=out_channels[1])
                cc.set_codes(channel=out_channels[2])

                projected.append(ac)
                projected.append(bc)
                projected.append(cc)

    return projected


def correlate(a, b, mode='valid', normalization=None, use_fft=False):
    '''
    Cross correlation of two traces.

    :param a,b: input traces
    :param mode: ``'valid'``, ``'full'``, or ``'same'``
    :param normalization: ``'normal'``, ``'gliding'``, or ``None``
    :param use_fft: bool, whether to do cross correlation in spectral domain

    :returns: trace containing cross correlation coefficients

    This function computes the cross correlation between two traces. It
    evaluates the discrete equivalent of

    .. math::

       c(t) = \\int_{-\\infty}^{\\infty} a^{\\ast}(\\tau) b(t+\\tau) d\\tau

    where the star denotes complex conjugate. Note that the arguments here
    are swapped compared with the :py:func:`numpy.correlate` function, which
    is called internally. This function should be safe even with older
    versions of NumPy, where the correlate function has some problems.

    A trace containing the cross correlation coefficients is returned. The
    time information of the output trace is set so that the returned cross
    correlation can be viewed directly as a function of time lag.

    Example::

        # align two traces a and b containing a time shifted similar signal:
        c = pyrocko.trace.correlate(a, b)
        t, coef = c.max()  # get time and value of maximum
        b.shift(-t)        # align b with a
    '''

    assert_same_sampling_rate(a, b)

    ya, yb = a.ydata, b.ydata

    # need reversed order here:
    yc = numpy_correlate_fixed(yb, ya, mode=mode, use_fft=use_fft)
    kmin, kmax = numpy_correlate_lag_range(yb, ya, mode=mode, use_fft=use_fft)

    if normalization == 'normal':
        normfac = num.sqrt(num.sum(ya**2))*num.sqrt(num.sum(yb**2))
        yc = yc/normfac

    elif normalization == 'gliding':
        if mode != 'valid':
            assert False, 'gliding normalization currently only available ' \
                'with "valid" mode.'

        if ya.size < yb.size:
            yshort, ylong = ya, yb
        else:
            yshort, ylong = yb, ya

        epsilon = 0.00001
        normfac_short = num.sqrt(num.sum(yshort**2))
        normfac = normfac_short * num.sqrt(
            moving_sum(ylong**2, yshort.size, mode='valid')) \
            + normfac_short*epsilon

        if yb.size <= ya.size:
            normfac = normfac[::-1]

        yc /= normfac

    c = a.copy()
    c.set_ydata(yc)
    c.set_codes(*merge_codes(a, b, '~'))
    c.shift(-c.tmin + b.tmin-a.tmin + kmin * c.deltat)

    return c


def deconvolve(
        a, b, waterlevel,
        tshift=0.,
        pad=0.5,
        fd_taper=None,
        pad_to_pow2=True):
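    '''
    Deconvolve one trace from another, using a waterlevel-stabilized spectral
    division.

    :param a,b: input traces; the spectrum of ``a`` is divided by the
        spectrum of ``b``
    :param waterlevel: fraction of the peak of ``|B(f)|**2`` below which the
        denominator is clipped, to stabilize the division
    :param tshift: time shift applied to the result in [s]
    :param pad: zero-padding, expressed as a fraction of the data length of
        ``a``
    :param fd_taper: ``None``, or a frequency domain taper, applied to the
        quotient spectrum before back-transformation
    :param pad_to_pow2: whether to zero-pad to the next larger power-of-two
        length for the FFT

    :returns: deconvolution result as a new :py:class:`Trace` object
    '''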

    assert_same_sampling_rate(a, b)
    assert abs(a.tmin - b.tmin) < a.deltat * 0.001
    deltat = a.deltat
    npad = int(round(a.data_len()*pad + tshift / deltat))

    ndata = max(a.data_len(), b.data_len())
    ndata_pad = ndata + npad

    if pad_to_pow2:
        ntrans = nextpow2(ndata_pad)
    else:
        ntrans = ndata_pad

    aspec = num.fft.rfft(a.ydata, ntrans)
    bspec = num.fft.rfft(b.ydata, ntrans)

    out = aspec * num.conj(bspec)

    bautocorr = bspec*num.conj(bspec)
    denom = num.maximum(bautocorr, waterlevel * bautocorr.max())

    out /= denom
    df = 1/(ntrans*deltat)

    if fd_taper is not None:
        fd_taper(out, 0.0, df)

    ydata = num.roll(num.fft.irfft(out), int(round(tshift/deltat)))
    c = a.copy(data=False)
    c.set_ydata(ydata[:ndata])
    c.set_codes(*merge_codes(a, b, '/'))
    return c


def assert_same_sampling_rate(a, b, eps=1.0e-6):
    assert same_sampling_rate(a, b, eps), \
        'Sampling rates differ: %g != %g' % (a.deltat, b.deltat)


def same_sampling_rate(a, b, eps=1.0e-6):
    '''
    Check if two traces have the same sampling rate.

    :param a,b: input traces
    :param eps: relative tolerance
    '''

    return abs(a.deltat - b.deltat) < (a.deltat + b.deltat)*eps


def fix_deltat_rounding_errors(deltat):
    '''
    Try to undo sampling rate rounding errors.

    Fix rounding errors of sampling intervals when these are read from single
    precision floating point values.

    Assumes that the true sampling rate or sampling interval was an integer
    value. No correction will be applied if this would change the sampling
    rate by more than 0.001%.
    '''

    if deltat <= 1.0:
        deltat_new = 1.0 / round(1.0 / deltat)
    else:
        deltat_new = round(deltat)

    if abs(deltat_new - deltat) / deltat > 1e-5:
        deltat_new = deltat

    return deltat_new


def merge_codes(a, b, sep='-'):
    '''
    Merge network-station-location-channel codes of a pair of traces.
    '''

    o = []
    for xa, xb in zip(a.nslc_id, b.nslc_id):
        if xa == xb:
            o.append(xa)
        else:
            o.append(sep.join((xa, xb)))
    return o


class Taper(Object):
    '''
    Base class for tapers.

    Does nothing by default.
    '''

    def __call__(self, y, x0, dx):
        pass


class CosTaper(Taper):
    '''
    Cosine taper.

    :param a: start of fading in
    :param b: end of fading in
    :param c: start of fading out
    :param d: end of fading out
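    Example (a sketch; fades a trace in over the first 5 s and out over the
    last 5 s, assuming ``tr`` is a :py:class:`Trace`)::

        taper = CosTaper(tr.tmin, tr.tmin+5., tr.tmax-5., tr.tmax)
        tr.taper(taper)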

    '''

    a = Float.T()
    b = Float.T()
    c = Float.T()
    d = Float.T()

    def __init__(self, a, b, c, d):
        Taper.__init__(self, a=a, b=b, c=c, d=d)

    def __call__(self, y, x0, dx):
        apply_costaper(self.a, self.b, self.c, self.d, y, x0, dx)

    def span(self, y, x0, dx):
        return span_costaper(self.a, self.b, self.c, self.d, y, x0, dx)

    def time_span(self):
        return self.a, self.d


class CosFader(Taper):
    '''
    Cosine fader.

    :param xfade: fade in and fade out time in seconds (optional)
    :param xfrac: fade in and fade out as fraction between 0. and 1.
        (optional)

    Only one argument can be set. The other must be ``None``.
    '''

    xfade = Float.T(optional=True)
    xfrac = Float.T(optional=True)

    def __init__(self, xfade=None, xfrac=None):
        Taper.__init__(self, xfade=xfade, xfrac=xfrac)
        assert (xfade is None) != (xfrac is None)
        self._xfade = xfade
        self._xfrac = xfrac

    def __call__(self, y, x0, dx):

        xfade = self._xfade

        xlen = (y.size - 1)*dx
        if xfade is None:
            xfade = xlen * self._xfrac

        a = x0
        b = x0 + xfade
        c = x0 + xlen - xfade
        d = x0 + xlen

        apply_costaper(a, b, c, d, y, x0, dx)

    def span(self, y, x0, dx):
        return 0, y.size

    def time_span(self):
        return None, None


def none_min(li):
    if None in li:
        return None
    else:
        return min(x for x in li if x is not None)


def none_max(li):
    if None in li:
        return None
    else:
        return max(x for x in li if x is not None)


class MultiplyTaper(Taper):
    '''
    Multiplication of several tapers.
    '''

    tapers = List.T(Taper.T())

    def __init__(self, tapers=None):
        if tapers is None:
            tapers = []

        Taper.__init__(self, tapers=tapers)

    def __call__(self, y, x0, dx):
        for taper in self.tapers:
            taper(y, x0, dx)

    def span(self, y, x0, dx):
        spans = []
        for taper in self.tapers:
            spans.append(taper.span(y, x0, dx))

        mins, maxs = list(zip(*spans))
        return min(mins), max(maxs)

    def time_span(self):
        spans = []
        for taper in self.tapers:
            spans.append(taper.time_span())

        mins, maxs = list(zip(*spans))
        return none_min(mins), none_max(maxs)


class GaussTaper(Taper):
    '''
    Frequency domain Gaussian filter.
    '''

    alpha = Float.T()

    def __init__(self, alpha):
        Taper.__init__(self, alpha=alpha)
        self._alpha = alpha

    def __call__(self, y, x0, dx):
        f = x0 + num.arange(y.size)*dx
        y *= num.exp(-num.pi**2 / (self._alpha**2) * f**2)


cached_coefficients = {}


def _get_cached_filter_coefs(order, corners, btype):
    ck = (order, tuple(corners), btype)
    if ck not in cached_coefficients:
        if len(corners) == 1:
            # a single corner (low- or highpass) is passed as a scalar:
            cached_coefficients[ck] = signal.butter(
                order, corners[0], btype=btype)
        else:
            cached_coefficients[ck] = signal.butter(
                order, corners, btype=btype)

    return cached_coefficients[ck]


class _globals(object):
    _numpy_has_correlate_flip_bug = None


def _default_key(tr):
    return (tr.network, tr.station, tr.location, tr.channel)


def numpy_has_correlate_flip_bug():
    '''
    Check if NumPy's correlate function shows the old, flipped behaviour.
    '''

    if _globals._numpy_has_correlate_flip_bug is None:
        a = num.array([0, 0, 1, 0, 0, 0, 0])
        b = num.array([0, 0, 0, 0, 1, 0, 0, 0])
        ab = num.correlate(a, b, mode='same')
        ba = num.correlate(b, a, mode='same')
        _globals._numpy_has_correlate_flip_bug = num.all(ab == ba)

    return _globals._numpy_has_correlate_flip_bug


def numpy_correlate_fixed(a, b, mode='valid', use_fft=False):
    '''
    Call :py:func:`numpy.correlate` with fixes.

    Computes::

        c[k] = sum_i a[i+k] * conj(b[i])

    Note that the result produced by newer numpy.correlate is always flipped
    with respect to the formula given in its documentation (if ascending k
    is assumed for the output).
    '''

    if use_fft:
        if a.size < b.size:
            c = signal.fftconvolve(b[::-1], a, mode=mode)
        else:
            c = signal.fftconvolve(a, b[::-1], mode=mode)
        return c

    else:
        buggy = numpy_has_correlate_flip_bug()

        a = num.asarray(a)
        b = num.asarray(b)

        if buggy:
            b = num.conj(b)

        c = num.correlate(a, b, mode=mode)

        if buggy and a.size < b.size:
            return c[::-1]
        else:
            return c


def numpy_correlate_emulate(a, b, mode='valid'):
    '''
    Slow version of :py:func:`numpy.correlate` for comparison.
    '''

    a = num.asarray(a)
    b = num.asarray(b)
    kmin, kmax = numpy_correlate_lag_range(a, b, mode=mode)
    kmin = int(kmin)
    kmax = int(kmax)
    klen = kmax - kmin + 1
    c = num.zeros(klen, dtype=num.find_common_type((b.dtype, a.dtype), ()))
    for k in range(kmin, kmin+klen):
        imin = max(0, -k)
        ilen = min(b.size, a.size-k) - imin
        c[k-kmin] = num.sum(
            a[imin+k:imin+ilen+k] * num.conj(b[imin:imin+ilen]))

    return c


def numpy_correlate_lag_range(a, b, mode='valid', use_fft=False):
    '''
    Get range of lags for which :py:func:`numpy.correlate` produces values.
    '''

    a = num.asarray(a)
    b = num.asarray(b)

    kmin = -(b.size-1)
    if mode == 'full':
        klen = a.size-kmin
    elif mode == 'same':
        klen = max(a.size, b.size)
        kmin += (a.size+b.size-1 - max(a.size, b.size)) // 2 + \
            int(not use_fft and a.size % 2 == 0 and b.size > a.size)
    elif mode == 'valid':
        klen = abs(a.size - b.size) + 1
        kmin += min(a.size, b.size) - 1

    return kmin, kmin + klen - 1


def autocorr(x, nshifts):
    '''
    Compute biased estimate of the first autocorrelation coefficients.

    :param x: input array
    :param nshifts: number of coefficients to calculate
    '''

    mean = num.mean(x)
    std = num.std(x)
    n = x.size
    xdm = x - mean
    r = num.zeros(nshifts)
    for k in range(nshifts):
        r[k] = 1./((n-num.abs(k))*std) * num.sum(xdm[:n-k] * xdm[k:])

    return r


def yulewalker(x, order):
    '''
    Compute autoregression coefficients using the Yule-Walker method.

    :param x: input array
    :param order: number of coefficients to produce

    A biased estimate of the autocorrelation is used. The Yule-Walker
    equations are solved by :py:func:`numpy.linalg.inv` instead of the
    Levinson-Durbin recursion which is normally used.
    '''

    gamma = autocorr(x, order+1)
    d = gamma[1:1+order]
    a = num.zeros((order, order))
    gamma2 = num.concatenate((gamma[::-1], gamma[1:order]))
    for i in range(order):
        ioff = order-i
        a[i, :] = gamma2[ioff:ioff+order]

    return num.dot(num.linalg.inv(a), -d)


def moving_avg(x, n):
    n = int(n)
    cx = x.cumsum()
    nn = len(x)
    y = num.zeros(nn, dtype=cx.dtype)
    y[n//2:n//2+(nn-n)] = (cx[n:]-cx[:-n])/n
    y[:n//2] = y[n//2]
    y[n//2+(nn-n):] = y[n//2+(nn-n)-1]
    return y


def moving_sum(x, n, mode='valid'):
    n = int(n)
    cx = x.cumsum()
    nn = len(x)

    if mode == 'valid':
        if nn-n+1 <= 0:
            return num.zeros(0, dtype=cx.dtype)
        y = num.zeros(nn-n+1, dtype=cx.dtype)
        y[0] = cx[n-1]
        y[1:nn-n+1] = cx[n:nn]-cx[0:nn-n]

    if mode == 'full':
        y = num.zeros(nn+n-1, dtype=cx.dtype)
        if n <= nn:
            y[0:n] = cx[0:n]
            y[n:nn] = cx[n:nn]-cx[0:nn-n]
            y[nn:nn+n-1] = cx[-1]-cx[nn-n:nn-1]
        else:
            y[0:nn] = cx[0:nn]
            y[nn:n] = cx[nn-1]
            y[n:nn+n-1] = cx[nn-1] - cx[0:nn-1]

    if mode == 'same':
        n1 = (n-1)//2
        y = num.zeros(nn, dtype=cx.dtype)
        if n <= nn:
            y[0:n-n1] = cx[n1:n]
            y[n-n1:nn-n1] = cx[n:nn]-cx[0:nn-n]
            y[nn-n1:nn] = cx[nn-1] - cx[nn-n:nn-n+n1]
        else:
            y[0:max(0, nn-n1)] = cx[min(n1, nn):nn]
            y[max(nn-n1, 0):min(n-n1, nn)] = cx[nn-1]
            y[min(n-n1, nn):nn] = cx[nn-1] - cx[0:max(0, nn-(n-n1))]

    return y


def nextpow2(i):
    return 2**int(math.ceil(math.log(i)/math.log(2.)))


def snapper_w_offset(nmax, offset, delta, snapfun=math.ceil):
    def snap(x):
        return max(0, min(int(snapfun((x-offset)/delta)), nmax))
    return snap


def snapper(nmax, delta, snapfun=math.ceil):
    def snap(x):
        return max(0, min(int(snapfun(x/delta)), nmax))
    return snap


def apply_costaper(a, b, c, d, y, x0, dx):
    hi = snapper_w_offset(y.size, x0, dx)
    y[:hi(a)] = 0.
    y[hi(a):hi(b)] *= 0.5 \
        - 0.5*num.cos((dx*num.arange(hi(a), hi(b))-(a-x0))/(b-a)*num.pi)
    y[hi(c):hi(d)] *= 0.5 \
        + 0.5*num.cos((dx*num.arange(hi(c), hi(d))-(c-x0))/(d-c)*num.pi)
    y[hi(d):] = 0.


def span_costaper(a, b, c, d, y, x0, dx):
    hi = snapper_w_offset(y.size, x0, dx)
    return hi(a), hi(d) - hi(a)


def costaper(a, b, c, d, nfreqs, deltaf):
    hi = snapper(nfreqs, deltaf)
    tap = num.zeros(nfreqs)
    tap[hi(a):hi(b)] = 0.5 \
        - 0.5*num.cos((deltaf*num.arange(hi(a), hi(b))-a)/(b-a)*num.pi)
    tap[hi(b):hi(c)] = 1.
    tap[hi(c):hi(d)] = 0.5 \
        + 0.5*num.cos((deltaf*num.arange(hi(c), hi(d))-c)/(d-c)*num.pi)

    return tap


def t2ind(t, tdelta, snap=round):
    return int(snap(t/tdelta))


def hilbert(x, N=None):
    '''
    Return the Hilbert transform (analytic signal) of x of length N.

    (from scipy.signal, but changed to use fft and ifft from numpy.fft)
    '''

    x = num.asarray(x)
    if N is None:
        N = len(x)
    if N <= 0:
        raise ValueError("N must be positive.")
    if num.iscomplexobj(x):
        logger.warning('imaginary part of x ignored.')
        x = num.real(x)

    Xf = num.fft.fft(x, N, axis=0)
    h = num.zeros(N)
    if N % 2 == 0:
        h[0] = h[N//2] = 1
        h[1:N//2] = 2
    else:
        h[0] = 1
        h[1:(N+1)//2] = 2

    if len(x.shape) > 1:
        h = h[:, num.newaxis]
    x = num.fft.ifft(Xf*h)
    return x


def near(a, b, eps):
    return abs(a-b) < eps


def coroutine(func):
    def wrapper(*args, **kwargs):
        gen = func(*args, **kwargs)
        next(gen)
        return gen

    wrapper.__name__ = func.__name__
    wrapper.__dict__ = func.__dict__
    wrapper.__doc__ = func.__doc__
    return wrapper


class States(object):
    '''
    Utility to store channel-specific state in coroutines.
    '''

    def __init__(self):
        self._states = {}

    def get(self, tr):
        k = tr.nslc_id
        if k in self._states:
            tmin, deltat, dtype, value = self._states[k]
            if (near(tmin, tr.tmin, deltat/100.)
                    and near(deltat, tr.deltat, deltat/10000.)
                    and dtype == tr.ydata.dtype):

                return value

        return None

    def set(self, tr, value):
        k = tr.nslc_id
        if k in self._states and self._states[k][-1] is not value:
            self.free(self._states[k][-1])

        self._states[k] = (tr.tmax+tr.deltat, tr.deltat, tr.ydata.dtype, value)

    def free(self, value):
        pass


@coroutine
def co_list_append(list):
    while True:
        list.append((yield))


class ScipyBug(Exception):
    pass


@coroutine
def co_lfilter(target, b, a):
    '''
    Successively filter broken continuous trace data (coroutine).

    Create coroutine which takes :py:class:`Trace` objects, filters their
    data through :py:func:`scipy.signal.lfilter` and sends new
    :py:class:`Trace` objects containing the filtered data to target. This is
    useful if one wants to filter a long continuous time series, which is
    split into many successive traces, without producing filter artifacts at
    trace boundaries.

    Filter states are kept *per channel*, specifically, for each (network,
    station, location, channel) combination occurring in the input traces, a
    separate state is created and maintained. This makes it possible to
    filter multichannel or multistation data with only one
    :py:func:`co_lfilter` instance.

    Filter state is reset when gaps occur.

    Use it like this::

        from pyrocko.trace import co_lfilter, co_list_append

        filtered_traces = []
        pipe = co_lfilter(co_list_append(filtered_traces), b, a)
        for trace in traces:
            pipe.send(trace)

        pipe.close()

    '''

    try:
        states = States()
        output = None
        while True:
            input = (yield)

            zi = states.get(input)
            if zi is None:
                zi = num.zeros(max(len(a), len(b))-1, dtype=float)

            output = input.copy(data=False)
            try:
                ydata, zf = signal.lfilter(b, a, input.get_ydata(), zi=zi)
            except ValueError:
                raise ScipyBug(
                    'signal.lfilter failed: could be related to a bug '
                    'in some older scipy versions, e.g. on opensuse42.1')

            output.set_ydata(ydata)
            states.set(input, zf)
            target.send(output)

    except GeneratorExit:
        target.close()


def co_antialias(target, q, n=None, ftype='fir'):
    b, a, n = util.decimate_coeffs(q, n, ftype)
    anti = co_lfilter(target, b, a)
    return anti


@coroutine
def co_dropsamples(target, q, nfir):
    try:
        states = States()
        while True:
            tr = (yield)
            newdeltat = q * tr.deltat
            ioffset = states.get(tr)
            if ioffset is None:
                # for fir filter, the first nfir samples are polluted by
                # boundary effects; cut them off.
                # for iir this may be (much) more, we do not correct for that.
                # put sample instances to a time which is a multiple of the
                # new sampling interval.
                newtmin_want = math.ceil(
                    (tr.tmin+(nfir+1)*tr.deltat) / newdeltat) * newdeltat \
                    - (nfir/2*tr.deltat)
                ioffset = int(round((newtmin_want - tr.tmin)/tr.deltat))
                if ioffset < 0:
                    ioffset = ioffset % q

            newtmin_have = tr.tmin + ioffset * tr.deltat
            newtr = tr.copy(data=False)
            newtr.deltat = newdeltat
            # because the fir kernel shifts data by nfir/2 samples:
            newtr.tmin = newtmin_have - (nfir/2*tr.deltat)
            newtr.set_ydata(tr.get_ydata()[ioffset::q].copy())
            states.set(tr, (ioffset % q - tr.data_len() % q) % q)
            target.send(newtr)

    except GeneratorExit:
        target.close()


def co_downsample(target, q, n=None, ftype='fir'):
    '''
    Successively downsample broken continuous trace data (coroutine).

    Create coroutine which takes :py:class:`Trace` objects, downsamples their
    data and sends new :py:class:`Trace` objects containing the downsampled
    data to target. This is useful if one wants to downsample a long
    continuous time series, which is split into many successive traces,
    without producing filter artifacts and gaps at trace boundaries.

    Filter states are kept *per channel*, specifically, for each (network,
    station, location, channel) combination occurring in the input traces, a
    separate state is created and maintained. This makes it possible to
    filter multichannel or multistation data with only one
    :py:func:`co_lfilter` instance.

    Filter state is reset when gaps occur. The sampling instances are chosen
    so that they occur at (or as close as possible to) even multiples of the
    sampling interval of the downsampled trace (based on system time).
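    Use it like this (a sketch; ``traces`` is assumed to be a list of
    :py:class:`Trace` objects)::

        from pyrocko.trace import co_downsample, co_list_append

        downsampled = []
        pipe = co_downsample(co_list_append(downsampled), 2)
        for trace in traces:
            pipe.send(trace)

        pipe.close()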

    '''

    b, a, n = util.decimate_coeffs(q, n, ftype)
    return co_antialias(co_dropsamples(target, q, n), q, n, ftype)


@coroutine
def co_downsample_to(target, deltat):

    decimators = {}
    try:
        while True:
            tr = (yield)
            ratio = deltat / tr.deltat
            rratio = round(ratio)
            if abs(rratio - ratio)/ratio > 0.0001:
                raise util.UnavailableDecimation('ratio = %g' % ratio)

            deci_seq = tuple(x for x in util.decitab(int(rratio)) if x != 1)
            if deci_seq not in decimators:
                pipe = target
                for q in deci_seq[::-1]:
                    pipe = co_downsample(pipe, q)

                decimators[deci_seq] = pipe

            decimators[deci_seq].send(tr)

    except GeneratorExit:
        for g in decimators.values():
            g.close()


class DomainChoice(StringChoice):
    choices = [
        'time_domain',
        'frequency_domain',
        'envelope',
        'absolute',
        'cc_max_norm']


class MisfitSetup(Object):
    '''
    Contains misfit setup to be used in :py:meth:`Trace.misfit`.

    :param description: description of the setup
    :param norm: L-norm classifier
    :param taper: object of :py:class:`Taper`
    :param filter: object of :py:class:`FrequencyResponse`
    :param domain: ``'time_domain'``, ``'frequency_domain'``, ``'envelope'``,
        ``'absolute'`` or ``'cc_max_norm'``

    Can be dumped to a YAML file.
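    Example (a sketch)::

        setup = MisfitSetup(
            norm=2,
            taper=CosFader(xfade=2.0),
            domain='time_domain')

        print(setup.dump())  # YAML representation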

    '''

    xmltagname = 'misfitsetup'
    description = String.T(optional=True)
    norm = Int.T(optional=False)
    taper = Taper.T(optional=False)
    filter = FrequencyResponse.T(optional=True)
    domain = DomainChoice.T(default='time_domain')


def equalize_sampling_rates(trace_1, trace_2):
    '''
    Equalize sampling rates of two traces (reduce higher sampling rate to
    lower).

    :param trace_1: :py:class:`Trace` object
    :param trace_2: :py:class:`Trace` object

    :returns: ``(trace_1, trace_2)``; if resampling was needed, the
        downsampled trace is a copy, the other trace is passed through
        unchanged.
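    Example (a sketch; assumes ``tr1`` and ``tr2`` are traces with different
    sampling rates)::

        tr1, tr2 = equalize_sampling_rates(tr1, tr2)
        assert same_sampling_rate(tr1, tr2)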

    '''

    if same_sampling_rate(trace_1, trace_2):
        return trace_1, trace_2

    if trace_1.deltat < trace_2.deltat:
        t1_out = trace_1.copy()
        t1_out.downsample_to(deltat=trace_2.deltat, snap=True)
        logger.debug('Trace downsampled (return copy of trace): %s'
                     % '.'.join(t1_out.nslc_id))
        return t1_out, trace_2

    elif trace_1.deltat > trace_2.deltat:
        t2_out = trace_2.copy()
        t2_out.downsample_to(deltat=trace_1.deltat, snap=True)
        logger.debug('Trace downsampled (return copy of trace): %s'
                     % '.'.join(t2_out.nslc_id))
        return trace_1, t2_out


def Lx_norm(u, v, norm=2):
    '''
    Calculate the misfit value *m* and the normalization divisor *n*
    according to norm.

    The normalization divisor *n* is calculated from ``v``.

    :param u: :py:class:`numpy.ndarray`
    :param v: :py:class:`numpy.ndarray`
    :param norm: (default = 2)

    ``u`` and ``v`` must be of same size.
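    For ``norm == 2``, for example, this amounts to::

        m = sqrt(sum((v-u)**2))
        n = sqrt(sum(v**2))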

    '''

    if norm == 1:
        return (
            num.sum(num.abs(v-u)),
            num.sum(num.abs(v)))

    elif norm == 2:
        return (
            num.sqrt(num.sum((v-u)**2)),
            num.sqrt(num.sum(v**2)))

    else:
        return (
            num.power(num.sum(num.abs(num.power(v - u, norm))), 1./norm),
            num.power(num.sum(num.abs(num.power(v, norm))), 1./norm))


def do_downsample(tr, deltat):
    if abs(tr.deltat - deltat) / tr.deltat > 1e-6:
        tr = tr.copy()
        tr.downsample_to(deltat, snap=True, demean=False)
    else:
        # snap only if the sample times are not already aligned to multiples
        # of deltat:
        if tr.tmin / tr.deltat % 1.0 > 1e-6 or \
                tr.tmax / tr.deltat % 1.0 > 1e-6:
            tr = tr.copy()
            tr.snap()
    return tr


def do_extend(tr, tmin, tmax):
    if tmin < tr.tmin or tmax > tr.tmax:
        tr = tr.copy()
        tr.extend(tmin=tmin, tmax=tmax, fillmethod='repeat')

    return tr


3478 

3479def do_pre_taper(tr, taper): 

3480 return tr.taper(taper, inplace=False, chop=True) 

3481 

3482 

def do_fft(tr, filter):
    if filter is None:
        return tr
    else:
        ndata = tr.ydata.size
        nfft = nextpow2(ndata)
        padded = num.zeros(nfft, dtype=float)
        padded[:ndata] = tr.ydata
        spectrum = num.fft.rfft(padded)
        df = 1.0 / (tr.deltat * nfft)
        frequencies = num.arange(spectrum.size)*df
        return [tr, frequencies, spectrum]


def do_filter(inp, filter):
    if filter is None:
        return inp
    else:
        tr, frequencies, spectrum = inp
        spectrum *= filter.evaluate(frequencies)
        return [tr, frequencies, spectrum]


def do_ifft(inp):
    if isinstance(inp, Trace):
        return inp
    else:
        tr, _, spectrum = inp
        ndata = tr.ydata.size
        tr = tr.copy(data=False)
        tr.set_ydata(num.fft.irfft(spectrum)[:ndata])
        return tr


def check_alignment(t1, t2):
    if abs(t1.tmin - t2.tmin) > t1.deltat * 1e-4 or \
            abs(t1.tmax - t2.tmax) > t1.deltat * 1e-4 or \
            t1.ydata.shape != t2.ydata.shape:
        raise MisalignedTraces(
            'Cannot calculate misfit of %s and %s due to misaligned '
            'traces.' % ('.'.join(t1.nslc_id), '.'.join(t2.nslc_id)))