# https://pyrocko.org - GPLv3
#
# The Pyrocko Developers, 21st Century
# ---|P------/S----------~Lg----------

'''
Basic signal processing for seismic waveforms.
'''

import time
import math
import copy
import logging
import hashlib
from collections import defaultdict

import numpy as num
from scipy import signal

from pyrocko import util, orthodrome, pchain, model, signal_ext
from pyrocko.util import reuse
from pyrocko.guts import Object, Float, Int, String, List, \
    StringChoice, Timestamp
from pyrocko.guts_array import Array
from pyrocko.model import squirrel_content

# backward compatibility
from pyrocko.util import UnavailableDecimation  # noqa
from pyrocko.response import (  # noqa
    FrequencyResponse, Evalresp, InverseEvalresp, PoleZeroResponse,
    ButterworthResponse, SampledResponse, IntegrationResponse,
    DifferentiationResponse, MultiplyResponse)


guts_prefix = 'pf'

logger = logging.getLogger('pyrocko.trace')


g_tapered_coeffs_cache = {}
g_one_response = FrequencyResponse()


@squirrel_content
class Trace(Object):

    '''
    Create new trace object.

    A ``Trace`` object represents a single continuous strip of evenly sampled
    time series data. It is built from a 1D NumPy array containing the data
    samples and some attributes describing its beginning and ending time, its
    sampling rate and four string identifiers (its network, station, location
    and channel code).

    :param network: network code
    :param station: station code
    :param location: location code
    :param channel: channel code
    :param extra: extra code
    :param tmin: system time of first sample in [s]
    :param tmax: system time of last sample in [s] (if set to ``None`` it is
        computed from length of ``ydata``)
    :param deltat: sampling interval in [s]
    :param ydata: 1D numpy array with data samples (can be ``None`` when
        ``tmax`` is not ``None``)
    :param mtime: optional modification time

    :param meta: additional meta information (not used, but maintained by the
        library)

    The length of the network, station, location and channel codes is not
    restricted by this software, but data formats like SAC, Mini-SEED or GSE
    have different limits on the lengths of these codes. The codes set here
    are silently truncated when the trace is stored.
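
    Example, a minimal sketch (``num`` is NumPy as imported in this module;
    codes and times are made up for illustration)::

        tr = Trace(
            station='TEST', channel='Z', deltat=0.5,
            tmin=util.str_to_time('2010-01-01 00:00:00'),
            ydata=num.random.normal(size=100))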

    '''

    network = String.T(default='', help='Deployment/network code (1-8)')
    station = String.T(default='STA', help='Station code (1-5)')
    location = String.T(default='', help='Location code (0-2)')
    channel = String.T(default='', help='Channel code (3)')
    extra = String.T(default='', help='Extra/custom code')

    tmin = Timestamp.T(default=Timestamp.D('1970-01-01 00:00:00'))
    tmax = Timestamp.T()

    deltat = Float.T(default=1.0)
    ydata = Array.T(optional=True, shape=(None,), serialize_as='base64+meta')

    mtime = Timestamp.T(optional=True)

    cached_frequencies = {}

    def __init__(self, network='', station='STA', location='', channel='',
                 tmin=0., tmax=None, deltat=1., ydata=None, mtime=None,
                 meta=None, extra=''):

        Object.__init__(self, init_props=False)

        time_float = util.get_time_float()

        if not isinstance(tmin, time_float):
            tmin = Trace.tmin.regularize_extra(tmin)

        if tmax is not None and not isinstance(tmax, time_float):
            tmax = Trace.tmax.regularize_extra(tmax)

        if mtime is not None and not isinstance(mtime, time_float):
            mtime = Trace.mtime.regularize_extra(mtime)

        if ydata is not None and not isinstance(ydata, num.ndarray):
            ydata = Trace.ydata.regularize_extra(ydata)

        self._growbuffer = None

        tmin = time_float(tmin)
        if tmax is not None:
            tmax = time_float(tmax)

        if mtime is None:
            mtime = time_float(time.time())

        self.network, self.station, self.location, self.channel, \
            self.extra = [
                reuse(x) for x in (
                    network, station, location, channel, extra)]

        self.tmin = tmin
        self.deltat = deltat

        if tmax is None:
            if ydata is not None:
                self.tmax = self.tmin + (ydata.size-1)*self.deltat
            else:
                raise Exception(
                    'fixme: trace must be created with tmax or ydata')
        else:
            n = int(round((tmax - self.tmin) / self.deltat)) + 1
            self.tmax = self.tmin + (n - 1) * self.deltat

        self.meta = meta
        self.ydata = ydata
        self.mtime = mtime
        self._update_ids()
        self.file = None
        self._pchain = None

    def __str__(self):
        fmt = min(9, max(0, -int(math.floor(math.log10(self.deltat)))))
        s = 'Trace (%s, %s, %s, %s)\n' % self.nslc_id
        s += '  timerange: %s - %s\n' % (
            util.time_to_str(self.tmin, format=fmt),
            util.time_to_str(self.tmax, format=fmt))

        s += '  delta t: %g\n' % self.deltat
        if self.meta:
            for k in sorted(self.meta.keys()):
                s += '  %s: %s\n' % (k, self.meta[k])
        return s

    @property
    def codes(self):
        from pyrocko.squirrel import model
        return model.CodesNSLCE(
            self.network, self.station, self.location, self.channel,
            self.extra)

    @property
    def time_span(self):
        return self.tmin, self.tmax

    @property
    def summary_entries(self):
        return (
            self.__class__.__name__,
            str(self.codes),
            self.str_time_span,
            '%g' % (1.0/self.deltat))

    @property
    def summary_stats_entries(self):
        return tuple('%13.7g' % v for v in (
            self.ydata.min(),
            self.ydata.max(),
            self.ydata.mean(),
            self.ydata.std()))

    @property
    def summary(self):
        return util.fmt_summary(self.summary_entries, (10, 20, 55, 0))

    @property
    def summary_stats(self):
        return self.summary + ' | ' + util.fmt_summary(
            self.summary_stats_entries, (12, 12, 12, 12))

    def __getstate__(self):
        return (self.network, self.station, self.location, self.channel,
                self.tmin, self.tmax, self.deltat, self.mtime,
                self.ydata, self.meta, self.extra)

    def __setstate__(self, state):
        if len(state) == 11:
            self.network, self.station, self.location, self.channel, \
                self.tmin, self.tmax, self.deltat, self.mtime, \
                self.ydata, self.meta, self.extra = state

        elif len(state) == 12:
            # backward compatibility 0
            self.network, self.station, self.location, self.channel, \
                self.tmin, self.tmax, self.deltat, self.mtime, \
                self.ydata, self.meta, _, self.extra = state

        elif len(state) == 10:
            # backward compatibility 1
            self.network, self.station, self.location, self.channel, \
                self.tmin, self.tmax, self.deltat, self.mtime, \
                self.ydata, self.meta = state

            self.extra = ''

        else:
            # backward compatibility 2
            self.network, self.station, self.location, self.channel, \
                self.tmin, self.tmax, self.deltat, self.mtime = state
            self.ydata = None
            self.meta = None
            self.extra = ''

        self._growbuffer = None
        self._update_ids()

    def hash(self, unsafe=False):
        sha1 = hashlib.sha1()
        for code in self.nslc_id:
            sha1.update(code.encode())
        sha1.update(self.tmin.hex().encode())
        sha1.update(self.tmax.hex().encode())
        sha1.update(self.deltat.hex().encode())

        if self.ydata is not None:
            if not unsafe:
                sha1.update(memoryview(self.ydata))
            else:
                sha1.update(memoryview(self.ydata[:32]))
                sha1.update(memoryview(self.ydata[-32:]))

        return sha1.hexdigest()

    def name(self):
        '''
        Get a short string description.
        '''

        s = '%s.%s.%s.%s, %s, %s' % (self.nslc_id + (
            util.time_to_str(self.tmin),
            util.time_to_str(self.tmax)))

        return s

    def __eq__(self, other):
        return (
            isinstance(other, Trace)
            and self.network == other.network
            and self.station == other.station
            and self.location == other.location
            and self.channel == other.channel
            and (abs(self.deltat - other.deltat)
                 < (self.deltat + other.deltat)*1e-6)
            and abs(self.tmin-other.tmin) < self.deltat*0.01
            and abs(self.tmax-other.tmax) < self.deltat*0.01
            and num.all(self.ydata == other.ydata))

    def almost_equal(self, other, rtol=1e-5, atol=0.0):
        return (
            self.network == other.network
            and self.station == other.station
            and self.location == other.location
            and self.channel == other.channel
            and (abs(self.deltat - other.deltat)
                 < (self.deltat + other.deltat)*1e-6)
            and abs(self.tmin-other.tmin) < self.deltat*0.01
            and abs(self.tmax-other.tmax) < self.deltat*0.01
            and num.allclose(self.ydata, other.ydata, rtol=rtol, atol=atol))

    def assert_almost_equal(self, other, rtol=1e-5, atol=0.0):

        assert self.network == other.network, \
            'network codes differ: %s, %s' % (self.network, other.network)
        assert self.station == other.station, \
            'station codes differ: %s, %s' % (self.station, other.station)
        assert self.location == other.location, \
            'location codes differ: %s, %s' % (self.location, other.location)
        assert self.channel == other.channel, 'channel codes differ'
        assert (abs(self.deltat - other.deltat)
                < (self.deltat + other.deltat)*1e-6), \
            'sampling intervals differ %g, %g' % (self.deltat, other.deltat)
        assert abs(self.tmin-other.tmin) < self.deltat*0.01, \
            'start times differ: %s, %s' % (
                util.time_to_str(self.tmin), util.time_to_str(other.tmin))
        assert abs(self.tmax-other.tmax) < self.deltat*0.01, \
            'end times differ: %s, %s' % (
                util.time_to_str(self.tmax), util.time_to_str(other.tmax))

        assert num.allclose(self.ydata, other.ydata, rtol=rtol, atol=atol), \
            'trace values differ'

    def __hash__(self):
        return id(self)

    def __call__(self, t, clip=False, snap=round):
        it = int(snap((t-self.tmin)/self.deltat))
        if clip:
            it = max(0, min(it, self.ydata.size-1))
        else:
            if it < 0 or self.ydata.size <= it:
                raise IndexError()

        return self.tmin+it*self.deltat, self.ydata[it]

    def interpolate(self, t, clip=False):
        '''
        Value of trace between supporting points through linear interpolation.

        :param t: time instant
        :param clip: whether to clip indices to trace ends
        '''

        t0, y0 = self(t, clip=clip, snap=math.floor)
        t1, y1 = self(t, clip=clip, snap=math.ceil)
        if t0 == t1:
            return y0
        else:
            return y0+(t-t0)/(t1-t0)*(y1-y0)

    def index_clip(self, i):
        '''
        Clip index to valid range.
        '''

        return min(max(0, i), self.ydata.size)

    def add(self, other, interpolate=True, left=0., right=0.):
        '''
        Add values of other trace (self += other).

        Add values of ``other`` trace to the values of ``self``, where it
        intersects with ``other``. This method does not change the extent of
        ``self``. If ``interpolate`` is ``True`` (the default), the values of
        ``other`` to be added are interpolated at sampling instants of
        ``self``. Linear interpolation is performed. In this case the
        sampling rate of ``other`` must be equal to or lower than that of
        ``self``. If ``interpolate`` is ``False``, the sampling rates of the
        two traces must match.
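
        Example, a minimal sketch (assumes two overlapping traces ``a`` and
        ``b``, with ``b`` sampled at the same or a lower rate)::

            a.add(b)  # b is linearly interpolated at a's sampling instants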

        '''

        if interpolate:
            assert self.deltat <= other.deltat \
                or same_sampling_rate(self, other)

            other_xdata = other.get_xdata()
            xdata = self.get_xdata()
            self.ydata += num.interp(
                xdata, other_xdata, other.ydata, left=left, right=right)
        else:
            assert self.deltat == other.deltat
            ioff = int(round((other.tmin-self.tmin)/self.deltat))
            ibeg = max(0, ioff)
            iend = min(self.data_len(), ioff+other.data_len())
            self.ydata[ibeg:iend] += other.ydata[ibeg-ioff:iend-ioff]

    def mult(self, other, interpolate=True):
        '''
        Multiply with values of other trace ``(self *= other)``.

        Multiply values of ``self`` with those of the ``other`` trace, where
        it intersects with ``other``. This method does not change the extent
        of ``self``. If ``interpolate`` is ``True`` (the default), the values
        of ``other`` to be multiplied are interpolated at sampling instants
        of ``self``. Linear interpolation is performed. In this case the
        sampling rate of ``other`` must be equal to or lower than that of
        ``self``. If ``interpolate`` is ``False``, the sampling rates of the
        two traces must match.
        '''

        if interpolate:
            assert self.deltat <= other.deltat or \
                same_sampling_rate(self, other)

            other_xdata = other.get_xdata()
            xdata = self.get_xdata()
            self.ydata *= num.interp(
                xdata, other_xdata, other.ydata, left=0., right=0.)
        else:
            assert self.deltat == other.deltat
            ibeg1 = int(round((other.tmin-self.tmin)/self.deltat))
            ibeg2 = int(round((self.tmin-other.tmin)/self.deltat))
            iend1 = int(round((other.tmax-self.tmin)/self.deltat))+1
            iend2 = int(round((self.tmax-other.tmin)/self.deltat))+1

            ibeg1 = self.index_clip(ibeg1)
            iend1 = self.index_clip(iend1)
            ibeg2 = self.index_clip(ibeg2)
            iend2 = self.index_clip(iend2)

            self.ydata[ibeg1:iend1] *= other.ydata[ibeg2:iend2]

    def max(self):
        '''
        Get time and value of data maximum.
        '''

        i = num.argmax(self.ydata)
        return self.tmin + i*self.deltat, self.ydata[i]

    def min(self):
        '''
        Get time and value of data minimum.
        '''

        i = num.argmin(self.ydata)
        return self.tmin + i*self.deltat, self.ydata[i]

    def absmax(self):
        '''
        Get time and value of maximum of the absolute of data.
        '''

        tmi, mi = self.min()
        tma, ma = self.max()
        if abs(mi) > abs(ma):
            return tmi, abs(mi)
        else:
            return tma, abs(ma)

    def set_codes(
            self, network=None, station=None, location=None, channel=None,
            extra=None):

        '''
        Set network, station, location, and channel codes.
        '''

        if network is not None:
            self.network = network
        if station is not None:
            self.station = station
        if location is not None:
            self.location = location
        if channel is not None:
            self.channel = channel
        if extra is not None:
            self.extra = extra

        self._update_ids()

    def set_network(self, network):
        self.network = network
        self._update_ids()

    def set_station(self, station):
        self.station = station
        self._update_ids()

    def set_location(self, location):
        self.location = location
        self._update_ids()

    def set_channel(self, channel):
        self.channel = channel
        self._update_ids()

    def overlaps(self, tmin, tmax):
        '''
        Check if trace has overlap with a given time span.
        '''
        return not (tmax < self.tmin or self.tmax < tmin)

    def is_relevant(self, tmin, tmax, selector=None):
        '''
        Check if trace has overlap with a given time span and matches a
        condition callback. (internal use)
        '''

        return not (tmax <= self.tmin or self.tmax < tmin) \
            and (selector is None or selector(self))

    def _update_ids(self):
        '''
        Update dependent ids.
        '''

        self.full_id = (
            self.network, self.station, self.location, self.channel,
            self.tmin)
        self.nslc_id = reuse(
            (self.network, self.station, self.location, self.channel))

    def prune_from_reuse_cache(self):
        util.deuse(self.nslc_id)
        util.deuse(self.network)
        util.deuse(self.station)
        util.deuse(self.location)
        util.deuse(self.channel)

    def set_mtime(self, mtime):
        '''
        Set modification time of the trace.
        '''

        self.mtime = mtime

    def get_xdata(self):
        '''
        Create array for time axis.
        '''

        if self.ydata is None:
            raise NoData()

        return self.tmin \
            + num.arange(len(self.ydata), dtype=num.float64) * self.deltat

    def get_ydata(self):
        '''
        Get data array.
        '''

        if self.ydata is None:
            raise NoData()

        return self.ydata

    def set_ydata(self, new_ydata):
        '''
        Replace data array.
        '''

        self.drop_growbuffer()
        self.ydata = new_ydata
        self.tmax = self.tmin+(len(self.ydata)-1)*self.deltat

    def data_len(self):
        if self.ydata is not None:
            return self.ydata.size
        else:
            return int(round((self.tmax-self.tmin)/self.deltat)) + 1

    def drop_data(self):
        '''
        Forget data, make dataless trace.
        '''

        self.drop_growbuffer()
        self.ydata = None

    def drop_growbuffer(self):
        '''
        Detach the trace's grow buffer.
        '''

        self._growbuffer = None
        self._pchain = None

    def copy(self, data=True):
        '''
        Make a deep copy of the trace.
        '''

        tracecopy = copy.copy(self)
        tracecopy.drop_growbuffer()
        if data:
            tracecopy.ydata = self.ydata.copy()
        tracecopy.meta = copy.deepcopy(self.meta)
        return tracecopy

    def crop_zeros(self):
        '''
        Remove any zeros at beginning and end.
        '''

        indices = num.where(self.ydata != 0.0)[0]
        if indices.size == 0:
            raise NoData()

        ibeg = indices[0]
        iend = indices[-1]+1
        if ibeg == 0 and iend == self.ydata.size:
            return

        self.drop_growbuffer()
        self.ydata = self.ydata[ibeg:iend].copy()
        self.tmin = self.tmin+ibeg*self.deltat
        self.tmax = self.tmin+(len(self.ydata)-1)*self.deltat
        self._update_ids()

    def append(self, data):
        '''
        Append data to the end of the trace.

        To make this method efficient when successively very few or even
        single samples are appended, a larger grow buffer is allocated upon
        first invocation. The trace's data is then changed to be a view into
        the currently filled portion of the grow buffer array.
        '''

        assert self.ydata.dtype == data.dtype
        newlen = data.size + self.ydata.size
        if self._growbuffer is None or self._growbuffer.size < newlen:
            self._growbuffer = num.empty(newlen*2, dtype=self.ydata.dtype)
            self._growbuffer[:self.ydata.size] = self.ydata
        self._growbuffer[self.ydata.size:newlen] = data
        self.ydata = self._growbuffer[:newlen]
        self.tmax = self.tmin + (newlen-1)*self.deltat

    def chop(
            self, tmin, tmax, inplace=True, include_last=False,
            snap=(round, round), want_incomplete=True):

        '''
        Cut the trace to given time span.

        If the ``inplace`` argument is True (the default) the trace is cut in
        place, otherwise a new trace with the cut part is returned. By
        default, the indices where to start and end the trace data array are
        determined by rounding of ``tmin`` and ``tmax`` to sampling instances
        using Python's :py:func:`round` function. This behaviour can be
        changed with the ``snap`` argument, which takes a tuple of two
        functions (one for the lower and one for the upper end) to be used
        instead of :py:func:`round`. The last sample is by default not
        included unless ``include_last`` is set to True. If the given time
        span exceeds the available time span of the trace, the available
        part is returned, unless ``want_incomplete`` is set to False - in
        that case, a :py:exc:`NoData` exception is raised. This exception is
        always raised when the requested time span does not overlap with the
        trace's time span.
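
        Example, a minimal sketch (assumes ``tr`` covers the requested
        window)::

            snippet = tr.chop(tr.tmin+10., tr.tmin+20., inplace=False)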

        '''

        if want_incomplete:
            if tmax <= self.tmin-self.deltat or self.tmax+self.deltat < tmin:
                raise NoData()
        else:
            if tmin < self.tmin or self.tmax < tmax:
                raise NoData()

        ibeg = max(0, t2ind(tmin-self.tmin, self.deltat, snap[0]))
        iplus = 0
        if include_last:
            iplus = 1

        iend = min(
            self.data_len(),
            t2ind(tmax-self.tmin, self.deltat, snap[1])+iplus)

        if ibeg >= iend:
            raise NoData()

        obj = self
        if not inplace:
            obj = self.copy(data=False)

        self.drop_growbuffer()
        if self.ydata is not None:
            obj.ydata = self.ydata[ibeg:iend].copy()
        else:
            obj.ydata = None

        obj.tmin = obj.tmin+ibeg*obj.deltat
        obj.tmax = obj.tmin+((iend-ibeg)-1)*obj.deltat

        obj._update_ids()

        return obj

    def downsample(
            self, ndecimate, snap=False, demean=False, ftype='fir-remez',
            cut=False):

        '''
        Downsample (decimate) trace by a given integer factor.

        Antialiasing filter details:

        * ``iir``: A Chebyshev type I digital filter of order 8 with maximum
          ripple of 0.05 dB in the passband.
        * ``fir``: A FIR filter using a Hamming window and 31 taps and a
          well-behaved phase response.
        * ``fir-remez``: A FIR filter based on the Remez exchange algorithm
          with ``45*ndecimate`` taps and a cutoff at 75% Nyquist frequency.

        Comparison of the digital filters:

        .. figure :: ../../static/downsampling-filter-comparison.png
            :width: 60%
            :alt: Comparison of the downsampling filters.

        See also :py:meth:`Trace.downsample_to`.

        :param ndecimate:
            Decimation factor, avoid values larger than 8.
        :type ndecimate:
            int

        :param snap:
            Whether to put the new sampling instants closest to multiples of
            the sampling rate (according to absolute time).
        :type snap:
            bool

        :param demean:
            Whether to demean the signal before filtering.
        :type demean:
            bool

        :param ftype:
            Which filter to use, choose from ``'iir'``, ``'fir'``,
            ``'fir-remez'``. Default is ``'fir-remez'``.

        :param cut:
            Whether to cut off samples in the beginning of the trace which
            are polluted by artifacts of the anti-aliasing filter.
        :type cut:
            bool
        '''
        newdeltat = self.deltat*ndecimate
        b, a, n = util.decimate_coeffs(ndecimate, None, ftype)
        if snap:
            ilag = int(round((math.ceil(
                (self.tmin+(n//2 if cut else 0)*self.deltat) /
                newdeltat) * newdeltat - self.tmin) / self.deltat))
        else:
            ilag = (n//2 if cut else 0)

        data = self.ydata.astype(num.float64)
        if data.size != 0:
            if demean:
                data -= num.mean(data)
            y = signal.lfilter(b, a, data)
            self.ydata = y[n//2+ilag::ndecimate].copy()
        else:
            self.ydata = data

        self.tmin += ilag * self.deltat
        self.deltat = reuse(self.deltat*ndecimate)
        self.tmax = self.tmin+(len(self.ydata)-1)*self.deltat
        self._update_ids()

    def downsample_to(
            self, deltat, snap=False, allow_upsample_max=1, demean=False,
            ftype='fir-remez', cut=False):

        '''
        Downsample to given sampling rate.

        Tries to downsample the trace to a target sampling interval of
        ``deltat``. This runs :py:meth:`downsample` one or several times. If
        ``allow_upsample_max`` is set to a value larger than 1, intermediate
        upsampling steps are allowed, in order to increase the number of
        possible downsampling ratios.

        If the requested ratio is not supported, an exception of type
        :py:exc:`pyrocko.util.UnavailableDecimation` is raised.

        The downsampled trace will be shorter than the input trace because
        the anti-aliasing filter introduces edge effects. If ``cut`` is
        selected, polluted samples in the beginning of the trace are
        additionally removed. The approximate amount of cutoff which will be
        produced by a given downsampling configuration can be estimated with
        :py:func:`downsample_tpad`.

        See also: :meth:`Trace.downsample`.
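
        Example, a minimal sketch (assumes a trace sampled at 100 Hz, i.e.
        ``deltat`` of 0.01 s; the 1:100 ratio is decimated in several
        steps)::

            tr.downsample_to(1.0)  # 0.01 s -> 1.0 s sampling interval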


        :param deltat:
            Desired sampling interval in [s].
        :type deltat:
            float

        :param allow_upsample_max:
            Maximum allowed upsampling factor of the signal to achieve the
            desired sampling interval. Default is ``1``.
        :type allow_upsample_max:
            int

        :param snap:
            Whether to put the new sampling instants closest to multiples of
            the sampling rate (according to absolute time).
        :type snap:
            bool

        :param demean:
            Whether to demean the signal before filtering.
        :type demean:
            bool

        :param ftype:
            Which filter to use, choose from ``'iir'``, ``'fir'``,
            ``'fir-remez'``. Default is ``'fir-remez'``.

        :param cut:
            Whether to cut off samples in the beginning of the trace which
            are polluted by artifacts of the anti-aliasing filter.
        :type cut:
            bool
        '''

        upsratio, deci_seq = _configure_downsampling(
            self.deltat, deltat, allow_upsample_max)

        if demean:
            self.drop_growbuffer()
            self.ydata = self.ydata.astype(num.float64)
            self.ydata -= num.mean(self.ydata)

        if upsratio > 1:
            self.drop_growbuffer()
            ydata = self.ydata
            self.ydata = num.zeros(
                ydata.size*upsratio-(upsratio-1), ydata.dtype)
            self.ydata[::upsratio] = ydata
            for i in range(1, upsratio):
                self.ydata[i::upsratio] = \
                    float(i)/upsratio * ydata[:-1] \
                    + float(upsratio-i)/upsratio * ydata[1:]
            self.deltat = self.deltat/upsratio

        for i, ndecimate in enumerate(deci_seq):
            self.downsample(
                ndecimate, snap=snap, demean=False, ftype=ftype, cut=cut)

    def resample(self, deltat):
        '''
        Resample to given sampling rate ``deltat``.

        Resampling is performed in the frequency domain.
        '''

        ndata = self.ydata.size
        ntrans = nextpow2(ndata)
        fntrans2 = ntrans * self.deltat/deltat
        ntrans2 = int(round(fntrans2))
        deltat2 = self.deltat * float(ntrans)/float(ntrans2)
        ndata2 = int(round(ndata*self.deltat/deltat2))
        if abs(fntrans2 - ntrans2) > 1e-7:
            logger.warning(
                'resample: requested deltat %g could not be matched exactly: '
                '%g' % (deltat, deltat2))

        data = self.ydata
        data_pad = num.zeros(ntrans, dtype=float)
        data_pad[:ndata] = data
        fdata = num.fft.rfft(data_pad)
        fdata2 = num.zeros((ntrans2+1)//2, dtype=fdata.dtype)
        n = min(fdata.size, fdata2.size)
        fdata2[:n] = fdata[:n]
        data2 = num.fft.irfft(fdata2)
        data2 = data2[:ndata2]
        data2 *= float(ntrans2) / float(ntrans)
        self.deltat = deltat2
        self.set_ydata(data2)

    def resample_simple(self, deltat):
        tyear = 3600*24*365.

        if deltat == self.deltat:
            return

        if abs(self.deltat - deltat) * tyear/deltat < deltat:
            logger.warning(
                'resample_simple: less than one sample would have to be '
                'inserted/deleted per year. Doing nothing.')
            return

        ninterval = int(round(deltat / (self.deltat - deltat)))
        if abs(ninterval) < 20:
            logger.error(
                'resample_simple: sample insertion/deletion interval less '
                'than 20. results would be erroneous.')
            raise ResamplingFailed()

        delete = False
        if ninterval < 0:
            ninterval = - ninterval
            delete = True

        tyearbegin = util.year_start(self.tmin)

        nmin = int(round((self.tmin - tyearbegin)/deltat))

        ibegin = (((nmin-1)//ninterval)+1) * ninterval - nmin
        nindices = (len(self.ydata) - ibegin - 1) // ninterval + 1
        if nindices > 0:
            indices = ibegin + num.arange(nindices) * ninterval
            data_split = num.split(self.ydata, indices)
            data = []
            for ln, h in zip(data_split[:-1], data_split[1:]):
                if delete:
                    ln = ln[:-1]

                data.append(ln)
                if not delete:
                    if ln.size == 0:
                        v = h[0]
                    else:
                        v = 0.5*(ln[-1] + h[0])
                    data.append(num.array([v], dtype=ln.dtype))

            data.append(data_split[-1])

            ydata_new = num.concatenate(data)

            self.tmin = tyearbegin + nmin * deltat
            self.deltat = deltat
            self.set_ydata(ydata_new)

    def stretch(self, tmin_new, tmax_new):
        '''
        Stretch signal while preserving sample rate using sinc interpolation.

        :param tmin_new: new time of first sample
        :param tmax_new: new time of last sample

        This method can be used to correct for a small linear time drift or
        to introduce sub-sample time shifts. The amount of stretching is
        limited to 10% by the implementation and, because of the
        approximations used, is expected to be much smaller than that.
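
        Example, a minimal sketch introducing a sub-sample time shift of a
        tenth of a sample::

            tr.stretch(tr.tmin + 0.1*tr.deltat, tr.tmax + 0.1*tr.deltat)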

        '''

        i_control = num.array([0, self.ydata.size-1], dtype=num.int64)
        t_control = num.array([tmin_new, tmax_new], dtype=float)

        r = (tmax_new - tmin_new) / self.deltat + 1.0
        n_new = int(round(r))
        if abs(n_new - r) > 0.001:
            n_new = int(math.floor(r))

        assert n_new >= 2

        tmax_new = tmin_new + (n_new-1) * self.deltat

        ydata_new = num.empty(n_new, dtype=float)
        signal_ext.antidrift(i_control, t_control,
                             self.ydata.astype(float),
                             tmin_new, self.deltat, ydata_new)

        self.tmin = tmin_new
        self.set_ydata(ydata_new)
        self._update_ids()

    def nyquist_check(self, frequency, intro='Corner frequency', warn=True,
                      raise_exception=False):

        '''
        Check if a given frequency is above the Nyquist frequency of the
        trace.

        :param intro: string used to introduce the warning/error message
        :param warn: whether to emit a warning
        :param raise_exception: whether to raise an :py:exc:`AboveNyquist`
            exception.
        '''

        if frequency >= 0.5/self.deltat:
            message = '%s (%g Hz) is equal to or higher than Nyquist ' \
                'frequency (%g Hz). (Trace %s)' \
                % (intro, frequency, 0.5/self.deltat, self.name())
            if warn:
                logger.warning(message)
            if raise_exception:
                raise AboveNyquist(message)

    def lowpass(self, order, corner, nyquist_warn=True,
                nyquist_exception=False, demean=True):

        '''
        Apply Butterworth lowpass to the trace.

        :param order: order of the filter
        :param corner: corner frequency of the filter

        Mean is removed before filtering.
        '''

        self.nyquist_check(
            corner, 'Corner frequency of lowpass', nyquist_warn,
            nyquist_exception)

        (b, a) = _get_cached_filter_coeffs(
            order, [corner*2.0*self.deltat], btype='low')

        if len(a) != order+1 or len(b) != order+1:
            logger.warning(
                'Erroneous filter coefficients returned by '
                'scipy.signal.butter(). You may need to downsample the '
                'signal before filtering.')

        data = self.ydata.astype(num.float64)
        if demean:
            data -= num.mean(data)
        self.drop_growbuffer()
        self.ydata = signal.lfilter(b, a, data)

    def highpass(self, order, corner, nyquist_warn=True,
                 nyquist_exception=False, demean=True):

        '''
        Apply Butterworth highpass to the trace.

        :param order: order of the filter
        :param corner: corner frequency of the filter

        Mean is removed before filtering.
        '''

        self.nyquist_check(
            corner, 'Corner frequency of highpass', nyquist_warn,
            nyquist_exception)

        (b, a) = _get_cached_filter_coeffs(
            order, [corner*2.0*self.deltat], btype='high')

        data = self.ydata.astype(num.float64)
        if len(a) != order+1 or len(b) != order+1:
            logger.warning(
                'Erroneous filter coefficients returned by '
                'scipy.signal.butter(). You may need to downsample the '
                'signal before filtering.')
        if demean:
            data -= num.mean(data)
        self.drop_growbuffer()
        self.ydata = signal.lfilter(b, a, data)

    def bandpass(self, order, corner_hp, corner_lp, demean=True):
        '''
        Apply Butterworth bandpass to the trace.

        :param order: order of the filter
        :param corner_hp: lower corner frequency of the filter
        :param corner_lp: upper corner frequency of the filter

        Mean is removed before filtering.
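
        Example, a minimal sketch (4th order, 0.1 - 1.0 Hz passband; both
        corners must lie below the Nyquist frequency)::

            tr.bandpass(4, 0.1, 1.0)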

        '''

        self.nyquist_check(corner_hp, 'Lower corner frequency of bandpass')
        self.nyquist_check(corner_lp, 'Higher corner frequency of bandpass')
        (b, a) = _get_cached_filter_coeffs(
            order,
            [corner*2.0*self.deltat for corner in (corner_hp, corner_lp)],
            btype='band')
        data = self.ydata.astype(num.float64)
        if demean:
            data -= num.mean(data)
        self.drop_growbuffer()
        self.ydata = signal.lfilter(b, a, data)

    def bandstop(self, order, corner_hp, corner_lp, demean=True):
        '''
        Apply bandstop (attenuates frequencies in band) to the trace.

        :param order: order of the filter
        :param corner_hp: lower corner frequency of the filter
        :param corner_lp: upper corner frequency of the filter

        Mean is removed before filtering.
        '''

        self.nyquist_check(corner_hp, 'Lower corner frequency of bandstop')
        self.nyquist_check(corner_lp, 'Higher corner frequency of bandstop')
        (b, a) = _get_cached_filter_coeffs(
            order,
            [corner*2.0*self.deltat for corner in (corner_hp, corner_lp)],
            btype='bandstop')
        data = self.ydata.astype(num.float64)
        if demean:
            data -= num.mean(data)
        self.drop_growbuffer()
        self.ydata = signal.lfilter(b, a, data)

    def envelope(self, inplace=True):
        '''
        Calculate the envelope of the trace.

        :param inplace: calculate envelope in place

        The calculation follows:

        .. math::

            Y' = \\sqrt{Y^2+H(Y)^2}

        where H is the Hilbert-Transform of the signal Y.
        '''

        y = self.ydata.astype(float)
        env = num.abs(hilbert(y))
        if inplace:
            self.drop_growbuffer()
            self.ydata = env
        else:
            tr = self.copy(data=False)
            tr.ydata = env
            return tr

    def taper(self, taperer, inplace=True, chop=False):
        '''
        Apply a :py:class:`Taper` to the trace.

        :param taperer: instance of :py:class:`Taper` subclass
        :param inplace: apply taper inplace
        :param chop: if ``True``: exclude tapered parts from the resulting
            trace
        '''

        if not inplace:
            tr = self.copy()
        else:
            tr = self

        if chop:
            i, n = taperer.span(tr.ydata, tr.tmin, tr.deltat)
            tr.shift(i*tr.deltat)
            tr.set_ydata(tr.ydata[i:i+n])

        taperer(tr.ydata, tr.tmin, tr.deltat)

        if not inplace:
            return tr

    def whiten(self, order=6):
        '''
        Whiten signal in time domain using autoregression and recursive
        filter.

        :param order: order of the autoregression process
        '''

        b, a = self.whitening_coefficients(order)
        self.drop_growbuffer()
        self.ydata = signal.lfilter(b, a, self.ydata)

    def whitening_coefficients(self, order=6):
        ar = yulewalker(self.ydata, order)
        b, a = [1.] + ar.tolist(), [1.]
        return b, a

    def ampspec_whiten(
            self,
            width,
            td_taper='auto',
            fd_taper='auto',
            pad_to_pow2=True,
            demean=True):

        '''
        Whiten signal via frequency domain using moving average on amplitude
        spectra.

        :param width: width of smoothing kernel [Hz]
        :param td_taper: time domain taper, object of type :py:class:`Taper`
            or ``None`` or ``'auto'``.
        :param fd_taper: frequency domain taper, object of type
            :py:class:`Taper` or ``None`` or ``'auto'``.
        :param pad_to_pow2: whether to pad the signal with zeros up to a
            length of 2^n
        :param demean: whether to demean the signal before tapering

        The signal is first demeaned and then tapered using ``td_taper``.
        Then, the spectrum is calculated and inversely weighted with a
        smoothed version of its amplitude spectrum. A moving average is used
        for the smoothing. The smoothed spectrum is then tapered using
        ``fd_taper``. Finally, the smoothed and tapered spectrum is
        back-transformed into the time domain.

        If ``td_taper`` is set to ``'auto'``, ``CosFader(1.0/width)`` is
        used. If ``fd_taper`` is set to ``'auto'``, ``CosFader(width)`` is
        used.
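
        Example, a minimal sketch (the 0.5 Hz smoothing width is an
        arbitrary choice; the trace must be long enough for the kernel)::

            tr.ampspec_whiten(0.5)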

        '''

        ndata = self.data_len()

        if pad_to_pow2:
            ntrans = nextpow2(ndata)
        else:
            ntrans = ndata

        df = 1./(ntrans*self.deltat)
        nw = int(round(width/df))
        if ndata//2+1 <= nw:
            raise TraceTooShort(
                'Samples in trace: %s, samples needed: %s' % (ndata, nw))

        if td_taper == 'auto':
            td_taper = CosFader(1./width)

        if fd_taper == 'auto':
            fd_taper = CosFader(width)

        if td_taper:
            self.taper(td_taper)

        ydata = self.get_ydata().astype(float)
        if demean:
            ydata -= ydata.mean()

        spec = num.fft.rfft(ydata, ntrans)

        amp = num.abs(spec)
        nspec = amp.size
        csamp = num.cumsum(amp)
        amp_smoothed = num.empty(nspec, dtype=csamp.dtype)
        n1, n2 = nw//2, nw//2 + nspec - nw
        amp_smoothed[n1:n2] = (csamp[nw:] - csamp[:-nw]) / nw
        amp_smoothed[:n1] = amp_smoothed[n1]
        amp_smoothed[n2:] = amp_smoothed[n2-1]

        denom = amp_smoothed * amp
        numer = amp
        eps = num.mean(denom) * 1e-9
        if eps == 0.0:
            eps = 1e-9

        numer += eps
        denom += eps
        spec *= numer/denom

        if fd_taper:
            fd_taper(spec, 0., df)

        ydata = num.fft.irfft(spec)
        self.set_ydata(ydata[:ndata])

    def _get_cached_freqs(self, nf, deltaf):
        ck = (nf, deltaf)
        if ck not in Trace.cached_frequencies:
            Trace.cached_frequencies[ck] = deltaf * num.arange(
                nf, dtype=float)

        return Trace.cached_frequencies[ck]

    def bandpass_fft(self, corner_hp, corner_lp):
        '''
        Apply boxcar bandpass to trace (in spectral domain).
        '''

        n = len(self.ydata)
        n2 = nextpow2(n)
        data = num.zeros(n2, dtype=num.float64)
        data[:n] = self.ydata
        fdata = num.fft.rfft(data)
        freqs = self._get_cached_freqs(len(fdata), 1./(self.deltat*n2))
        fdata[0] = 0.0
        fdata *= num.logical_and(corner_hp < freqs, freqs < corner_lp)
        data = num.fft.irfft(fdata)
        self.drop_growbuffer()
        self.ydata = data[:n]

    def shift(self, tshift):
        '''
        Time shift the trace.
        '''

        self.tmin += tshift
        self.tmax += tshift
        self._update_ids()

    def snap(self, inplace=True, interpolate=False):
        '''
        Shift trace samples to nearest even multiples of the sampling rate.

        :param inplace: (boolean) snap traces inplace

        If ``inplace`` is ``False`` and the shifts of ``tmin`` and ``tmax``
        between the snapped and the original trace are both smaller than
        0.01 x deltat, :py:func:`snap` returns the unsnapped instance of the
        original trace.
        '''

        tmin = round(self.tmin/self.deltat)*self.deltat
        tmax = tmin + (self.ydata.size-1)*self.deltat

        if inplace:
            xself = self
        else:
            if abs(self.tmin - tmin) < 1e-2*self.deltat and \
                    abs(self.tmax - tmax) < 1e-2*self.deltat:
                return self

            xself = self.copy()

        if interpolate:
            n = xself.data_len()
            ydata_new = num.empty(n, dtype=float)
            i_control = num.array([0, n-1], dtype=num.int64)
            tref = tmin
            t_control = num.array(
                [float(xself.tmin-tref), float(xself.tmax-tref)],
                dtype=float)

            signal_ext.antidrift(i_control, t_control,
                                 xself.ydata.astype(float),
                                 float(tmin-tref), xself.deltat, ydata_new)

            xself.ydata = ydata_new

        xself.tmin = tmin
        xself.tmax = tmax
        xself._update_ids()

        return xself

    def fix_deltat_rounding_errors(self):
        '''
        Try to undo sampling rate rounding errors.

        See :py:func:`fix_deltat_rounding_errors`.
        '''

        self.deltat = fix_deltat_rounding_errors(self.deltat)
        self.tmax = self.tmin + (self.data_len() - 1) * self.deltat

    def sta_lta_centered(self, tshort, tlong, quad=True, scalingmethod=1):
        '''
        Run special STA/LTA filter where the short time window is centered
        on the long time window.

        :param tshort: length of short time window in [s]
        :param tlong: length of long time window in [s]
        :param quad: whether to square the data prior to applying the
            STA/LTA filter
        :param scalingmethod: integer key to select how output values are
            scaled / normalized (``1``, ``2``, or ``3``)

        ============= ====================================== ===========
        Scalingmethod Implementation                         Range
        ============= ====================================== ===========
        ``1``         As/Al * Ts/Tl                          [0,1]
        ``2``         (As/Al - 1) / (Tl/Ts - 1)              [-Ts/Tl,1]
        ``3``         Like ``2`` but clipping range at zero  [0,1]
        ============= ====================================== ===========
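
        Example, a minimal sketch (window lengths are placeholder values and
        must be tuned to the expected signal)::

            tr.sta_lta_centered(0.5, 5.0)  # 0.5 s STA centered in 5 s LTA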


        '''

        nshort = int(round(tshort/self.deltat))
        nlong = int(round(tlong/self.deltat))

        assert nshort < nlong
        if nlong > len(self.ydata):
            raise TraceTooShort(
                'Samples in trace: %s, samples needed: %s'
                % (len(self.ydata), nlong))

        if quad:
            sqrdata = self.ydata**2
        else:
            sqrdata = self.ydata

        mavg_short = moving_avg(sqrdata, nshort)
        mavg_long = moving_avg(sqrdata, nlong)

        self.drop_growbuffer()

        if scalingmethod not in (1, 2, 3):
            raise Exception('Invalid value for scalingmethod argument.')

        if scalingmethod == 1:
            self.ydata = mavg_short/mavg_long * float(nshort)/float(nlong)
        elif scalingmethod in (2, 3):
            self.ydata = (mavg_short/mavg_long - 1.) \
                / ((float(nlong)/float(nshort)) - 1)

            if scalingmethod == 3:
                self.ydata = num.maximum(self.ydata, 0.)

    def sta_lta_right(self, tshort, tlong, quad=True, scalingmethod=1):
        '''
        Run special STA/LTA filter where the short time window is
        overlapping with the last part of the long time window.

        :param tshort: length of short time window in [s]
        :param tlong: length of long time window in [s]
        :param quad: whether to square the data prior to applying the
            STA/LTA filter
        :param scalingmethod: integer key to select how output values are
            scaled / normalized (``1``, ``2``, or ``3``)

        ============= ====================================== ===========
        Scalingmethod Implementation                         Range
        ============= ====================================== ===========
        ``1``         As/Al * Ts/Tl                          [0,1]
        ``2``         (As/Al - 1) / (Tl/Ts - 1)              [-Ts/Tl,1]
        ``3``         Like ``2`` but clipping range at zero  [0,1]
        ============= ====================================== ===========

        With ``scalingmethod=1``, the values produced by this variant of the
        STA/LTA are equivalent to

        .. math::

            s_i = \\frac{s}{l} \\frac{\\frac{1}{s}\\sum_{j=i}^{i+s-1} f_j}
                  {\\frac{1}{l}\\sum_{j=i+s-l}^{i+s-1} f_j}

        where :math:`f_j` are the input samples, :math:`s` are the number of
        samples in the short time window and :math:`l` are the number of
        samples in the long time window.
        '''

        n = self.data_len()
        tmin = self.tmin

        nshort = max(1, int(round(tshort/self.deltat)))
        nlong = max(1, int(round(tlong/self.deltat)))

        assert nshort < nlong

        if nlong > len(self.ydata):
            raise TraceTooShort(
                'Samples in trace: %s, samples needed: %s'
                % (len(self.ydata), nlong))

        if quad:
            sqrdata = self.ydata**2
        else:
            sqrdata = self.ydata

        nshift = int(math.floor(0.5 * (nlong - nshort)))
        if nlong % 2 != 0 and nshort % 2 == 0:
            nshift += 1

        mavg_short = moving_avg(sqrdata, nshort)[nshift:]
        mavg_long = moving_avg(sqrdata, nlong)[:sqrdata.size-nshift]

        self.drop_growbuffer()

        if scalingmethod not in (1, 2, 3):
            raise Exception('Invalid value for scalingmethod argument.')

        if scalingmethod == 1:
            ydata = mavg_short/mavg_long * nshort/nlong
        elif scalingmethod in (2, 3):
            ydata = (mavg_short/mavg_long - 1.) \
                / ((float(nlong)/float(nshort)) - 1)

            if scalingmethod == 3:
                ydata = num.maximum(ydata, 0.)

        self.set_ydata(ydata)

        self.shift((math.ceil(0.5*nlong) - nshort + 1) * self.deltat)

        self.chop(
            tmin + (nlong - nshort) * self.deltat,
            tmin + (n - nshort) * self.deltat)

    def peaks(self, threshold, tsearch,
              deadtime=False,
              nblock_duration_detection=100):

        '''
        Detect peaks above a given threshold (method 1).

        From every instant, where the signal rises above ``threshold``, a
        time length of ``tsearch`` seconds is searched for a maximum. A list
        with tuples (time, value) for each detected peak is returned. The
        ``deadtime`` argument turns on a special deadtime duration detection
        algorithm useful in combination with recursive STA/LTA filters.
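
        Example, a minimal sketch (the threshold depends on the preceding
        processing, e.g. an STA/LTA filter)::

            tpeaks, apeaks = tr.peaks(threshold=0.1, tsearch=2.0)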

        '''

        y = self.ydata
        above = num.where(y > threshold, 1, 0)
        deriv = num.zeros(y.size, dtype=num.int8)
        deriv[1:] = above[1:]-above[:-1]
        itrig_positions = num.nonzero(deriv > 0)[0]
        tpeaks = []
        apeaks = []
        tzeros = []
        tzero = self.tmin

        for itrig_pos in itrig_positions:
            ibeg = itrig_pos
            iend = min(
                len(self.ydata),
                itrig_pos + int(math.ceil(tsearch/self.deltat)))
            ipeak = num.argmax(y[ibeg:iend])
            tpeak = self.tmin + (ipeak+ibeg)*self.deltat
            apeak = y[ibeg+ipeak]

            if tpeak < tzero:
                continue

            if deadtime:
                ibeg = itrig_pos
                iblock = 0
                nblock = nblock_duration_detection
                totalsum = 0.
                while True:
                    if ibeg+iblock*nblock >= len(y):
                        tzero = self.tmin + (len(y)-1) * self.deltat
                        break

                    logy = num.log(
                        y[ibeg+iblock*nblock:ibeg+(iblock+1)*nblock])
                    logy[0] += totalsum
                    ysum = num.cumsum(logy)
                    totalsum = ysum[-1]
                    below = num.where(ysum <= 0., 1, 0)
                    deriv = num.zeros(ysum.size, dtype=num.int8)
                    deriv[1:] = below[1:]-below[:-1]
                    izero_positions = num.nonzero(deriv > 0)[0] \
                        + iblock*nblock
                    if len(izero_positions) > 0:
                        tzero = self.tmin + self.deltat * (
                            ibeg + izero_positions[0])
                        break
                    iblock += 1
            else:
                tzero = ibeg*self.deltat + self.tmin + tsearch

            tpeaks.append(tpeak)
            apeaks.append(apeak)
            tzeros.append(tzero)

        if deadtime:
            return tpeaks, apeaks, tzeros
        else:
            return tpeaks, apeaks

    def peaks2(self, threshold, tsearch):

        '''
        Detect peaks above a given threshold (method 2).

        This variant of peak detection is a bit more robust (and slower)
        than the one implemented in :py:meth:`Trace.peaks`. First all
        samples with ``a[i-1] < a[i] > a[i+1]`` are masked as potential
        peaks. From these, iteratively the one with the maximum amplitude
        ``a[j]`` and time ``t[j]`` is chosen and potential peaks within
        ``t[j] - tsearch, t[j] + tsearch``
        are discarded. The algorithm stops when ``a[j] < threshold`` or when
        no more potential peaks are left.
        '''

        a = num.copy(self.ydata)

        amin = num.min(a)

        a[0] = amin
        a[1: -1][num.diff(a, 2) <= 0.] = amin
        a[-1] = amin

        data = []
        while True:
            imax = num.argmax(a)
            amax = a[imax]

            if amax < threshold or amax == amin:
                break

            data.append((self.tmin + imax * self.deltat, amax))

            ntsearch = int(round(tsearch / self.deltat))
            a[max(imax-ntsearch//2, 0):min(imax+ntsearch//2, a.size)] = amin

        if data:
            data.sort()
            tpeaks, apeaks = list(zip(*data))
        else:
            tpeaks, apeaks = [], []

        return tpeaks, apeaks

    def extend(self, tmin=None, tmax=None, fillmethod='zeros'):
        '''
        Extend trace to given span.

        :param tmin: begin time of new span
        :param tmax: end time of new span
        :param fillmethod: ``'zeros'``, ``'repeat'``, ``'mean'``, or
            ``'median'``
        '''

        nold = self.ydata.size

        if tmin is not None:
            nl = min(0, int(round((tmin-self.tmin)/self.deltat)))
        else:
            nl = 0

        if tmax is not None:
            nh = max(nold - 1, int(round((tmax-self.tmin)/self.deltat)))
        else:
            nh = nold - 1

        n = nh - nl + 1
        data = num.zeros(n, dtype=self.ydata.dtype)
        data[-nl:-nl + nold] = self.ydata
        if self.ydata.size >= 1:
            if fillmethod == 'repeat':
                data[:-nl] = self.ydata[0]
                data[-nl + nold:] = self.ydata[-1]
            elif fillmethod == 'median':
                v = num.median(self.ydata)
                data[:-nl] = v
                data[-nl + nold:] = v
            elif fillmethod == 'mean':
                v = num.mean(self.ydata)
                data[:-nl] = v
                data[-nl + nold:] = v

        self.drop_growbuffer()
        self.ydata = data

        self.tmin += nl * self.deltat
        self.tmax = self.tmin + (self.ydata.size - 1) * self.deltat

        self._update_ids()

    def transfer(self,
                 tfade=0.,
                 freqlimits=None,
                 transfer_function=None,
                 cut_off_fading=True,
                 demean=True,
                 invert=False):

        '''
        Return new trace with transfer function applied (convolution).

        :param tfade: rise/fall time in seconds of taper applied in time
            domain at both ends of trace.
        :param freqlimits: 4-tuple with corner frequencies in Hz.
        :param transfer_function: FrequencyResponse object; must provide a
            method 'evaluate(freqs)', which returns the transfer function
            coefficients at the frequencies 'freqs'.
        :param cut_off_fading: whether to cut off rise/fall interval in
            output trace.
        :param demean: remove mean before applying transfer function
        :param invert: set to True to do a deconvolution
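
        Example, a rough sketch integrating a velocity trace to displacement
        (taper length and frequency band are placeholder values; uses the
        :py:class:`~pyrocko.response.IntegrationResponse` imported above)::

            displacement = tr.transfer(
                tfade=10.,
                freqlimits=(0.01, 0.02, 5., 10.),
                transfer_function=IntegrationResponse())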

        '''

        if transfer_function is None:
            transfer_function = g_one_response

        if self.tmax - self.tmin <= tfade*2.:
            raise TraceTooShort(
                'Trace %s.%s.%s.%s too short for fading length setting. '
                'trace length = %g, fading length = %g'
                % (self.nslc_id + (self.tmax-self.tmin, tfade)))

        if freqlimits is None and (
                transfer_function is None or transfer_function.is_scalar()):

            # special case for flat responses

            output = self.copy()
            data = self.ydata
            ndata = data.size

            if transfer_function is not None:
                c = num.abs(transfer_function.evaluate(num.ones(1))[0])

                if invert:
                    c = 1.0/c

                data *= c

            if tfade != 0.0:
                data *= costaper(
                    0., tfade, self.deltat*(ndata-1)-tfade, self.deltat*ndata,
                    ndata, self.deltat)

            output.ydata = data

        else:
            ndata = self.ydata.size
            ntrans = nextpow2(ndata*1.2)
            coeffs = self._get_tapered_coeffs(
                ntrans, freqlimits, transfer_function, invert=invert,
                demean=demean)

            data = self.ydata

            data_pad = num.zeros(ntrans, dtype=float)
            data_pad[:ndata] = data
            if demean:
                data_pad[:ndata] -= data.mean()

            if tfade != 0.0:
                data_pad[:ndata] *= costaper(
                    0., tfade, self.deltat*(ndata-1)-tfade, self.deltat*ndata,
                    ndata, self.deltat)

            fdata = num.fft.rfft(data_pad)
            fdata *= coeffs
            ddata = num.fft.irfft(fdata)
            output = self.copy()
            output.ydata = ddata[:ndata]

        if cut_off_fading and tfade != 0.0:
            try:
                output.chop(output.tmin+tfade, output.tmax-tfade, inplace=True)
            except NoData:
                raise TraceTooShort(
                    'Trace %s.%s.%s.%s too short for fading length setting. '
                    'trace length = %g, fading length = %g'
                    % (self.nslc_id + (self.tmax-self.tmin, tfade)))
        else:
            output.ydata = output.ydata.copy()

        return output

    def differentiate(self, n=1, order=4, inplace=True):
        '''
        Approximate first or second derivative of the trace.

        :param n: 1 for first derivative, 2 for second
        :param order: order of the approximation; 2 and 4 are supported
        :param inplace: if ``True`` the trace is differentiated in place,
            otherwise a new trace object with the derivative is returned.

        Raises :py:exc:`ValueError` for unsupported ``n`` or ``order``.

        See :py:func:`~pyrocko.util.diff_fd` for implementation details.
        '''

        ddata = util.diff_fd(n, order, self.deltat, self.ydata)

        if inplace:
            self.ydata = ddata
        else:
            output = self.copy(data=False)
            output.set_ydata(ddata)
            return output

    def drop_chain_cache(self):
        if self._pchain:
            self._pchain.clear()

    def init_chain(self):
        self._pchain = pchain.Chain(
            do_downsample,
            do_extend,
            do_pre_taper,
            do_fft,
            do_filter,
            do_ifft)

    def run_chain(self, tmin, tmax, deltat, setup, nocache):
        if setup.domain == 'frequency_domain':
            _, _, data = self._pchain(
                (self, deltat),
                (tmin, tmax),
                (setup.taper,),
                (setup.filter,),
                (setup.filter,),
                nocache=nocache)

            return num.abs(data), num.abs(data)

        else:
            processed = self._pchain(
                (self, deltat),
                (tmin, tmax),
                (setup.taper,),
                (setup.filter,),
                (setup.filter,),
                (),
                nocache=nocache)

            if setup.domain == 'time_domain':
                data = processed.get_ydata()

            elif setup.domain == 'envelope':
                processed = processed.envelope(inplace=False)

            elif setup.domain == 'absolute':
                processed.set_ydata(num.abs(processed.get_ydata()))

            return processed.get_ydata(), processed

    def misfit(self, candidate, setup, nocache=False, debug=False):
        '''
        Calculate misfit and normalization factor against candidate trace.

        :param candidate: :py:class:`Trace` object
        :param setup: :py:class:`MisfitSetup` object
        :returns: tuple ``(m, n)``, where m is the misfit value and n is the
            normalization divisor

        If the sampling rates of ``self`` and ``candidate`` differ, the
        trace with the higher sampling rate will be downsampled.
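
        Example, a rough sketch (assumes observed and synthetic traces
        ``obs`` and ``syn`` and a configured :py:class:`MisfitSetup`)::

            m, n = obs.misfit(candidate=syn, setup=setup)
            print('normalized misfit: %g' % (m/n))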

1783 ''' 

1784 

1785 a = self 

1786 b = candidate 

1787 

1788 for tr in (a, b): 

1789 if not tr._pchain: 

1790 tr.init_chain() 

1791 

1792 deltat = max(a.deltat, b.deltat) 

1793 tmin = min(a.tmin, b.tmin) - deltat 

1794 tmax = max(a.tmax, b.tmax) + deltat 

1795 

1796 adata, aproc = a.run_chain(tmin, tmax, deltat, setup, nocache) 

1797 bdata, bproc = b.run_chain(tmin, tmax, deltat, setup, nocache) 

1798 

1799 if setup.domain != 'cc_max_norm': 

1800 m, n = Lx_norm(bdata, adata, norm=setup.norm) 

1801 else: 

1802 ctr = correlate(aproc, bproc, mode='full', normalization='normal') 

1803 ccmax = ctr.max()[1] 

1804 m = 0.5 - 0.5 * ccmax 

1805 n = 0.5 

1806 

1807 if debug: 

1808 return m, n, aproc, bproc 

1809 else: 

1810 return m, n 

1811 

1812 def spectrum(self, pad_to_pow2=False, tfade=None): 

1813 ''' 

1814 Get FFT spectrum of trace. 

1815 

1816 :param pad_to_pow2: whether to zero-pad the data to next larger 

1817 power-of-two length 

1818 :param tfade: ``None`` or a time length in seconds, to apply cosine 

1819 shaped tapers to both ends of the trace 

1820 

1821 :returns: a tuple with (frequencies, values) 

1822 ''' 

1823 

1824 ndata = self.ydata.size 

1825 

1826 if pad_to_pow2: 

1827 ntrans = nextpow2(ndata) 

1828 else: 

1829 ntrans = ndata 

1830 

1831 if tfade is None: 

1832 ydata = self.ydata 

1833 else: 

1834 ydata = self.ydata * costaper( 

1835 0., tfade, self.deltat*(ndata-1)-tfade, self.deltat*ndata, 

1836 ndata, self.deltat) 

1837 

1838 fydata = num.fft.rfft(ydata, ntrans) 

1839 df = 1./(ntrans*self.deltat) 

1840 fxdata = num.arange(len(fydata))*df 

1841 return fxdata, fydata 

1842 
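    # Editor's note -- an illustrative sketch, not part of the library:
    # getting an amplitude spectrum; tfade tapers the trace ends before
    # the FFT. The synthetic trace below is made up for demonstration.
    #
    #     tr = Trace(deltat=0.01, ydata=num.random.normal(size=1024))
    #     freqs, spec = tr.spectrum(pad_to_pow2=True, tfade=0.5)
    #     amp = num.abs(spec)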

1843 def multi_filter(self, filter_freqs, bandwidth): 

1844 

1845 class Gauss(FrequencyResponse): 

1846 f0 = Float.T() 

1847 a = Float.T(default=1.0) 

1848 

1849 def __init__(self, f0, a=1.0, **kwargs): 

1850 FrequencyResponse.__init__(self, f0=f0, a=a, **kwargs) 

1851 

1852 def evaluate(self, freqs): 

1853 omega0 = 2.*math.pi*self.f0 

1854 omega = 2.*math.pi*freqs 

1855 return num.exp(-((omega-omega0) 

1856 / (self.a*omega0))**2) 

1857 

1858 freqs, coeffs = self.spectrum() 

1859 n = self.data_len() 

1860 nfilt = len(filter_freqs) 

1861 signal_tf = num.zeros((nfilt, n)) 

1862 centroid_freqs = num.zeros(nfilt) 

1863 for ifilt, f0 in enumerate(filter_freqs): 

1864 taper = Gauss(f0, a=bandwidth) 

1865 weights = taper.evaluate(freqs) 

1866 nhalf = freqs.size 

1867 analytic_spec = num.zeros(n, dtype=complex) 

1868 analytic_spec[:nhalf] = coeffs*weights 

1869 

1870 enorm = num.abs(analytic_spec[:nhalf])**2 

1871 enorm /= num.sum(enorm) 

1872 

1873 if n % 2 == 0: 

1874 analytic_spec[1:nhalf-1] *= 2. 

1875 else: 

1876 analytic_spec[1:nhalf] *= 2. 

1877 

1878 analytic = num.fft.ifft(analytic_spec) 

1879 signal_tf[ifilt, :] = num.abs(analytic) 

1880 

1881 enorm = num.abs(analytic_spec[:nhalf])**2 

1882 enorm /= num.sum(enorm) 

1883 centroid_freqs[ifilt] = num.sum(freqs*enorm) 

1884 

1885 return centroid_freqs, signal_tf 

1886 

1887 def _get_tapered_coeffs( 

1888 self, ntrans, freqlimits, transfer_function, invert=False, 

1889 demean=True): 

1890 

1891 cache_key = ( 

1892 ntrans, self.deltat, freqlimits, transfer_function.uuid, invert, 

1893 demean) 

1894 

1895 if cache_key in g_tapered_coeffs_cache: 

1896 return g_tapered_coeffs_cache[cache_key] 

1897 

1898 deltaf = 1./(self.deltat*ntrans) 

1899 nfreqs = ntrans//2 + 1 

1900 transfer = num.ones(nfreqs, dtype=complex) 

1901 hi = snapper(nfreqs, deltaf) 

1902 if freqlimits is not None: 

1903 kmin, kmax = hi(freqlimits[0]), hi(freqlimits[3]) 

1904 freqs = num.arange(kmin, kmax)*deltaf 

1905 coeffs = transfer_function.evaluate(freqs) 

1906 if invert: 

1907 if num.any(coeffs == 0.0): 

1908 raise InfiniteResponse('%s.%s.%s.%s' % self.nslc_id) 

1909 

1910 transfer[kmin:kmax] = 1.0 / coeffs 

1911 else: 

1912 transfer[kmin:kmax] = coeffs 

1913 

1914 tapered_transfer = costaper(*freqlimits, nfreqs, deltaf) * transfer 

1915 else: 

1916 if invert: 

1917 raise Exception( 

1918 'transfer: `freqlimits` must be given when `invert` is ' 

1919 'set to `True`') 

1920 

1921 freqs = num.arange(nfreqs) * deltaf 

1922 tapered_transfer = transfer_function.evaluate(freqs) 

1923 

1924 g_tapered_coeffs_cache[cache_key] = tapered_transfer 

1925 

1926 if demean: 

1927 tapered_transfer[0] = 0.0 # don't introduce static offsets 

1928 

1929 return tapered_transfer 

1930 

1931 def fill_template(self, template, **additional): 

1932 ''' 

1933 Fill string template with trace metadata. 

1934 

1935 Uses normal Python '%(placeholder)s' string templates. The following 

1936 placeholders are considered: ``network``, ``station``, ``location``, 

1937 ``channel``, ``tmin`` (time of first sample), ``tmax`` (time of last 

1938 sample), ``tmin_ms``, ``tmax_ms``, ``tmin_us``, ``tmax_us``, 

1939 ``tmin_year``, ``tmax_year``, ``tmin_month``, ``tmax_month``, 

1940 ``tmin_day``, ``tmax_day``, ``julianday``. The variants ending with 

1941 ``'_ms'`` include milliseconds, those with ``'_us'`` include 

1942 microseconds, those with ``'_year'`` contain only the year. 

1943 ''' 

1944 

1945 template = template.replace('%n', '%(network)s')\ 

1946 .replace('%s', '%(station)s')\ 

1947 .replace('%l', '%(location)s')\ 

1948 .replace('%c', '%(channel)s')\ 

1949 .replace('%b', '%(tmin)s')\ 

1950 .replace('%e', '%(tmax)s')\ 

1951 .replace('%j', '%(julianday)s') 

1952 

1953 params = dict( 

1954 zip(('network', 'station', 'location', 'channel'), self.nslc_id)) 

1955 params['tmin'] = util.time_to_str( 

1956 self.tmin, format='%Y-%m-%d_%H-%M-%S') 

1957 params['tmax'] = util.time_to_str( 

1958 self.tmax, format='%Y-%m-%d_%H-%M-%S') 

1959 params['tmin_ms'] = util.time_to_str( 

1960 self.tmin, format='%Y-%m-%d_%H-%M-%S.3FRAC') 

1961 params['tmax_ms'] = util.time_to_str( 

1962 self.tmax, format='%Y-%m-%d_%H-%M-%S.3FRAC') 

1963 params['tmin_us'] = util.time_to_str( 

1964 self.tmin, format='%Y-%m-%d_%H-%M-%S.6FRAC') 

1965 params['tmax_us'] = util.time_to_str( 

1966 self.tmax, format='%Y-%m-%d_%H-%M-%S.6FRAC') 

1967 params['tmin_year'], params['tmin_month'], params['tmin_day'] \ 

1968 = util.time_to_str(self.tmin, format='%Y-%m-%d').split('-') 

1969 params['tmax_year'], params['tmax_month'], params['tmax_day'] \ 

1970 = util.time_to_str(self.tmax, format='%Y-%m-%d').split('-') 

1971 params['julianday'] = util.julian_day_of_year(self.tmin) 

1972 params.update(additional) 

1973 return template % params 

1974 
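    # Editor's note -- an illustrative sketch, not part of the library:
    # the '%n', '%s', '%l', '%c' shorthands expand to the full
    # placeholders before substitution. The trace values are made up.
    #
    #     tr = Trace(network='GR', station='BFO', channel='BHZ',
    #                deltat=0.05, ydata=num.zeros(100))
    #     fn = tr.fill_template('%n.%s.%l.%c_%(tmin_ms)s.mseed')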

1975 def plot(self): 

1976 ''' 

1977 Show trace with matplotlib. 

1978 

1979 See also: :py:meth:`Trace.snuffle`. 

1980 ''' 

1981 

1982 import pylab 

1983 pylab.plot(self.get_xdata(), self.get_ydata()) 

1984 name = '%s %s %s - %s' % ( 

1985 self.channel, 

1986 self.station, 

1987 time.strftime('%d-%m-%y %H:%M:%S', time.gmtime(self.tmin)), 

1988 time.strftime('%d-%m-%y %H:%M:%S', time.gmtime(self.tmax))) 

1989 

1990 pylab.title(name) 

1991 pylab.show() 

1992 

1993 def snuffle(self, **kwargs): 

1994 ''' 

1995 Show trace in a snuffler window. 

1996 

1997 :param stations: list of :py:class:`pyrocko.model.station.Station` 

1998 objects or ``None`` 

1999 :param events: list of :py:class:`pyrocko.model.event.Event` objects or 

2000 ``None`` 

2001 :param markers: list of :py:class:`pyrocko.gui.snuffler.marker.Marker` 

2002 objects or ``None`` 

2003 :param ntracks: int, number of tracks to be shown initially (default: 

2004 12) 

2005 :param follow: time interval (in seconds) for real time follow mode or 

2006 ``None`` 

2007 :param controls: bool, whether to show the main controls (default: 

2008 ``True``) 

2009 :param opengl: bool, whether to use opengl (default: ``False``) 

2010 ''' 

2011 

2012 return snuffle([self], **kwargs) 

2013 

2014 

2015def snuffle(traces, **kwargs): 

2016 ''' 

2017 Show traces in a snuffler window. 

2018 

2019 :param stations: list of :py:class:`pyrocko.model.station.Station` objects 

2020 or ``None`` 

2021 :param events: list of :py:class:`pyrocko.model.event.Event` objects or 

2022 ``None`` 

2023 :param markers: list of :py:class:`pyrocko.gui.snuffler.marker.Marker` 

2024 objects or ``None`` 

2025 :param ntracks: int, number of tracks to be shown initially (default: 12) 

2026 :param follow: time interval (in seconds) for real time follow mode or 

2027 ``None`` 

2028 :param controls: bool, whether to show the main controls (default: 

2029 ``True``) 

2030 :param opengl: bool, whether to use opengl (default: ``False``) 

2031 ''' 

2032 

2033 from pyrocko import pile 

2034 from pyrocko.gui.snuffler import snuffler 

2035 p = pile.Pile() 

2036 if traces: 

2037 trf = pile.MemTracesFile(None, traces) 

2038 p.add_file(trf) 

2039 return snuffler.snuffle(p, **kwargs) 

2040 

2041 

2042def downsample_tpad( 

2043 deltat_in, deltat_out, allow_upsample_max=1, ftype='fir-remez'): 

2044 ''' 

2045 Get approximate amount of cutoff which will be produced by downsampling. 

2046 

2047 The :py:meth:`Trace.downsample_to` method removes some samples at the 

2048 beginning and end of the trace which is downsampled. This function 

2049 estimates the approximate length [s] which will be cut off for a given set 

2050 of parameters supplied to :py:meth:`Trace.downsample_to`. 

2051 

2052 :param deltat_in: 

2053 Input sampling interval [s]. 

2054 :type deltat_in: 

2055 float 

2056 

2057 :param deltat_out: 

2058 Output sampling interval [s]. 

2059 :type deltat_out: 

2060 float 

2061 

2062 :returns: 

2063 Approximate length [s] which will be cut off. 

2064 

2065 See :py:meth:`Trace.downsample_to` for details. 

2066 ''' 

2067 

2068 upsratio, deci_seq = _configure_downsampling( 

2069 deltat_in, deltat_out, allow_upsample_max) 

2070 

2071 tpad = 0.0 

2072 deltat = deltat_in / upsratio 

2073 for deci in deci_seq: 

2074 b, a, n = util.decimate_coeffs(deci, None, ftype) 

2075 # n//2 for the antialiasing 

2076 # +deci for possible snap to multiples 

2077 # +1 for rounding errors 

2078 tpad += (n//2 + deci + 1) * deltat 

2079 deltat = deltat * deci 

2080 

2081 return tpad 

2082 
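# Editor's sketch (illustrative, not part of the library): when extracting
# a window for later downsampling, enlarge it by the padding estimated here
# so that the target span survives the edge cutoff. Window times are made up.
def _example_downsample_tpad():
    deltat_in, deltat_out = 0.01, 0.1
    tpad = downsample_tpad(deltat_in, deltat_out)
    tmin, tmax = 100.0, 200.0  # desired output window
    # request the padded window from the data source:
    return tmin - tpad, tmax + tpad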

2083 

2084def _configure_downsampling(deltat_in, deltat_out, allow_upsample_max): 

2085 for upsratio in range(1, allow_upsample_max+1): 

2086 dratio = (upsratio/deltat_in) / (1./deltat_out) 

2087 deci_seq = util.decitab(int(round(dratio))) 

2088 if abs(dratio - round(dratio)) / dratio < 0.0001 and deci_seq: 

2089 return upsratio, [deci for deci in deci_seq if deci != 1] 

2090 

2091 raise util.UnavailableDecimation('ratio = %g' % (deltat_out / deltat_in)) 

2092 

2093 

2094def _all_same(xs): 

2095 return all(x == xs[0] for x in xs) 

2096 

2097 

2098def _incompatibilities(traces): 

2099 if not traces: 

2100 return None 

2101 

2102 params = [ 

2103 (tr.ydata.size, tr.ydata.dtype, tr.deltat, tr.tmin) 

2104 for tr in traces] 

2105 

2106 if not _all_same(params): 

2107 return params 

2108 else: 

2109 return None 

2110 

2111 

2112def _raise_incompatible_traces(params): 

2113 raise IncompatibleTraces( 

2114 'Given traces are incompatible. Sampling rate, start time, ' 

2115 'number of samples and data type must match.\n%s\n%s' % ( 

2116 ' %10s %-10s %12s %22s' % ( 

2117 'nsamples', 'dtype', 'deltat', 'tmin'), 

2118 '\n'.join( 

2119 ' %10i %-10s %12.5e %22s' % ( 

2120 nsamples, dtype, deltat, util.time_to_str(tmin)) 

2121 for (nsamples, dtype, deltat, tmin) in params))) 

2122 

2123 

2124def _ensure_compatible(traces): 

2125 params = _incompatibilities(traces) 

2126 if params: 

2127 _raise_incompatible_traces(params) 

2128 

2129 

2130def _almost_equal(a, b, atol): 

2131 return abs(a-b) < atol 

2132 

2133 

2134def get_traces_data_as_array(traces): 

2135 ''' 

2136 Merge data samples from multiple traces into a 2D array. 

2137 

2138 :param traces: 

2139 Input waveforms. 

2140 :type traces: 

2141 list of :py:class:`pyrocko.Trace <pyrocko.trace.Trace>` objects 

2142 

2143 :raises: 

2144 :py:class:`IncompatibleTraces` if traces have different time 

2145 span, sample rate or data type; :py:exc:`ValueError` if ``traces`` is an empty list. 

2146 

2147 :returns: 

2148 2D array as ``data[itrace, isample]``. 

2149 :rtype: 

2150 :py:class:`numpy.ndarray` 

2151 ''' 

2152 

2153 if not traces: 

2154 raise ValueError('Need at least one trace.') 

2155 

2156 _ensure_compatible(traces) 

2157 

2158 return num.vstack([tr.ydata for tr in traces]) 

2159 
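# Editor's sketch (illustrative, not part of the library): stacking three
# compatible synthetic traces into one (ntraces, nsamples) array.
def _example_traces_to_array():
    trs = [
        Trace(station='S%i' % i, deltat=0.1,
              ydata=num.random.normal(size=100))
        for i in range(3)]
    data = get_traces_data_as_array(trs)
    assert data.shape == (3, 100)
    return data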

2160 

2161def _ensure_aligned(traces): 

2162 if not traces: 

2163 raise ValueError('No traces given.') 

2164 

2165 eps = 1e-3 

2166 deltats = sorted(set(tr.deltat for tr in traces)) 

2167 if len(deltats) != 1: 

2168 raise UnalignedTraces( 

2169 'Differing sampling intervals: %s' % ', '.join( 

2170 str(deltat) for deltat in deltats)) 

2171 

2172 dtypes = sorted(set(tr.ydata.dtype for tr in traces)) 

2173 if len(dtypes) != 1: 

2174 raise UnalignedTraces( 

2175 'Differing data types: %s' % ', '.join( 

2176 str(dtype) for dtype in dtypes)) 

2177 

2178 deltat = deltats[0] 

2179 tmins = num.array([tr.tmin for tr in traces]) 

2180 toffsets = num.abs(num.round(tmins / deltat) * deltat - tmins) 

2181 is_aligned = toffsets < deltat * eps 

2182 if not all(is_aligned): 

2183 raise UnalignedTraces( 

2184 'Samples of some traces are not aligned: %s' % ( 

2185 ', '.join(str(tr.codes) for tr in [ 

2186 traces[i] for i in num.where( 

2187 num.logical_not(is_aligned))[0]]))) 

2188 

2189 return None 

2190 

2191 

2192def merge_traces_data_as_array(traces, tmin=None, tmax=None): 

2193 from numpy.ma import masked_array 

2194 

2195 if not traces: 

2196 raise ValueError('Need at least one trace.') 

2197 

2198 _ensure_aligned(traces) 

2199 

2200 codes = sorted(set(tr.codes for tr in traces)) 

2201 codes_to_i = dict((codes, i) for (i, codes) in enumerate(codes)) 

2202 

2203 if tmax is None: 

2204 tmax = max(tr.tmax + tr.deltat for tr in traces) 

2205 

2206 if tmin is None: 

2207 tmin = min(tr.tmin for tr in traces) 

2208 

2209 deltat = traces[0].deltat 

2210 

2211 nsamples = int(round((tmax - tmin) / deltat)) 

2212 

2213 data = num.zeros( 

2214 (len(codes), nsamples), 

2215 dtype=traces[0].ydata.dtype) 

2216 

2217 mask = num.ones(data.shape, dtype=bool) 

2218 for tr in traces: 

2219 itmax = nsamples 

2220 itmin_tr = int(round((tr.tmin - tmin) / deltat)) 

2221 itmax_tr = itmin_tr + tr.ydata.size 

2222 itmin_common = max(0, itmin_tr) 

2223 itmax_common = min(itmax, itmax_tr) 

2224 icodes = codes_to_i[tr.codes] 

2225 data[icodes, itmin_common:itmax_common] \ 

2226 = tr.ydata[itmin_common-itmin_tr:itmax_common-itmin_tr] 

2227 mask[icodes, itmin_common:itmax_common] = False 

2228 

2229 return masked_array(data, mask=mask), codes, tmin, deltat 

2230 
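# Editor's sketch (illustrative, not part of the library): the masked-array
# return value marks samples not covered by any input trace. The two traces
# below are made up; the second starts 2 s after the first.
def _example_merge_to_array():
    tr1 = Trace(station='A', deltat=0.1, tmin=0.0, ydata=num.ones(50))
    tr2 = Trace(station='B', deltat=0.1, tmin=2.0, ydata=num.ones(30))
    data, codes, tmin, deltat = merge_traces_data_as_array([tr1, tr2])
    # data has one row per distinct codes; uncovered spans are masked
    return data.mask.any()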

2231 

2232def make_traces_compatible( 

2233 traces, 

2234 dtype=None, 

2235 deltat=None, 

2236 enforce_global_snap=True, 

2237 warn_snap=False): 

2238 

2239 ''' 

2240 Homogenize sampling rate, time span, sampling instants, and data type. 

2241 

2242 This function takes a group of traces and tries to make them compatible in 

2243 terms of data type and sampling rate, time span, and sampling instants of 

2244 time. 

2245 

2246 If necessary, traces are (in order): 

2247 

2248 - cast to the same data type. 

2249 - downsampled to a common sampling rate, using decimation cascades. 

2250 - resampled to common sampling instants in time, using sinc interpolation. 

2251 - cut to the same time span: the longest span covered by all traces (their 

2252 common overlap) is used. 

2253 

2254 :param traces: 

2255 Input waveforms. 

2256 :type traces: 

2257 :py:class:`list` of :py:class:`Trace` 

2258 

2259 :param dtype: 

2260 Force traces to be cast to the given data type. If not specified and the 

2261 input data types are inconsistent, traces are cast to :py:class:`float`. 

2262 :type dtype: 

2263 :py:class:`numpy.dtype` 

2264 

2265 :param deltat: 

2266 Sampling interval [s]. If not specified, the longest sampling interval 

2267 among the input traces is chosen. 

2268 :type deltat: 

2269 float 

2270 

2271 :param enforce_global_snap: 

2272 If ``True``, choose sampling instants to be integer multiples of the 

2273 sampling interval in system time. When set to ``False`` traces are still 

2274 resampled to common time instants (if necessary), but they may be 

2275 offset from the system-time sampling interval multiples. 

2276 :type enforce_global_snap: 

2277 bool 

2278 

2279 :param warn_snap: 

2280 If set to ``True``, warn when resampling has to be performed. 

2281 :type warn_snap: 

2282 bool 

2283 ''' 

2284 

2285 eps_snap = 1e-3 

2286 

2287 if not traces: 

2288 return [] 

2289 

2290 traces = list(traces) 

2291 

2292 dtypes = [tr.ydata.dtype for tr in traces] 

2293 if not _all_same(dtypes) or dtype is not None: 

2294 

2295 if dtype is None: 

2296 dtype = float 

2297 logger.warning( 

2298 'make_traces_compatible: Inconsistent data types - converting ' 

2299 'sample datatype to %s.' % str(dtype)) 

2300 

2301 for itr, tr in enumerate(traces): 

2302 tr_copy = tr.copy(data=False) 

2303 tr_copy.set_ydata(tr.ydata.astype(dtype)) 

2304 traces[itr] = tr_copy 

2305 

2306 deltats = [tr.deltat for tr in traces] 

2307 if not _all_same(deltats) or deltat is not None: 

2308 if deltat is None: 

2309 deltat = max(deltats) 

2310 logger.warning( 

2311 'make_traces_compatible: Inconsistent sampling rates - ' 

2312 'downsampling to lowest rate among input traces: %g Hz.' 

2313 % (1.0 / deltat)) 

2314 

2315 for itr, tr in enumerate(traces): 

2316 if tr.deltat != deltat: 

2317 tr_copy = tr.copy() 

2318 tr_copy.downsample_to(deltat, snap=True, cut=True) 

2319 traces[itr] = tr_copy 

2320 

2321 tmins = num.array([tr.tmin for tr in traces]) 

2322 is_misaligned = num.abs(num.round(tmins / deltat) * deltat - tmins) \ 

2323 > deltat * eps_snap 

2324 

2325 if enforce_global_snap or any(is_misaligned): 

2326 tref = util.to_time_float(0.0) 

2327 else: 

2328 # to keep a common subsample shift 

2329 tref = num.max(tmins) 

2330 

2331 tmins_snap = num.round((tmins - tref) / deltat) * deltat + tref 

2332 need_snap = num.abs(tmins_snap - tmins) > deltat * eps_snap 

2333 if num.any(need_snap): 

2334 if warn_snap: 

2335 logger.warning( 

2336 'make_traces_compatible: Misaligned sampling - introducing ' 

2337 'subsample shifts for proper alignment.') 

2338 

2339 for itr, tr in enumerate(traces): 

2340 if need_snap[itr]: 

2341 tr_copy = tr.copy() 

2342 if tref != 0.0: 

2343 tr_copy.shift(-tref) 

2344 

2345 tr_copy.snap(interpolate=True) 

2346 if tref != 0.0: 

2347 tr_copy.shift(tref) 

2348 

2349 traces[itr] = tr_copy 

2350 

2351 tmins = num.array([tr.tmin for tr in traces]) 

2352 nsamples = num.array([tr.ydata.size for tr in traces]) 

2353 tmaxs = tmins + (nsamples - 1) * deltat 

2354 

2355 tmin = num.max(tmins) 

2356 tmax = num.min(tmaxs) 

2357 

2358 if tmin > tmax: 

2359 raise IncompatibleTraces('Traces do not overlap.') 

2360 

2361 nsamples_must = int(round((tmax - tmin) / deltat)) + 1 

2362 for itr, tr in enumerate(traces): 

2363 if not (_almost_equal(tr.tmin, tmin, deltat*eps_snap) 

2364 and _almost_equal(tr.tmax, tmax, deltat*eps_snap)): 

2365 

2366 traces[itr] = tr.chop( 

2367 tmin, tmax, 

2368 inplace=False, 

2369 want_incomplete=False, 

2370 include_last=True) 

2371 

2372 xtr = traces[itr] 

2373 assert _almost_equal(xtr.tmin, tmin, deltat*eps_snap) 

2374 assert int(round((xtr.tmax - xtr.tmin) / deltat)) + 1 == nsamples_must 

2375 xtr.tmin = tmin 

2376 xtr.tmax = tmax 

2377 xtr.deltat = deltat 

2378 xtr._update_ids() 

2379 

2380 return traces 

2381 
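# Editor's sketch (illustrative, not part of the library): homogenizing two
# overlapping traces with different sampling rates and start times. Input
# values are made up for demonstration.
def _example_make_compatible():
    tr1 = Trace(station='A', deltat=0.05, tmin=0.0,
                ydata=num.random.normal(size=400))
    tr2 = Trace(station='B', deltat=0.1, tmin=1.0,
                ydata=num.random.normal(size=150))
    trs = make_traces_compatible([tr1, tr2])
    # all returned traces now share deltat, tmin, tmax and dtype
    return trs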

2382 

2383class IncompatibleTraces(Exception): 

2384 ''' 

2385 Raised when traces have incompatible sampling rate, time span or data type. 

2386 ''' 

2387 

2388 

2389class UnalignedTraces(Exception): 

2390 ''' 

2391 Raised when traces are not sampled at common time instants or differ in sampling interval or data type. 

2392 ''' 

2393 

2394 

2395class InfiniteResponse(Exception): 

2396 ''' 

2397 This exception is raised by :py:class:`Trace` operations when deconvolution 

2398 of a frequency response (instrument response transfer function) would 

2399 result in a division by zero. 

2400 ''' 

2401 

2402 

2403class MisalignedTraces(Exception): 

2404 ''' 

2405 This exception is raised by some :py:class:`Trace` operations when tmin, 

2406 tmax or number of samples do not match. 

2407 ''' 

2408 

2409 pass 

2410 

2411 

2412class NoData(Exception): 

2413 ''' 

2414 This exception is raised by some :py:class:`Trace` operations when no or 

2415 not enough data is available. 

2416 ''' 

2417 

2418 pass 

2419 

2420 

2421class AboveNyquist(Exception): 

2422 ''' 

2423 This exception is raised by some :py:class:`Trace` operations when given 

2424 frequencies are above the Nyquist frequency. 

2425 ''' 

2426 

2427 pass 

2428 

2429 

2430class TraceTooShort(Exception): 

2431 ''' 

2432 This exception is raised by some :py:class:`Trace` operations when the 

2433 trace is too short. 

2434 ''' 

2435 

2436 pass 

2437 

2438 

2439class ResamplingFailed(Exception): 

2440 pass 

2441 

2442 

2443def minmax(traces, key=None, mode='minmax', outer_mode='minmax'): 

2444 

2445 ''' 

2446 Get data range given traces grouped by selected pattern. 

2447 

2448 :param key: a callable which takes as single argument a trace and returns a 

2449 key for the grouping of the results. If this is ``None``, the default, 

2450 ``lambda tr: (tr.network, tr.station, tr.location, tr.channel)`` is 

2451 used. 

2452 :param mode: ``'minmax'`` or floating point number. If this is 

2453 ``'minmax'``, minimum and maximum of the traces are used; if it is a 

2454 number, mean +- standard deviation times ``mode`` is used. 

2455 

2456 :param outer_mode: ``'minmax'`` to use minimum and maximum of the 

2457 single-trace ranges, or ``'robust'`` to discard the 10% most extreme 

2458 values on either end. 

2459 

2460 :returns: a dict with the combined data ranges. 

2461 

2462 Examples:: 

2463 

2464 ranges = minmax(traces, lambda tr: tr.channel) 

2465 print(ranges['N']) # print min & max of all traces with channel == 'N' 

2466 print(ranges['E']) # print min & max of all traces with channel == 'E' 

2467 

2468 ranges = minmax(traces, lambda tr: (tr.network, tr.station)) 

2469 print(ranges['GR', 'HAM3']) # print min & max of all traces with 

2470 # network == 'GR' and station == 'HAM3' 

2471 

2472 ranges = minmax(traces, lambda tr: None) 

2473 print(ranges[None]) # prints min & max of all traces 

2474 ''' 

2475 

2476 if key is None: 

2477 key = _default_key 

2478 

2479 ranges = defaultdict(list) 

2480 for trace in traces: 

2481 if isinstance(mode, str) and mode == 'minmax': 

2482 mi, ma = num.nanmin(trace.ydata), num.nanmax(trace.ydata) 

2483 else: 

2484 mean = trace.ydata.mean() 

2485 std = trace.ydata.std() 

2486 mi, ma = mean-std*mode, mean+std*mode 

2487 

2488 k = key(trace) 

2489 ranges[k].append((mi, ma)) 

2490 

2491 for k in ranges: 

2492 mins, maxs = num.array(ranges[k]).T 

2493 if outer_mode == 'minmax': 

2494 ranges[k] = num.nanmin(mins), num.nanmax(maxs) 

2495 elif outer_mode == 'robust': 

2496 ranges[k] = num.percentile(mins, 10.), num.percentile(maxs, 90.) 

2497 

2498 return ranges 

2499 

2500 

2501def minmaxtime(traces, key=None): 

2502 

2503 ''' 

2504 Get time range given traces grouped by selected pattern. 

2505 

2506 :param key: a callable which takes as single argument a trace and returns a 

2507 key for the grouping of the results. If this is ``None``, the default, 

2508 ``lambda tr: (tr.network, tr.station, tr.location, tr.channel)`` is 

2509 used. 

2510 

2511 :returns: a dict with the combined data ranges. 

2512 ''' 

2513 

2514 if key is None: 

2515 key = _default_key 

2516 

2517 ranges = {} 

2518 for trace in traces: 

2519 mi, ma = trace.tmin, trace.tmax 

2520 k = key(trace) 

2521 if k not in ranges: 

2522 ranges[k] = mi, ma 

2523 else: 

2524 tmi, tma = ranges[k] 

2525 ranges[k] = min(tmi, mi), max(tma, ma) 

2526 

2527 return ranges 

2528 

2529 

2530def degapper( 

2531 traces, 

2532 maxgap=5, 

2533 fillmethod='interpolate', 

2534 deoverlap='use_second', 

2535 maxlap=None): 

2536 

2537 ''' 

2538 Try to connect traces and remove gaps. 

2539 

2540 This function combines adjacent traces which match in their network, 

2541 station, location and channel attributes. Overlapping parts are handled 

2542 according to the ``deoverlap`` argument. 

2543 

2544 :param traces: input traces, must be sorted by their full_id attribute. 

2545 :param maxgap: maximum number of samples to interpolate. 

2546 :param fillmethod: what to put into the gaps: 'interpolate' or 'zeros'. 

2547 :param deoverlap: how to handle overlaps: 'use_second' to use data from 

2548 second trace (default), 'use_first' to use data from first trace, 

2549 'crossfade_cos' to crossfade with cosine taper, 'add' to add amplitude 

2550 values. 

2551 :param maxlap: maximum number of samples of overlap which are removed 

2552 

2553 :returns: list of traces 

2554 ''' 

2555 

2556 in_traces = traces 

2557 out_traces = [] 

2558 if not in_traces: 

2559 return out_traces 

2560 out_traces.append(in_traces.pop(0)) 

2561 while in_traces: 

2562 

2563 a = out_traces[-1] 

2564 b = in_traces.pop(0) 

2565 

2566 avirt, bvirt = a.ydata is None, b.ydata is None 

2567 assert avirt == bvirt, \ 

2568 'traces given to degapper() must either all have data or have ' \ 

2569 'no data.' 

2570 

2571 virtual = avirt and bvirt 

2572 

2573 if (a.nslc_id == b.nslc_id and a.deltat == b.deltat 

2574 and a.data_len() >= 1 and b.data_len() >= 1 

2575 and (virtual or a.ydata.dtype == b.ydata.dtype)): 

2576 

2577 dist = (b.tmin-(a.tmin+(a.data_len()-1)*a.deltat))/a.deltat 

2578 idist = int(round(dist)) 

2579 if abs(dist - idist) > 0.05 and idist <= maxgap: 

2580 # logger.warning('Cannot degap traces with displaced sampling ' 

2581 # '(%s, %s, %s, %s)' % a.nslc_id) 

2582 pass 

2583 else: 

2584 if 1 < idist <= maxgap: 

2585 if not virtual: 

2586 if fillmethod == 'interpolate': 

2587 filler = a.ydata[-1] + ( 

2588 ((1.0 + num.arange(idist-1, dtype=float)) 

2589 / idist) * (b.ydata[0]-a.ydata[-1]) 

2590 ).astype(a.ydata.dtype) 

2591 elif fillmethod == 'zeros': 

2592 filler = num.zeros(idist-1, dtype=a.ydata.dtype) 

2593 a.ydata = num.concatenate((a.ydata, filler, b.ydata)) 

2594 a.tmax = b.tmax 

2595 if a.mtime and b.mtime: 

2596 a.mtime = max(a.mtime, b.mtime) 

2597 continue 

2598 

2599 elif idist == 1: 

2600 if not virtual: 

2601 a.ydata = num.concatenate((a.ydata, b.ydata)) 

2602 a.tmax = b.tmax 

2603 if a.mtime and b.mtime: 

2604 a.mtime = max(a.mtime, b.mtime) 

2605 continue 

2606 

2607 elif idist <= 0 and (maxlap is None or -maxlap < idist): 

2608 if b.tmax > a.tmax: 

2609 if not virtual: 

2610 na = a.ydata.size 

2611 n = -idist+1 

2612 if deoverlap == 'use_second': 

2613 a.ydata = num.concatenate( 

2614 (a.ydata[:-n], b.ydata)) 

2615 elif deoverlap in ('use_first', 'crossfade_cos'): 

2616 a.ydata = num.concatenate( 

2617 (a.ydata, b.ydata[n:])) 

2618 elif deoverlap == 'add': 

2619 a.ydata[-n:] += b.ydata[:n] 

2620 a.ydata = num.concatenate( 

2621 (a.ydata, b.ydata[n:])) 

2622 else: 

2623 assert False, 'unknown deoverlap method' 

2624 

2625 if deoverlap == 'crossfade_cos': 

2626 n = -idist+1 

2627 taper = 0.5-0.5*num.cos( 

2628 (1.+num.arange(n))/(1.+n)*num.pi) 

2629 a.ydata[na-n:na] *= 1.-taper 

2630 a.ydata[na-n:na] += b.ydata[:n] * taper 

2631 

2632 a.tmax = b.tmax 

2633 if a.mtime and b.mtime: 

2634 a.mtime = max(a.mtime, b.mtime) 

2635 continue 

2636 else: 

2637 # make short second trace vanish 

2638 continue 

2639 

2640 if b.data_len() >= 1: 

2641 out_traces.append(b) 

2642 

2643 for tr in out_traces: 

2644 tr._update_ids() 

2645 

2646 return out_traces 

2647 
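# Editor's sketch (illustrative, not part of the library): two traces
# separated by a two-sample gap are joined into one, with the gap
# interpolated. tr1 ends at t=9, tr2 starts at t=12.
def _example_degapper():
    tr1 = Trace(deltat=1.0, tmin=0.0, ydata=num.zeros(10))
    tr2 = Trace(deltat=1.0, tmin=12.0, ydata=num.ones(10))
    joined = degapper([tr1, tr2], maxgap=5, fillmethod='interpolate')
    assert len(joined) == 1
    return joined[0]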

2648 

2649def rotate(traces, azimuth, in_channels, out_channels): 

2650 ''' 

2651 2D rotation of traces. 

2652 

2653 :param traces: list of input traces 

2654 :param azimuth: difference of the azimuths of the component directions 

2655 (azimuth of out_channels[0]) - (azimuth of in_channels[0]) 

2656 :param in_channels: names of the input channels (e.g. 'N', 'E') 

2657 :param out_channels: names of the output channels (e.g. 'R', 'T') 

2658 :returns: list of rotated traces 

2659 ''' 

2660 

2661 phi = azimuth/180.*math.pi 

2662 cphi = math.cos(phi) 

2663 sphi = math.sin(phi) 

2664 rotated = [] 

2665 in_channels = tuple(_channels_to_names(in_channels)) 

2666 out_channels = tuple(_channels_to_names(out_channels)) 

2667 for a in traces: 

2668 for b in traces: 

2669 if ((a.channel, b.channel) == in_channels and 

2670 a.nslc_id[:3] == b.nslc_id[:3] and 

2671 abs(a.deltat-b.deltat) < a.deltat*0.001): 

2672 tmin = max(a.tmin, b.tmin) 

2673 tmax = min(a.tmax, b.tmax) 

2674 

2675 if tmin < tmax: 

2676 ac = a.chop(tmin, tmax, inplace=False, include_last=True) 

2677 bc = b.chop(tmin, tmax, inplace=False, include_last=True) 

2678 if abs(ac.tmin - bc.tmin) > ac.deltat*0.01: 

2679 logger.warning( 

2680 'Cannot rotate traces with displaced sampling ' 

2681 '(%s, %s, %s, %s)' % a.nslc_id) 

2682 continue 

2683 

2684 acydata = ac.get_ydata()*cphi+bc.get_ydata()*sphi 

2685 bcydata = -ac.get_ydata()*sphi+bc.get_ydata()*cphi 

2686 ac.set_ydata(acydata) 

2687 bc.set_ydata(bcydata) 

2688 

2689 ac.set_codes(channel=out_channels[0]) 

2690 bc.set_codes(channel=out_channels[1]) 

2691 rotated.append(ac) 

2692 rotated.append(bc) 

2693 

2694 return rotated 

2695 
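# Editor's sketch (illustrative, not part of the library): rotating
# synthetic horizontal components N/E by 30 degrees into R/T.
def _example_rotate():
    n = Trace(channel='N', deltat=0.1, ydata=num.random.normal(size=100))
    e = Trace(channel='E', deltat=0.1, ydata=num.random.normal(size=100))
    r, t = rotate([n, e], azimuth=30.,
                  in_channels=('N', 'E'), out_channels=('R', 'T'))
    return r, t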

2696 

2697def rotate_to_rt(n, e, source, receiver, out_channels=('R', 'T')): 

2698 ''' 

2699 Rotate traces from NE to RT system. 

2700 

2701 :param n: 

2702 North trace. 

2703 :type n: 

2704 :py:class:`~pyrocko.trace.Trace` 

2705 

2706 :param e: 

2707 East trace. 

2708 :type e: 

2709 :py:class:`~pyrocko.trace.Trace` 

2710 

2711 :param source: 

2712 Source of the recorded signal. 

2713 :type source: 

2714 :py:class:`pyrocko.gf.seismosizer.Source` 

2715 

2716 :param receiver: 

2717 Receiver of the recorded signal. 

2718 :type receiver: 

2719 :py:class:`pyrocko.model.location.Location` 

2720 

2721 :param out_channels: 

2722 Channel codes of the output channels (radial, transversal). 

2723 Default is ('R', 'T'). 

2724 

2725 :type out_channels: 

2726 optional, tuple[str, str] 

2727 

2728 :returns: 

2729 Rotated traces (radial, transversal). 

2730 :rtype: 

2731 tuple[ 

2732 :py:class:`~pyrocko.trace.Trace`, 

2733 :py:class:`~pyrocko.trace.Trace`] 

2734 ''' 

2735 azimuth = orthodrome.azimuth(receiver, source) + 180. 

2736 in_channels = n.channel, e.channel 

2737 out = rotate( 

2738 [n, e], azimuth, 

2739 in_channels=in_channels, 

2740 out_channels=out_channels) 

2741 

2742 assert len(out) == 2 

2743 for tr in out: 

2744 if tr.channel == out_channels[0]: 

2745 r = tr 

2746 elif tr.channel == out_channels[1]: 

2747 t = tr 

2748 else: 

2749 assert False 

2750 

2751 return r, t 

2752 

2753 

2754def rotate_to_lqt(traces, backazimuth, incidence, in_channels, 

2755 out_channels=('L', 'Q', 'T')): 

2756 ''' 

2757 Rotate traces from ZNE to LQT system. 

2758 

2759 :param traces: list of traces in arbitrary order 

2760 :param backazimuth: backazimuth in degrees clockwise from north 

2761 :param incidence: incidence angle in degrees from vertical 

2762 :param in_channels: input channel names 

2763 :param out_channels: output channel names (default: ('L', 'Q', 'T')) 

2764 :returns: list of transformed traces 

2765 ''' 

2766 i = incidence/180.*num.pi 

2767 b = backazimuth/180.*num.pi 

2768 

2769 ci = num.cos(i) 

2770 cb = num.cos(b) 

2771 si = num.sin(i) 

2772 sb = num.sin(b) 

2773 

2774 rotmat = num.array( 

2775 [[ci, -cb*si, -sb*si], [si, cb*ci, sb*ci], [0., sb, -cb]]) 

2776 return project(traces, rotmat, in_channels, out_channels) 

2777 

2778 

2779def _decompose(a): 

2780 ''' 

2781 Decompose matrix into independent submatrices. 

2782 ''' 

2783 

2784 def depends(iout, a): 

2785 row = a[iout, :] 

2786 return set(num.arange(row.size).compress(row != 0.0)) 

2787 

2788 def provides(iin, a): 

2789 col = a[:, iin] 

2790 return set(num.arange(col.size).compress(col != 0.0)) 

2791 

2792 a = num.asarray(a) 

2793 outs = set(range(a.shape[0])) 

2794 systems = [] 

2795 while outs: 

2796 iout = outs.pop() 

2797 

2798 gout = set() 

2799 for iin in depends(iout, a): 

2800 gout.update(provides(iin, a)) 

2801 

2802 if not gout: 

2803 continue 

2804 

2805 gin = set() 

2806 for iout2 in gout: 

2807 gin.update(depends(iout2, a)) 

2808 

2809 if not gin: 

2810 continue 

2811 

2812 for iout2 in gout: 

2813 if iout2 in outs: 

2814 outs.remove(iout2) 

2815 

2816 gin = list(gin) 

2817 gin.sort() 

2818 gout = list(gout) 

2819 gout.sort() 

2820 

2821 systems.append((gin, gout, a[gout, :][:, gin])) 

2822 

2823 return systems 

2824 

2825 

2826def _channels_to_names(channels): 

2827 from pyrocko import squirrel 

2828 names = [] 

2829 for ch in channels: 

2830 if isinstance(ch, model.Channel): 

2831 names.append(ch.name) 

2832 elif isinstance(ch, squirrel.Channel): 

2833 names.append(ch.codes.channel) 

2834 else: 

2835 names.append(ch) 

2836 

2837 return names 

2838 

2839 

2840def project(traces, matrix, in_channels, out_channels): 

2841 ''' 

2842 Affine transform of three-component traces. 

2843 

2844 Compute matrix-vector product of three-component traces, to e.g. rotate 

2845 traces into a different basis. The traces are distinguished and ordered by 

2846 their channel attribute. The transform is applied to overlapping parts of 

2847 any appropriate combinations of the input traces. This should allow this 

2848 function to be robust in the presence of data gaps. It also tries to apply 

2849 the transformation to subsets of the channels, if this is possible, so 

2850 that, if for example a vertical component is missing, horizontal 

2851 components can still be rotated. 

2852 

2853 :param traces: list of traces in arbitrary order 

2854 :param matrix: transformation matrix 

2855 :param in_channels: input channel names 

2856 :param out_channels: output channel names 

2857 :returns: list of transformed traces 

2858 ''' 

2859 

2860 in_channels = tuple(_channels_to_names(in_channels)) 

2861 out_channels = tuple(_channels_to_names(out_channels)) 

2862 systems = _decompose(matrix) 

2863 

2864 # fallback to full matrix if some are not quadratic 

2865 for iins, iouts, submatrix in systems: 

2866 if submatrix.shape[0] != submatrix.shape[1]: 

2867 if len(in_channels) != 3 or len(out_channels) != 3: 

2868 return [] 

2869 else: 

2870 return _project3(traces, matrix, in_channels, out_channels) 

2871 

2872 projected = [] 

2873 for iins, iouts, submatrix in systems: 

2874 in_cha = tuple([in_channels[iin] for iin in iins]) 

2875 out_cha = tuple([out_channels[iout] for iout in iouts]) 

2876 if submatrix.shape[0] == 1: 

2877 projected.extend(_project1(traces, submatrix, in_cha, out_cha)) 

2878 elif submatrix.shape[1] == 2: 

2879 projected.extend(_project2(traces, submatrix, in_cha, out_cha)) 

2880 else: 

2881 projected.extend(_project3(traces, submatrix, in_cha, out_cha)) 

2882 

2883 return projected 

2884 
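# Editor's sketch (illustrative, not part of the library): a diagonal
# matrix decomposes into 1x1 subsystems, so each component is projected
# independently even when some channels are missing. Channel names below
# are made up for demonstration.
def _example_project():
    trs = [
        Trace(channel=c, deltat=0.1, ydata=num.random.normal(size=100))
        for c in ('Z', 'N', 'E')]
    matrix = num.diag([-1., 1., 1.])  # flip polarity of the vertical
    out = project(trs, matrix,
                  in_channels=('Z', 'N', 'E'),
                  out_channels=('Zf', 'N', 'E'))
    return out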

2885 

2886def project_dependencies(matrix, in_channels, out_channels): 

2887 ''' 

2888 Figure out what dependencies project() would produce. 

2889 ''' 

2890 

2891 in_channels = tuple(_channels_to_names(in_channels)) 

2892 out_channels = tuple(_channels_to_names(out_channels)) 

2893 systems = _decompose(matrix) 

2894 

2895 subpro = [] 

2896 for iins, iouts, submatrix in systems: 

2897 if submatrix.shape[0] != submatrix.shape[1]: 

2898 subpro.append((matrix, in_channels, out_channels)) 

2899 

2900 if not subpro: 

2901 for iins, iouts, submatrix in systems: 

2902 in_cha = tuple([in_channels[iin] for iin in iins]) 

2903 out_cha = tuple([out_channels[iout] for iout in iouts]) 

2904 subpro.append((submatrix, in_cha, out_cha)) 

2905 

2906 deps = {} 

2907 for mat, in_cha, out_cha in subpro: 

2908 for oc in out_cha: 

2909 if oc not in deps: 

2910 deps[oc] = [] 

2911 

2912 for ic in in_cha: 

2913 deps[oc].append(ic) 

2914 

2915 return deps 

2916 

2917 

2918def _project1(traces, matrix, in_channels, out_channels): 

2919 assert len(in_channels) == 1 

2920 assert len(out_channels) == 1 

2921 assert matrix.shape == (1, 1) 

2922 

2923 projected = [] 

2924 for a in traces: 

2925 if not (a.channel,) == in_channels: 

2926 continue 

2927 

2928 ac = a.copy() 

2929 ac.set_ydata(matrix[0, 0]*a.get_ydata()) 

2930 ac.set_codes(channel=out_channels[0]) 

2931 projected.append(ac) 

2932 

2933 return projected 

2934 

2935 

2936def _project2(traces, matrix, in_channels, out_channels): 

2937 assert len(in_channels) == 2 

2938 assert len(out_channels) == 2 

2939 assert matrix.shape == (2, 2) 

2940 projected = [] 

2941 for a in traces: 

2942 for b in traces: 

2943 if not ((a.channel, b.channel) == in_channels and 

2944 a.nslc_id[:3] == b.nslc_id[:3] and 

2945 abs(a.deltat-b.deltat) < a.deltat*0.001): 

2946 continue 

2947 

2948 tmin = max(a.tmin, b.tmin) 

2949 tmax = min(a.tmax, b.tmax) 

2950 

2951 if tmin > tmax: 

2952 continue 

2953 

2954 ac = a.chop(tmin, tmax, inplace=False, include_last=True) 

2955 bc = b.chop(tmin, tmax, inplace=False, include_last=True) 

2956 if abs(ac.tmin - bc.tmin) > ac.deltat*0.01: 

2957 logger.warning( 

2958 'Cannot project traces with displaced sampling ' 

2959 '(%s, %s, %s, %s)' % a.nslc_id) 

2960 continue 

2961 

2962 acydata = num.dot(matrix[0], (ac.get_ydata(), bc.get_ydata())) 

2963 bcydata = num.dot(matrix[1], (ac.get_ydata(), bc.get_ydata())) 

2964 

2965 ac.set_ydata(acydata) 

2966 bc.set_ydata(bcydata) 

2967 

2968 ac.set_codes(channel=out_channels[0]) 

2969 bc.set_codes(channel=out_channels[1]) 

2970 

2971 projected.append(ac) 

2972 projected.append(bc) 

2973 

2974 return projected 

2975 

2976 

2977def _project3(traces, matrix, in_channels, out_channels): 

2978 assert len(in_channels) == 3 

2979 assert len(out_channels) == 3 

2980 assert matrix.shape == (3, 3) 

2981 projected = [] 

2982 for a in traces: 

2983 for b in traces: 

2984 for c in traces: 

2985 if not ((a.channel, b.channel, c.channel) == in_channels 

2986 and a.nslc_id[:3] == b.nslc_id[:3] 

2987 and b.nslc_id[:3] == c.nslc_id[:3] 

2988 and abs(a.deltat-b.deltat) < a.deltat*0.001 

2989 and abs(b.deltat-c.deltat) < b.deltat*0.001): 

2990 

2991 continue 

2992 

2993 tmin = max(a.tmin, b.tmin, c.tmin) 

2994 tmax = min(a.tmax, b.tmax, c.tmax) 

2995 

2996 if tmin >= tmax: 

2997 continue 

2998 

2999 ac = a.chop(tmin, tmax, inplace=False, include_last=True) 

3000 bc = b.chop(tmin, tmax, inplace=False, include_last=True) 

3001 cc = c.chop(tmin, tmax, inplace=False, include_last=True) 

3002 if (abs(ac.tmin - bc.tmin) > ac.deltat*0.01 

3003 or abs(bc.tmin - cc.tmin) > bc.deltat*0.01): 

3004 

3005 logger.warning( 

3006 'Cannot project traces with displaced sampling ' 

3007 '(%s, %s, %s, %s)' % a.nslc_id) 

3008 continue 

3009 

3010 acydata = num.dot( 

3011 matrix[0], 

3012 (ac.get_ydata(), bc.get_ydata(), cc.get_ydata())) 

3013 bcydata = num.dot( 

3014 matrix[1], 

3015 (ac.get_ydata(), bc.get_ydata(), cc.get_ydata())) 

3016 ccydata = num.dot( 

3017 matrix[2], 

3018 (ac.get_ydata(), bc.get_ydata(), cc.get_ydata())) 

3019 

3020 ac.set_ydata(acydata) 

3021 bc.set_ydata(bcydata) 

3022 cc.set_ydata(ccydata) 

3023 

3024 ac.set_codes(channel=out_channels[0]) 

3025 bc.set_codes(channel=out_channels[1]) 

3026 cc.set_codes(channel=out_channels[2]) 

3027 

3028 projected.append(ac) 

3029 projected.append(bc) 

3030 projected.append(cc) 

3031 

3032 return projected 

3033 

3034 

3035def correlate(a, b, mode='valid', normalization=None, use_fft=False): 

3036 ''' 

3037 Cross correlation of two traces. 

3038 

3039 :param a,b: input traces 

3040 :param mode: ``'valid'``, ``'full'``, or ``'same'`` 

3041 :param normalization: ``'normal'``, ``'gliding'``, or ``None`` 

3042 :param use_fft: bool, whether to do cross correlation in spectral domain 

3043 

3044 :returns: trace containing cross correlation coefficients 

3045 

3046 This function computes the cross correlation between two traces. It 

3047 evaluates the discrete equivalent of 

3048 

3049 .. math:: 

3050 

3051 c(t) = \\int_{-\\infty}^{\\infty} a^{\\ast}(\\tau) b(t+\\tau) d\\tau 

3052 

3053 where the star denotes complex conjugate. Note, that the arguments here are 

3054 swapped when compared with the :py:func:`numpy.correlate` function, 

3055 which is internally called. This function should be safe even with older 

3056 versions of NumPy, where the correlate function has some problems. 

3057 

3058 A trace containing the cross correlation coefficients is returned. The time 

3059 information of the output trace is set so that the returned cross 

3060 correlation can be viewed directly as a function of time lag. 

3061 

3062 Example:: 

3063 

3064 # align two traces a and b containing a time shifted similar signal: 

3065 c = pyrocko.trace.correlate(a,b) 

3066 t, coef = c.max() # get time and value of maximum 

3067 b.shift(-t) # align b with a 

3068 

3069 ''' 

3070 

3071 assert_same_sampling_rate(a, b) 

3072 

3073 ya, yb = a.ydata, b.ydata 

3074 

3075 # need reversed order here: 

3076 yc = numpy_correlate_fixed(yb, ya, mode=mode, use_fft=use_fft) 

3077 kmin, kmax = numpy_correlate_lag_range(yb, ya, mode=mode, use_fft=use_fft) 

3078 

3079 if normalization == 'normal': 

3080 normfac = num.sqrt(num.sum(ya**2))*num.sqrt(num.sum(yb**2)) 

3081 yc = yc/normfac 

3082 

3083 elif normalization == 'gliding': 

3084 if mode != 'valid': 

3085 assert False, 'gliding normalization currently only available ' \ 

3086 'with "valid" mode.' 

3087 

3088 if ya.size < yb.size: 

3089 yshort, ylong = ya, yb 

3090 else: 

3091 yshort, ylong = yb, ya 

3092 

3093 epsilon = 0.00001 

3094 normfac_short = num.sqrt(num.sum(yshort**2)) 

3095 normfac = normfac_short * num.sqrt( 

3096 moving_sum(ylong**2, yshort.size, mode='valid')) \ 

3097 + normfac_short*epsilon 

3098 

3099 if yb.size <= ya.size: 

3100 normfac = normfac[::-1] 

3101 

3102 yc /= normfac 

3103 

3104 c = a.copy() 

3105 c.set_ydata(yc) 

3106 c.set_codes(*merge_codes(a, b, '~')) 

3107 c.shift(-c.tmin + b.tmin-a.tmin + kmin * c.deltat) 

3108 

3109 return c 

3110 

3111 

3112def deconvolve( 

3113 a, b, waterlevel, 

3114 tshift=0., 

3115 pad=0.5, 

3116 fd_taper=None, 

3117 pad_to_pow2=True): 

3118 

3119 assert_same_sampling_rate(a, b) 

3120 assert abs(a.tmin - b.tmin) < a.deltat * 0.001 

3121 deltat = a.deltat 

3122 npad = int(round(a.data_len()*pad + tshift / deltat)) 

3123 

3124 ndata = max(a.data_len(), b.data_len()) 

3125 ndata_pad = ndata + npad 

3126 

3127 if pad_to_pow2: 

3128 ntrans = nextpow2(ndata_pad) 

3129 else: 

3130 ntrans = ndata_pad 

3131 

3132 aspec = num.fft.rfft(a.ydata, ntrans) 

3133 bspec = num.fft.rfft(b.ydata, ntrans) 

3134 

3135 out = aspec * num.conj(bspec) 

3136 

3137 bautocorr = bspec*num.conj(bspec) 

3138 denom = num.maximum(bautocorr, waterlevel * bautocorr.max()) 

3139 

3140 out /= denom 

3141 df = 1/(ntrans*deltat) 

3142 

3143 if fd_taper is not None: 

3144 fd_taper(out, 0.0, df) 

3145 

3146 ydata = num.roll(num.fft.irfft(out), int(round(tshift/deltat))) 

3147 c = a.copy(data=False) 

3148 c.set_ydata(ydata[:ndata]) 

3149 c.set_codes(*merge_codes(a, b, '/')) 

3150 return c 

3151 
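# Editor's sketch (illustrative, not part of the library): deconvolving a
# trace by itself yields approximately a unit impulse; the waterlevel
# stabilizes the spectral division where the denominator spectrum is weak.
def _example_deconvolve():
    b = Trace(deltat=0.01, ydata=num.random.normal(size=1000))
    a = b.copy()
    c = deconvolve(a, b, waterlevel=0.01)
    return c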

3152 

3153def assert_same_sampling_rate(a, b, eps=1.0e-6): 

3154 assert same_sampling_rate(a, b, eps), \ 

3155 'Sampling rates differ: %g != %g' % (a.deltat, b.deltat) 

3156 

3157 

3158def same_sampling_rate(a, b, eps=1.0e-6): 

3159 ''' 

3160 Check if two traces have the same sampling rate. 

3161 

3162 :param a,b: input traces 

3163 :param eps: relative tolerance 

3164 ''' 

3165 

3166 return abs(a.deltat - b.deltat) < (a.deltat + b.deltat)*eps 

3167 

3168 

3169def fix_deltat_rounding_errors(deltat): 

3170 ''' 

3171 Try to undo sampling rate rounding errors. 

3172 

3173 Fix rounding errors of sampling intervals when these are read from single 

3174 precision floating point values. 

3175 

3176 Assumes that the true sampling rate or sampling interval was an integer 

3177 value. No correction will be applied if this would change the sampling 

3178 rate by more than 0.001%. 

3179 ''' 

3180 

3181 if deltat <= 1.0: 

3182 deltat_new = 1.0 / round(1.0 / deltat) 

3183 else: 

3184 deltat_new = round(deltat) 

3185 

3186 if abs(deltat_new - deltat) / deltat > 1e-5: 

3187 deltat_new = deltat 

3188 

3189 return deltat_new 

3190 
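# Editor's sketch (illustrative, not part of the library): a 100 Hz
# sampling interval stored as float32 comes back slightly off and is
# repaired here.
def _example_fix_deltat():
    deltat = float(num.float32(0.01))  # 0.009999999776...
    assert fix_deltat_rounding_errors(deltat) == 0.01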

3191 

3192def merge_codes(a, b, sep='-'): 

3193 ''' 

3194 Merge network-station-location-channel codes of a pair of traces. 

3195 ''' 

3196 

3197 o = [] 

3198 for xa, xb in zip(a.nslc_id, b.nslc_id): 

3199 if xa == xb: 

3200 o.append(xa) 

3201 else: 

3202 o.append(sep.join((xa, xb))) 

3203 return o 

3204 

3205 

3206class Taper(Object): 

3207 ''' 

3208 Base class for tapers. 

3209 

3210 Does nothing by default. 

3211 ''' 

3212 

3213 def __call__(self, y, x0, dx): 

3214 pass 

3215 

3216 

3217class CosTaper(Taper): 

3218 ''' 

3219 Cosine Taper. 

3220 

3221 :param a: start of fading in 

3222 :param b: end of fading in 

3223 :param c: start of fading out 

3224 :param d: end of fading out 

3225 ''' 

3226 

3227 a = Float.T() 

3228 b = Float.T() 

3229 c = Float.T() 

3230 d = Float.T() 

3231 

3232 def __init__(self, a, b, c, d): 

3233 Taper.__init__(self, a=a, b=b, c=c, d=d) 

3234 

3235 def __call__(self, y, x0, dx): 

3236 

3237 if y.dtype == num.dtype(float): 

3238 _apply_costaper = signal_ext.apply_costaper 

3239 else: 

3240 _apply_costaper = apply_costaper 

3241 

3242 _apply_costaper(self.a, self.b, self.c, self.d, y, x0, dx) 

3243 

3244 def span(self, y, x0, dx): 

3245 return span_costaper(self.a, self.b, self.c, self.d, y, x0, dx) 

3246 

3247 def time_span(self): 

3248 return self.a, self.d 

3249 

3250 

3251class CosFader(Taper): 

3252 ''' 

3253 Cosine Fader. 

3254 

3255 :param xfade: fade in and fade out time in seconds (optional) 

3256 :param xfrac: fade in and fade out as fraction between 0. and 1. (optional) 

3257 

3258 Only one argument can be set. The other must be ``None``. 

3259 ''' 

3260 

3261 xfade = Float.T(optional=True) 

3262 xfrac = Float.T(optional=True) 

3263 

3264 def __init__(self, xfade=None, xfrac=None): 

3265 Taper.__init__(self, xfade=xfade, xfrac=xfrac) 

3266 assert (xfade is None) != (xfrac is None) 

3267 self._xfade = xfade 

3268 self._xfrac = xfrac 

3269 

3270 def __call__(self, y, x0, dx): 

3271 

3272 xfade = self._xfade 

3273 

3274 xlen = (y.size - 1)*dx 

3275 if xfade is None: 

3276 xfade = xlen * self._xfrac 

3277 

3278 a = x0 

3279 b = x0 + xfade 

3280 c = x0 + xlen - xfade 

3281 d = x0 + xlen 

3282 

3283 apply_costaper(a, b, c, d, y, x0, dx) 

3284 

3285 def span(self, y, x0, dx): 

3286 return 0, y.size 

3287 

3288 def time_span(self): 

3289 return None, None 

3290 

3291 

3292def none_min(li): 

3293 if None in li: 

3294 return None 

3295 else: 

3296 return min(x for x in li if x is not None) 

3297 

3298 

3299def none_max(li): 

3300 if None in li: 

3301 return None 

3302 else: 

3303 return max(x for x in li if x is not None) 

3304 

3305 

3306class MultiplyTaper(Taper): 

3307 ''' 

3308 Multiplication of several tapers. 

3309 ''' 

3310 

3311 tapers = List.T(Taper.T()) 

3312 

3313 def __init__(self, tapers=None): 

3314 if tapers is None: 

3315 tapers = [] 

3316 

3317 Taper.__init__(self, tapers=tapers) 

3318 

3319 def __call__(self, y, x0, dx): 

3320 for taper in self.tapers: 

3321 taper(y, x0, dx) 

3322 

3323 def span(self, y, x0, dx): 

3324 spans = [] 

3325 for taper in self.tapers: 

3326 spans.append(taper.span(y, x0, dx)) 

3327 

3328 mins, maxs = list(zip(*spans)) 

3329 return min(mins), max(maxs) 

3330 

3331 def time_span(self): 

3332 spans = [] 

3333 for taper in self.tapers: 

3334 spans.append(taper.time_span()) 

3335 

3336 mins, maxs = list(zip(*spans)) 

3337 return none_min(mins), none_max(maxs) 

3338 

3339 

3340class GaussTaper(Taper): 

3341 ''' 

3342 Frequency domain Gaussian filter. 

3343 ''' 

3344 

3345 alpha = Float.T() 

3346 

3347 def __init__(self, alpha): 

3348 Taper.__init__(self, alpha=alpha) 

3349 self._alpha = alpha 

3350 

3351 def __call__(self, y, x0, dx): 

3352 f = x0 + num.arange(y.size)*dx 

3353 y *= num.exp(-num.pi**2 / (self._alpha**2) * f**2) 

3354 

3355 

3356cached_coefficients = {} 

3357 

3358 

3359def _get_cached_filter_coeffs(order, corners, btype): 

3360 ck = (order, tuple(corners), btype) 

3361 if ck not in cached_coefficients: 

3362 if len(corners) == 1: 

3363 corners = corners[0] 

3364 

3365 cached_coefficients[ck] = signal.butter( 

3366 order, corners, btype=btype) 

3367 

3368 return cached_coefficients[ck] 

3369 

3370 

3371class _globals(object): 

3372 _numpy_has_correlate_flip_bug = None 

3373 

3374 

3375def _default_key(tr): 

3376 return (tr.network, tr.station, tr.location, tr.channel) 

3377 

3378 

3379def numpy_has_correlate_flip_bug(): 

3380 ''' 

3381 Check if NumPy's correlate function exhibits the old (flipped) behaviour. 

3382 ''' 

3383 

3384 if _globals._numpy_has_correlate_flip_bug is None: 

3385 a = num.array([0, 0, 1, 0, 0, 0, 0]) 

3386 b = num.array([0, 0, 0, 0, 1, 0, 0, 0]) 

3387 ab = num.correlate(a, b, mode='same') 

3388 ba = num.correlate(b, a, mode='same') 

3389 _globals._numpy_has_correlate_flip_bug = num.all(ab == ba) 

3390 

3391 return _globals._numpy_has_correlate_flip_bug 

3392 

3393 

3394def numpy_correlate_fixed(a, b, mode='valid', use_fft=False): 

3395 ''' 

3396 Call :py:func:`numpy.correlate` with fixes. 

3397 

3398 c[k] = sum_i a[i+k] * conj(b[i]) 

3399 

3400 Note that the result produced by newer numpy.correlate is always flipped 

3401 with respect to the formula given in its documentation (if ascending k 

3402 assumed for the output). 

3403 ''' 

3404 

3405 if use_fft: 

3406 if a.size < b.size: 

3407 c = signal.fftconvolve(b[::-1], a, mode=mode) 

3408 else: 

3409 c = signal.fftconvolve(a, b[::-1], mode=mode) 

3410 return c 

3411 

3412 else: 

3413 buggy = numpy_has_correlate_flip_bug() 

3414 

3415 a = num.asarray(a) 

3416 b = num.asarray(b) 

3417 

3418 if buggy: 

3419 b = num.conj(b) 

3420 

3421 c = num.correlate(a, b, mode=mode) 

3422 

3423 if buggy and a.size < b.size: 

3424 return c[::-1] 

3425 else: 

3426 return c 

3427 

3428 

3429def numpy_correlate_emulate(a, b, mode='valid'): 

3430 ''' 

3431 Slow version of :py:func:`numpy.correlate` for comparison. 

3432 ''' 

3433 

3434 a = num.asarray(a) 

3435 b = num.asarray(b) 

3436 kmin = -(b.size-1) 

3437 klen = a.size-kmin 

3438 kmin, kmax = numpy_correlate_lag_range(a, b, mode=mode) 

3439 kmin = int(kmin) 

3440 kmax = int(kmax) 

3441 klen = kmax - kmin + 1 

3442 c = num.zeros(klen, dtype=num.promote_types(b.dtype, a.dtype)) 

3443 for k in range(kmin, kmin+klen): 

3444 imin = max(0, -k) 

3445 ilen = min(b.size, a.size-k) - imin 

3446 c[k-kmin] = num.sum( 

3447 a[imin+k:imin+ilen+k] * num.conj(b[imin:imin+ilen])) 

3448 

3449 return c 

3450 

3451 

3452def numpy_correlate_lag_range(a, b, mode='valid', use_fft=False): 

3453 ''' 

3454 Get range of lags for which :py:func:`numpy.correlate` produces values. 

3455 ''' 

3456 

3457 a = num.asarray(a) 

3458 b = num.asarray(b) 

3459 

3460 kmin = -(b.size-1) 

3461 if mode == 'full': 

3462 klen = a.size-kmin 

3463 elif mode == 'same': 

3464 klen = max(a.size, b.size) 

3465 kmin += (a.size+b.size-1 - max(a.size, b.size)) // 2 + \ 

3466 int(not use_fft and a.size % 2 == 0 and b.size > a.size) 

3467 elif mode == 'valid': 

3468 klen = abs(a.size - b.size) + 1 

3469 kmin += min(a.size, b.size) - 1 

3470 

3471 return kmin, kmin + klen - 1 

3472 

3473 

3474def autocorr(x, nshifts): 

3475 ''' 

3476 Compute biased estimate of the first autocorrelation coefficients. 

3477 

3478 :param x: input array 

3479 :param nshifts: number of coefficients to calculate 

3480 ''' 

3481 

3482 mean = num.mean(x) 

3483 std = num.std(x) 

3484 n = x.size 

3485 xdm = x - mean 

3486 r = num.zeros(nshifts) 

3487 for k in range(nshifts): 

3488 r[k] = 1./((n-num.abs(k))*std) * num.sum(xdm[:n-k] * xdm[k:]) 

3489 

3490 return r 

3491 

3492 

3493def yulewalker(x, order): 

3494 ''' 

3495 Compute autoregression coefficients using Yule-Walker method. 

3496 

3497 :param x: input array 

3498 :param order: number of coefficients to produce 

3499 

3500 A biased estimate of the autocorrelation is used. The Yule-Walker equations 

3501 are solved by :py:func:`numpy.linalg.inv` instead of Levinson-Durbin 

3502 recursion which is normally used. 

3503 ''' 

3504 

3505 gamma = autocorr(x, order+1) 

3506 d = gamma[1:1+order] 

3507 a = num.zeros((order, order)) 

3508 gamma2 = num.concatenate((gamma[::-1], gamma[1:order])) 

3509 for i in range(order): 

3510 ioff = order-i 

3511 a[i, :] = gamma2[ioff:ioff+order] 

3512 

3513 return num.dot(num.linalg.inv(a), -d) 

3514 
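# Editor's sketch (illustrative, not part of the library): recovering the
# coefficients of a synthetic AR(2) process. With the sign convention used
# here, x[i] + c[0]*x[i-1] + c[1]*x[i-2] ~ noise, so the result is roughly
# [-0.6, 0.2] for the process below (made up for demonstration).
def _example_yulewalker():
    rng = num.random.default_rng(23)
    x = num.zeros(5000)
    eta = rng.normal(size=5000)
    for i in range(2, 5000):
        x[i] = 0.6*x[i-1] - 0.2*x[i-2] + eta[i]
    return yulewalker(x, order=2)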

3515 

3516def moving_avg(x, n): 

3517 n = int(n) 

3518 cx = x.cumsum() 

3519 nn = len(x) 

3520 y = num.zeros(nn, dtype=cx.dtype) 

3521 y[n//2:n//2+(nn-n)] = (cx[n:]-cx[:-n])/n 

3522 y[:n//2] = y[n//2] 

3523 y[n//2+(nn-n):] = y[n//2+(nn-n)-1] 

3524 return y 

3525 

3526 

3527def moving_sum(x, n, mode='valid'): 

3528 n = int(n) 

3529 cx = x.cumsum() 

3530 nn = len(x) 

3531 

3532 if mode == 'valid': 

3533 if nn-n+1 <= 0: 

3534 return num.zeros(0, dtype=cx.dtype) 

3535 y = num.zeros(nn-n+1, dtype=cx.dtype) 

3536 y[0] = cx[n-1] 

3537 y[1:nn-n+1] = cx[n:nn]-cx[0:nn-n] 

3538 

3539 if mode == 'full': 

3540 y = num.zeros(nn+n-1, dtype=cx.dtype) 

3541 if n <= nn: 

3542 y[0:n] = cx[0:n] 

3543 y[n:nn] = cx[n:nn]-cx[0:nn-n] 

3544 y[nn:nn+n-1] = cx[-1]-cx[nn-n:nn-1] 

3545 else: 

3546 y[0:nn] = cx[0:nn] 

3547 y[nn:n] = cx[nn-1] 

3548 y[n:nn+n-1] = cx[nn-1] - cx[0:nn-1] 

3549 

3550 if mode == 'same': 

3551 n1 = (n-1)//2 

3552 y = num.zeros(nn, dtype=cx.dtype) 

3553 if n <= nn: 

3554 y[0:n-n1] = cx[n1:n] 

3555 y[n-n1:nn-n1] = cx[n:nn]-cx[0:nn-n] 

3556 y[nn-n1:nn] = cx[nn-1] - cx[nn-n:nn-n+n1] 

3557 else: 

3558 y[0:max(0, nn-n1)] = cx[min(n1, nn):nn] 

3559 y[max(nn-n1, 0):min(n-n1, nn)] = cx[nn-1] 

3560 y[min(n-n1, nn):nn] = cx[nn-1] - cx[0:max(0, nn-(n-n1))] 

3561 

3562 return y 

3563 
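# Editor's sketch (illustrative, not part of the library): 'valid' mode
# matches num.convolve(x, num.ones(n), 'valid') but runs via a cumulative
# sum instead of a convolution.
def _example_moving_sum():
    x = num.array([1., 2., 3., 4., 5.])
    y = moving_sum(x, 3, mode='valid')
    assert num.allclose(y, [6., 9., 12.])
    return y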

3564 

3565def nextpow2(i): 

3566 return 2**int(math.ceil(math.log(i)/math.log(2.))) 

3567 

3568 

3569def snapper_w_offset(nmax, offset, delta, snapfun=math.ceil): 

3570 def snap(x): 

3571 return max(0, min(int(snapfun((x-offset)/delta)), nmax)) 

3572 return snap 

3573 

3574 

3575def snapper(nmax, delta, snapfun=math.ceil): 

3576 def snap(x): 

3577 return max(0, min(int(snapfun(x/delta)), nmax)) 

3578 return snap 

3579 

3580 

3581def apply_costaper(a, b, c, d, y, x0, dx): 

3582 abcd = num.array((a, b, c, d), dtype=float) 

3583 ja, jb, jc, jd = num.clip(num.ceil((abcd-x0)/dx).astype(int), 0, y.size) 

3584 y[:ja] = 0. 

3585 y[ja:jb] *= 0.5 \ 

3586 - 0.5*num.cos((dx*num.arange(ja, jb)-(a-x0))/(b-a)*num.pi) 

3587 y[jc:jd] *= 0.5 \ 

3588 + 0.5*num.cos((dx*num.arange(jc, jd)-(c-x0))/(d-c)*num.pi) 

3589 y[jd:] = 0. 

3590 

3591 

3592def span_costaper(a, b, c, d, y, x0, dx): 

3593 hi = snapper_w_offset(y.size, x0, dx) 

3594 return hi(a), hi(d) - hi(a) 

3595 

3596 

3597def costaper(a, b, c, d, nfreqs, deltaf): 

3598 hi = snapper(nfreqs, deltaf) 

3599 tap = num.zeros(nfreqs) 

3600 tap[hi(a):hi(b)] = 0.5 \ 

3601 - 0.5*num.cos((deltaf*num.arange(hi(a), hi(b))-a)/(b-a)*num.pi) 

3602 tap[hi(b):hi(c)] = 1. 

3603 tap[hi(c):hi(d)] = 0.5 \ 

3604 + 0.5*num.cos((deltaf*num.arange(hi(c), hi(d))-c)/(d-c)*num.pi) 

3605 

3606 return tap 

3607 

3608 

3609def t2ind(t, tdelta, snap=round): 

3610 return int(snap(t/tdelta)) 

3611 

3612 

3613def hilbert(x, N=None): 

3614 ''' 

3615 Return the hilbert transform of x of length N. 

3616 

3617 (from scipy.signal, but changed to use fft and ifft from numpy.fft) 

3618 ''' 

3619 

3620 x = num.asarray(x) 

3621 if N is None: 

3622 N = len(x) 

3623 if N <= 0: 

3624 raise ValueError('N must be positive.') 

3625 if num.iscomplexobj(x): 

3626 logger.warning('imaginary part of x ignored.') 

3627 x = num.real(x) 

3628 

3629 Xf = num.fft.fft(x, N, axis=0) 

3630 h = num.zeros(N) 

3631 if N % 2 == 0: 

3632 h[0] = h[N//2] = 1 

3633 h[1:N//2] = 2 

3634 else: 

3635 h[0] = 1 

3636 h[1:(N+1)//2] = 2 

3637 

3638 if len(x.shape) > 1: 

3639 h = h[:, num.newaxis] 

3640 x = num.fft.ifft(Xf*h) 

3641 return x 

3642 
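# Editor's usage sketch (not part of the original module; the helper name
# _example_hilbert is hypothetical): the absolute value of the returned
# analytic signal is the envelope. For a sine that is exactly periodic in
# the window, the envelope is one everywhere.
def _example_hilbert():
    n = num.arange(1000)
    x = num.sin(2. * num.pi * 50. * n / 1000.)
    envelope = num.abs(hilbert(x))
    assert num.allclose(envelope, 1.)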

3643 

3644def near(a, b, eps): 

3645 return abs(a-b) < eps 

3646 

3647 

3648def coroutine(func): 

3649 def wrapper(*args, **kwargs): 

3650 gen = func(*args, **kwargs) 

3651 next(gen) 

3652 return gen 

3653 

3654 wrapper.__name__ = func.__name__ 

3655 wrapper.__dict__ = func.__dict__ 

3656 wrapper.__doc__ = func.__doc__ 

3657 return wrapper 

3658 

3659 

3660class States(object): 

3661 ''' 

3662 Utility to store channel-specific state in coroutines. 

3663 ''' 

3664 

3665 def __init__(self): 

3666 self._states = {} 

3667 

3668 def get(self, tr): 

3669 k = tr.nslc_id 

3670 if k in self._states: 

3671 tmin, deltat, dtype, value = self._states[k] 

3672 if (near(tmin, tr.tmin, deltat/100.) 

3673 and near(deltat, tr.deltat, deltat/10000.) 

3674 and dtype == tr.ydata.dtype): 

3675 

3676 return value 

3677 

3678 return None 

3679 

3680 def set(self, tr, value): 

3681 k = tr.nslc_id 

3682 if k in self._states and self._states[k][-1] is not value: 

3683 self.free(self._states[k][-1]) 

3684 

3685 self._states[k] = (tr.tmax+tr.deltat, tr.deltat, tr.ydata.dtype, value) 

3686 

3687 def free(self, value): 

3688 pass 

3689 
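# Editor's note (illustrative, not part of the original module): a value
# stored with set() is only handed back by get() for a follow-up trace
# that continues seamlessly -- same sampling rate and dtype, with a tmin
# matching the stored end time -- otherwise get() returns None and the
# consumer starts from a fresh state.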

3690 

3691@coroutine 

3692def co_list_append(list): 

3693 while True: 

3694 list.append((yield)) 

3695 
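# Editor's usage sketch (not part of the original module): thanks to the
# @coroutine decorator the generator is already primed, so values can be
# sent to it immediately.
#
#     collected = []
#     sink = co_list_append(collected)
#     sink.send('a')
#     sink.send('b')
#     # collected == ['a', 'b']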

3696 

3697class ScipyBug(Exception): 

3698 pass 

3699 

3700 

3701@coroutine 

3702def co_lfilter(target, b, a): 

3703 ''' 

3704 Successively filter broken continuous trace data (coroutine). 

3705 

3706 Create coroutine which takes :py:class:`Trace` objects, filters their data 

3707 through :py:func:`scipy.signal.lfilter` and sends new :py:class:`Trace` 

3708 objects containing the filtered data to ``target``. This is useful if one 

3709 wants to filter a long continuous time series that is split into many 

3710 successive traces, without producing filter artifacts at trace boundaries. 

3711 

3712 Filter states are kept *per channel*, specifically, for each (network, 

3713 station, location, channel) combination occurring in the input traces, a 

3714 separate state is created and maintained. This makes it possible to filter 

3715 multichannel or multistation data with only one :py:func:`co_lfilter` 

3716 instance. 

3717 

3718 Filter state is reset when gaps occur. 

3719 

3720 Use it like this:: 

3721 

3722 from pyrocko.trace import co_lfilter, co_list_append 

3723 

3724 filtered_traces = [] 

3725 pipe = co_lfilter(co_list_append(filtered_traces), b, a) 

3726 for trace in traces: 

3727 pipe.send(trace) 

3728 

3729 pipe.close() 

3730 

3731 ''' 

3732 

3733 try: 

3734 states = States() 

3735 output = None 

3736 while True: 

3737 input = (yield) 

3738 

3739 zi = states.get(input) 

3740 if zi is None: 

3741 zi = num.zeros(max(len(a), len(b))-1, dtype=float) 

3742 

3743 output = input.copy(data=False) 

3744 try: 

3745 ydata, zf = signal.lfilter(b, a, input.get_ydata(), zi=zi) 

3746 except ValueError: 

3747 raise ScipyBug( 

3748 'signal.lfilter failed: could be related to a bug ' 

3749 'in some older scipy versions, e.g. on opensuse42.1') 

3750 

3751 output.set_ydata(ydata) 

3752 states.set(input, zf) 

3753 target.send(output) 

3754 

3755 except GeneratorExit: 

3756 target.close() 

3757 

3758 

3759def co_antialias(target, q, n=None, ftype='fir'): 

3760 b, a, n = util.decimate_coeffs(q, n, ftype) 

3761 anti = co_lfilter(target, b, a) 

3762 return anti 

3763 

3764 

3765@coroutine 

3766def co_dropsamples(target, q, nfir): 

3767 try: 

3768 states = States() 

3769 while True: 

3770 tr = (yield) 

3771 newdeltat = q * tr.deltat 

3772 ioffset = states.get(tr) 

3773 if ioffset is None: 

3774 # For the FIR filter, the first nfir samples are polluted by 

3775 # boundary effects; cut them off. 

3776 # For IIR filters this may be (much) more; we do not correct for that. 

3777 # Place the sample instances at times which are a multiple of the 

3778 # new sampling interval. 

3779 newtmin_want = math.ceil( 

3780 (tr.tmin+(nfir+1)*tr.deltat) / newdeltat) * newdeltat \ 

3781 - (nfir/2*tr.deltat) 

3782 ioffset = int(round((newtmin_want - tr.tmin)/tr.deltat)) 

3783 if ioffset < 0: 

3784 ioffset = ioffset % q 

3785 

3786 newtmin_have = tr.tmin + ioffset * tr.deltat 

3787 newtr = tr.copy(data=False) 

3788 newtr.deltat = newdeltat 

3789 # because the fir kernel shifts data by nfir/2 samples: 

3790 newtr.tmin = newtmin_have - (nfir/2*tr.deltat) 

3791 newtr.set_ydata(tr.get_ydata()[ioffset::q].copy()) 

3792 states.set(tr, (ioffset % q - tr.data_len() % q) % q) 

3793 target.send(newtr) 

3794 

3795 except GeneratorExit: 

3796 target.close() 

3797 

3798 

3799def co_downsample(target, q, n=None, ftype='fir'): 

3800 ''' 

3801 Successively downsample broken continuous trace data (coroutine). 

3802 

3803 Create coroutine which takes :py:class:`Trace` objects, downsamples their 

3804 data and sends new :py:class:`Trace` objects containing the downsampled 

3805 data to ``target``. This is useful if one wants to downsample a long 

3806 continuous time series that is split into many successive traces, without 

3807 producing filter artifacts and gaps at trace boundaries. 

3808 

3809 Filter states are kept *per channel*, specifically, for each (network, 

3810 station, location, channel) combination occurring in the input traces, a 

3811 separate state is created and maintained. This makes it possible to filter 

3812 multichannel or multistation data with only one :py:func:`co_downsample` 

3813 instance. 

3814 

3815 Filter state is reset when gaps occur. The sampling instances are chosen 

3816 so that they occur at (or as close as possible to) even multiples of the 

3817 sampling interval of the downsampled trace (based on system time). 

3818 ''' 

3819 

3820 b, a, n = util.decimate_coeffs(q, n, ftype) 

3821 return co_antialias(co_dropsamples(target, q, n), q, n, ftype) 

3822 

3823 

3824@coroutine 

3825def co_downsample_to(target, deltat): 

3826 

3827 decimators = {} 

3828 try: 

3829 while True: 

3830 tr = (yield) 

3831 ratio = deltat / tr.deltat 

3832 rratio = round(ratio) 

3833 if abs(rratio - ratio)/ratio > 0.0001: 

3834 raise util.UnavailableDecimation('ratio = %g' % ratio) 

3835 

3836 deci_seq = tuple(x for x in util.decitab(int(rratio)) if x != 1) 

3837 if deci_seq not in decimators: 

3838 pipe = target 

3839 for q in deci_seq[::-1]: 

3840 pipe = co_downsample(pipe, q) 

3841 

3842 decimators[deci_seq] = pipe 

3843 

3844 decimators[deci_seq].send(tr) 

3845 

3846 except GeneratorExit: 

3847 for g in decimators.values(): 

3848 g.close() 

3849 
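# Editor's usage sketch (illustrative; assumes `traces` holds Trace
# objects, e.g. sampled at 100 Hz): downsample a stream of traces to a
# 1 s sampling interval.
#
#     out = []
#     pipe = co_downsample_to(co_list_append(out), 1.0)
#     for tr in traces:
#         pipe.send(tr)
#     pipe.close()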

3850 

3851class DomainChoice(StringChoice): 

3852 choices = [ 

3853 'time_domain', 

3854 'frequency_domain', 

3855 'envelope', 

3856 'absolute', 

3857 'cc_max_norm'] 

3858 

3859 

3860class MisfitSetup(Object): 

3861 ''' 

3862 Contains the misfit setup to be used in :py:meth:`Trace.misfit`. 

3863 

3864 :param description: Description of the setup 

3865 :param norm: L-norm classifier 

3866 :param taper: Object of :py:class:`Taper` 

3867 :param filter: Object of :py:class:`~pyrocko.response.FrequencyResponse` 

3868 :param domain: ['time_domain', 'frequency_domain', 'envelope', 'absolute', 

3869 'cc_max_norm'] 

3870 

3871 Can be dumped to a YAML file. 

3872 ''' 

3873 

3874 xmltagname = 'misfitsetup' 

3875 description = String.T(optional=True) 

3876 norm = Int.T(optional=False) 

3877 taper = Taper.T(optional=False) 

3878 filter = FrequencyResponse.T(optional=True) 

3879 domain = DomainChoice.T(default='time_domain') 

3880 
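# Editor's usage sketch (illustrative): build a time-domain L2 misfit
# setup and serialize it. CosTaper is assumed to be the cosine taper
# class defined earlier in this module.
#
#     setup = MisfitSetup(
#         description='L2 misfit in the time domain',
#         norm=2,
#         taper=CosTaper(0., 5., 55., 60.),
#         domain='time_domain')
#
#     print(setup.dump())   # YAML representation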

3881 

3882def equalize_sampling_rates(trace_1, trace_2): 

3883 ''' 

3884 Equalize sampling rates of two traces (reduce higher sampling rate to 

3885 lower). 

3886 

3887 :param trace_1: :py:class:`Trace` object 

3888 :param trace_2: :py:class:`Trace` object 

3889 

3890 Returns both traces; where resampling is needed, a downsampled copy replaces the original. 

3891 ''' 

3892 

3893 if same_sampling_rate(trace_1, trace_2): 

3894 return trace_1, trace_2 

3895 

3896 if trace_1.deltat < trace_2.deltat: 

3897 t1_out = trace_1.copy() 

3898 t1_out.downsample_to(deltat=trace_2.deltat, snap=True) 

3899 logger.debug('Trace downsampled (return copy of trace): %s' 

3900 % '.'.join(t1_out.nslc_id)) 

3901 return t1_out, trace_2 

3902 

3903 elif trace_1.deltat > trace_2.deltat: 

3904 t2_out = trace_2.copy() 

3905 t2_out.downsample_to(deltat=trace_1.deltat, snap=True) 

3906 logger.debug('Trace downsampled (return copy of trace): %s' 

3907 % '.'.join(t2_out.nslc_id)) 

3908 return trace_1, t2_out 

3909 
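# Editor's usage sketch (illustrative; tr_a and tr_b are Trace objects
# with possibly different sampling rates):
#
#     tr_a, tr_b = equalize_sampling_rates(tr_a, tr_b)
#     # both traces now share the lower of the two sampling rates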

3910 

3911def Lx_norm(u, v, norm=2): 

3912 ''' 

3913 Calculate the misfit value *m* and the normalization divisor *n* 

3914 according to the given norm. 

3915 

3916 The normalization divisor *n* is calculated from ``v``. 

3917 

3918 :param u: :py:class:`numpy.ndarray` 

3919 :param v: :py:class:`numpy.ndarray` 

3920 :param norm: exponent of the norm (default: 2) 

3921 

3922 ``u`` and ``v`` must be of same size. 

3923 ''' 

3924 

3925 if norm == 1: 

3926 return ( 

3927 num.sum(num.abs(v-u)), 

3928 num.sum(num.abs(v))) 

3929 

3930 elif norm == 2: 

3931 return ( 

3932 num.sqrt(num.sum((v-u)**2)), 

3933 num.sqrt(num.sum(v**2))) 

3934 

3935 else: 

3936 return ( 

3937 num.power(num.sum(num.abs(num.power(v - u, norm))), 1./norm), 

3938 num.power(num.sum(num.abs(num.power(v, norm))), 1./norm)) 

3939 
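# Editor's worked example (not part of the original module; the helper
# name _example_lx_norm is hypothetical): for u = [1, 2], v = [2, 4] and
# norm=2, m = sqrt((2-1)^2 + (4-2)^2) = sqrt(5) and
# n = sqrt(2^2 + 4^2) = sqrt(20).
def _example_lx_norm():
    u = num.array([1., 2.])
    v = num.array([2., 4.])
    m, n = Lx_norm(u, v, norm=2)
    assert num.allclose((m, n), (num.sqrt(5.), num.sqrt(20.)))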

3940 

3941def do_downsample(tr, deltat): 

3942 if abs(tr.deltat - deltat) / tr.deltat > 1e-6: 

3943 tr = tr.copy() 

3944 tr.downsample_to(deltat, snap=True, demean=False) 

3945 else: 

3946 if tr.tmin/tr.deltat > 1e-6 or tr.tmax/tr.deltat > 1e-6: 

3947 tr = tr.copy() 

3948 tr.snap() 

3949 return tr 

3950 

3951 

3952def do_extend(tr, tmin, tmax): 

3953 if tmin < tr.tmin or tmax > tr.tmax: 

3954 tr = tr.copy() 

3955 tr.extend(tmin=tmin, tmax=tmax, fillmethod='repeat') 

3956 

3957 return tr 

3958 

3959 

3960def do_pre_taper(tr, taper): 

3961 return tr.taper(taper, inplace=False, chop=True) 

3962 

3963 

3964def do_fft(tr, filter): 

3965 if filter is None: 

3966 return tr 

3967 else: 

3968 ndata = tr.ydata.size 

3969 nfft = nextpow2(ndata) 

3970 padded = num.zeros(nfft, dtype=float) 

3971 padded[:ndata] = tr.ydata 

3972 spectrum = num.fft.rfft(padded) 

3973 df = 1.0 / (tr.deltat * nfft) 

3974 frequencies = num.arange(spectrum.size)*df 

3975 return [tr, frequencies, spectrum] 

3976 

3977 

3978def do_filter(inp, filter): 

3979 if filter is None: 

3980 return inp 

3981 else: 

3982 tr, frequencies, spectrum = inp 

3983 spectrum *= filter.evaluate(frequencies) 

3984 return [tr, frequencies, spectrum] 

3985 

3986 

3987def do_ifft(inp): 

3988 if isinstance(inp, Trace): 

3989 return inp 

3990 else: 

3991 tr, _, spectrum = inp 

3992 ndata = tr.ydata.size 

3993 tr = tr.copy(data=False) 

3994 tr.set_ydata(num.fft.irfft(spectrum)[:ndata]) 

3995 return tr 

3996 
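# Editor's note (illustrative): do_fft(), do_filter() and do_ifft() form
# a zero-padding FFT -> apply-response -> inverse-FFT pipeline. With
# filter=None each stage passes the trace through unchanged:
#
#     out = do_ifft(do_filter(do_fft(tr, None), None))   # out is tr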

3997 

3998def check_alignment(t1, t2): 

3999 if abs(t1.tmin-t2.tmin) > t1.deltat * 1e-4 or \ 

4000 abs(t1.tmax - t2.tmax) > t1.deltat * 1e-4 or \ 

4001 t1.ydata.shape != t2.ydata.shape: 

4002 raise MisalignedTraces( 

4003 'Cannot calculate misfit of %s and %s due to misaligned ' 

4004 'traces.' % ('.'.join(t1.nslc_id), '.'.join(t2.nslc_id)))