1# http://pyrocko.org - GPLv3 

2# 

3# The Pyrocko Developers, 21st Century 

4# ---|P------/S----------~Lg---------- 

5''' 

6This module provides basic signal processing for seismic traces. 

7''' 

8from __future__ import division, absolute_import 

9 

10import time 

11import math 

12import copy 

13import logging 

14 

15import numpy as num 

16from scipy import signal 

17 

18from . import util, evalresp, orthodrome, pchain, model 

19from .util import reuse, UnavailableDecimation 

20from .guts import Object, Float, Int, String, Complex, Tuple, List, \ 

21 StringChoice, Timestamp 

22from .guts_array import Array 

23 

24try: 

25 newstr = unicode 

26except NameError: 

27 newstr = str 

28 

29 

30UnavailableDecimation # noqa 

31 

32guts_prefix = 'pf' 

33 

34logger = logging.getLogger('pyrocko.trace') 

35 

36 

37class Trace(Object): 

38 

39 ''' 

40 Create new trace object. 

41 

42 A ``Trace`` object represents a single continuous strip of evenly sampled 

43 time series data. It is built from a 1D NumPy array containing the data 

44 samples and some attributes describing its beginning and ending time, its 

45 sampling rate and four string identifiers (its network, station, location 

46 and channel code). 

47 

48 :param network: network code 

49 :param station: station code 

50 :param location: location code 

51 :param channel: channel code 

52 :param tmin: system time of first sample in [s] 

53 :param tmax: system time of last sample in [s] (if set to ``None`` it is 

54 computed from length of ``ydata``) 

55 :param deltat: sampling interval in [s] 

56 :param ydata: 1D numpy array with data samples (can be ``None`` when 

57 ``tmax`` is not ``None``) 

58 :param mtime: optional modification time 

59 

60 :param meta: additional meta information (not used, but maintained by the 

61 library) 

62 

63 The length of the network, station, location and channel codes is not 

64 resricted by this software, but data formats like SAC, Mini-SEED or GSE 

65 have different limits on the lengths of these codes. The codes set here are 

66 silently truncated when the trace is stored 

67 ''' 

68 

69 network = String.T(default='') 

70 station = String.T(default='STA') 

71 location = String.T(default='') 

72 channel = String.T(default='') 

73 

74 tmin = Timestamp.T(default=Timestamp.D('1970-01-01 00:00:00')) 

75 tmax = Timestamp.T() 

76 

77 deltat = Float.T(default=1.0) 

78 ydata = Array.T(optional=True, shape=(None,), serialize_as='base64+meta') 

79 

80 mtime = Timestamp.T(optional=True) 

81 

82 cached_frequencies = {} 

83 

84 def __init__(self, network='', station='STA', location='', channel='', 

85 tmin=0., tmax=None, deltat=1., ydata=None, mtime=None, 

86 meta=None): 

87 

88 Object.__init__(self, init_props=False) 

89 

90 time_float = util.get_time_float() 

91 

92 if not isinstance(tmin, time_float): 

93 tmin = Trace.tmin.regularize_extra(tmin) 

94 

95 if tmax is not None and not isinstance(tmax, time_float): 

96 tmax = Trace.tmax.regularize_extra(tmax) 

97 

98 if mtime is not None and not isinstance(mtime, time_float): 

99 mtime = Trace.mtime.regularize_extra(mtime) 

100 

101 self._growbuffer = None 

102 

103 tmin = time_float(tmin) 

104 if tmax is not None: 

105 tmax = time_float(tmax) 

106 

107 if mtime is None: 

108 mtime = time_float(time.time()) 

109 

110 self.network, self.station, self.location, self.channel = [ 

111 reuse(x) for x in (network, station, location, channel)] 

112 

113 self.tmin = tmin 

114 self.deltat = deltat 

115 

116 if tmax is None: 

117 if ydata is not None: 

118 self.tmax = self.tmin + (ydata.size-1)*self.deltat 

119 else: 

120 raise Exception( 

121 'fixme: trace must be created with tmax or ydata') 

122 else: 

123 n = int(round((tmax - self.tmin) / self.deltat)) + 1 

124 self.tmax = self.tmin + (n - 1) * self.deltat 

125 

126 self.meta = meta 

127 self.ydata = ydata 

128 self.mtime = mtime 

129 self._update_ids() 

130 self.file = None 

131 self._pchain = None 

132 

133 def __str__(self): 

134 fmt = min(9, max(0, -int(math.floor(math.log10(self.deltat))))) 

135 s = 'Trace (%s, %s, %s, %s)\n' % self.nslc_id 

136 s += ' timerange: %s - %s\n' % ( 

137 util.time_to_str(self.tmin, format=fmt), 

138 util.time_to_str(self.tmax, format=fmt)) 

139 

140 s += ' delta t: %g\n' % self.deltat 

141 if self.meta: 

142 for k in sorted(self.meta.keys()): 

143 s += ' %s: %s\n' % (k, self.meta[k]) 

144 return s 

145 

146 def __getstate__(self): 

147 return (self.network, self.station, self.location, self.channel, 

148 self.tmin, self.tmax, self.deltat, self.mtime, 

149 self.ydata, self.meta) 

150 

151 def __setstate__(self, state): 

152 if len(state) == 10: 

153 self.network, self.station, self.location, self.channel, \ 

154 self.tmin, self.tmax, self.deltat, self.mtime, \ 

155 self.ydata, self.meta = state 

156 

157 else: 

158 # backward compatibility with old behaviour 

159 self.network, self.station, self.location, self.channel, \ 

160 self.tmin, self.tmax, self.deltat, self.mtime = state 

161 self.ydata = None 

162 self.meta = None 

163 

164 self._growbuffer = None 

165 self._update_ids() 

166 

167 def name(self): 

168 ''' 

169 Get a short string description. 

170 ''' 

171 

172 s = '%s.%s.%s.%s, %s, %s' % (self.nslc_id + ( 

173 util.time_to_str(self.tmin), 

174 util.time_to_str(self.tmax))) 

175 

176 return s 

177 

178 def __eq__(self, other): 

179 return ( 

180 isinstance(other, Trace) 

181 and self.network == other.network 

182 and self.station == other.station 

183 and self.location == other.location 

184 and self.channel == other.channel 

185 and (abs(self.deltat - other.deltat) 

186 < (self.deltat + other.deltat)*1e-6) 

187 and abs(self.tmin-other.tmin) < self.deltat*0.01 

188 and abs(self.tmax-other.tmax) < self.deltat*0.01 

189 and num.all(self.ydata == other.ydata)) 

190 

191 def almost_equal(self, other, rtol=1e-5, atol=0.0): 

192 return ( 

193 self.network == other.network 

194 and self.station == other.station 

195 and self.location == other.location 

196 and self.channel == other.channel 

197 and (abs(self.deltat - other.deltat) 

198 < (self.deltat + other.deltat)*1e-6) 

199 and abs(self.tmin-other.tmin) < self.deltat*0.01 

200 and abs(self.tmax-other.tmax) < self.deltat*0.01 

201 and num.allclose(self.ydata, other.ydata, rtol=rtol, atol=atol)) 

202 

203 def assert_almost_equal(self, other, rtol=1e-5, atol=0.0): 

204 

205 assert self.network == other.network, \ 

206 'network codes differ: %s, %s' % (self.network, other.network) 

207 assert self.station == other.station, \ 

208 'station codes differ: %s, %s' % (self.station, other.station) 

209 assert self.location == other.location, \ 

210 'location codes differ: %s, %s' % (self.location, other.location) 

211 assert self.channel == other.channel, 'channel codes differ' 

212 assert (abs(self.deltat - other.deltat) 

213 < (self.deltat + other.deltat)*1e-6), \ 

214 'sampling intervals differ %g, %g' % (self.deltat, other.delta) 

215 assert abs(self.tmin-other.tmin) < self.deltat*0.01, \ 

216 'start times differ: %s, %s' % ( 

217 util.time_to_str(self.tmin), util.time_to_str(other.tmin)) 

218 assert abs(self.tmax-other.tmax) < self.deltat*0.01, \ 

219 'end times differ: %s, %s' % ( 

220 util.time_to_str(self.tmax), util.time_to_str(other.tmax)) 

221 

222 assert num.allclose(self.ydata, other.ydata, rtol=rtol, atol=atol), \ 

223 'trace values differ' 

224 

225 def __hash__(self): 

226 return id(self) 

227 

228 def __call__(self, t, clip=False, snap=round): 

229 it = int(snap((t-self.tmin)/self.deltat)) 

230 if clip: 

231 it = max(0, min(it, self.ydata.size-1)) 

232 else: 

233 if it < 0 or self.ydata.size <= it: 

234 raise IndexError() 

235 

236 return self.tmin+it*self.deltat, self.ydata[it] 

237 

238 def interpolate(self, t, clip=False): 

239 ''' 

240 Value of trace between supporting points through linear interpolation. 

241 

242 :param t: time instant 

243 :param clip: whether to clip indices to trace ends 

244 ''' 

245 

246 t0, y0 = self(t, clip=clip, snap=math.floor) 

247 t1, y1 = self(t, clip=clip, snap=math.ceil) 

248 if t0 == t1: 

249 return y0 

250 else: 

251 return y0+(t-t0)/(t1-t0)*(y1-y0) 

252 

253 def index_clip(self, i): 

254 ''' 

255 Clip index to valid range. 

256 ''' 

257 

258 return min(max(0, i), self.ydata.size) 

259 

260 def add(self, other, interpolate=True, left=0., right=0.): 

261 ''' 

262 Add values of other trace (self += other). 

263 

264 Add values of ``other`` trace to the values of ``self``, where it 

265 intersects with ``other``. This method does not change the extent of 

266 ``self``. If ``interpolate`` is ``True`` (the default), the values of 

267 ``other`` to be added are interpolated at sampling instants of 

268 ``self``. Linear interpolation is performed. In this case the sampling 

269 rate of ``other`` must be equal to or lower than that of ``self``. If 

270 ``interpolate`` is ``False``, the sampling rates of the two traces must 

271 match. 

272 ''' 

273 

274 if interpolate: 

275 assert self.deltat <= other.deltat \ 

276 or same_sampling_rate(self, other) 

277 

278 other_xdata = other.get_xdata() 

279 xdata = self.get_xdata() 

280 self.ydata += num.interp( 

281 xdata, other_xdata, other.ydata, left=left, right=left) 

282 else: 

283 assert self.deltat == other.deltat 

284 ioff = int(round((other.tmin-self.tmin)/self.deltat)) 

285 ibeg = max(0, ioff) 

286 iend = min(self.data_len(), ioff+other.data_len()) 

287 self.ydata[ibeg:iend] += other.ydata[ibeg-ioff:iend-ioff] 

288 

289 def mult(self, other, interpolate=True): 

290 ''' 

291 Muliply with values of other trace ``(self *= other)``. 

292 

293 Multiply values of ``other`` trace to the values of ``self``, where it 

294 intersects with ``other``. This method does not change the extent of 

295 ``self``. If ``interpolate`` is ``True`` (the default), the values of 

296 ``other`` to be multiplied are interpolated at sampling instants of 

297 ``self``. Linear interpolation is performed. In this case the sampling 

298 rate of ``other`` must be equal to or lower than that of ``self``. If 

299 ``interpolate`` is ``False``, the sampling rates of the two traces must 

300 match. 

301 ''' 

302 

303 if interpolate: 

304 assert self.deltat <= other.deltat or \ 

305 same_sampling_rate(self, other) 

306 

307 other_xdata = other.get_xdata() 

308 xdata = self.get_xdata() 

309 self.ydata *= num.interp( 

310 xdata, other_xdata, other.ydata, left=0., right=0.) 

311 else: 

312 assert self.deltat == other.deltat 

313 ibeg1 = int(round((other.tmin-self.tmin)/self.deltat)) 

314 ibeg2 = int(round((self.tmin-other.tmin)/self.deltat)) 

315 iend1 = int(round((other.tmax-self.tmin)/self.deltat))+1 

316 iend2 = int(round((self.tmax-other.tmin)/self.deltat))+1 

317 

318 ibeg1 = self.index_clip(ibeg1) 

319 iend1 = self.index_clip(iend1) 

320 ibeg2 = self.index_clip(ibeg2) 

321 iend2 = self.index_clip(iend2) 

322 

323 self.ydata[ibeg1:iend1] *= other.ydata[ibeg2:iend2] 

324 

325 def max(self): 

326 ''' 

327 Get time and value of data maximum. 

328 ''' 

329 

330 i = num.argmax(self.ydata) 

331 return self.tmin + i*self.deltat, self.ydata[i] 

332 

333 def min(self): 

334 ''' 

335 Get time and value of data minimum. 

336 ''' 

337 

338 i = num.argmin(self.ydata) 

339 return self.tmin + i*self.deltat, self.ydata[i] 

340 

341 def absmax(self): 

342 ''' 

343 Get time and value of maximum of the absolute of data. 

344 ''' 

345 

346 tmi, mi = self.min() 

347 tma, ma = self.max() 

348 if abs(mi) > abs(ma): 

349 return tmi, abs(mi) 

350 else: 

351 return tma, abs(ma) 

352 

353 def set_codes( 

354 self, network=None, station=None, location=None, channel=None): 

355 

356 ''' 

357 Set network, station, location, and channel codes. 

358 ''' 

359 

360 if network is not None: 

361 self.network = network 

362 if station is not None: 

363 self.station = station 

364 if location is not None: 

365 self.location = location 

366 if channel is not None: 

367 self.channel = channel 

368 

369 self._update_ids() 

370 

371 def set_network(self, network): 

372 self.network = network 

373 self._update_ids() 

374 

375 def set_station(self, station): 

376 self.station = station 

377 self._update_ids() 

378 

379 def set_location(self, location): 

380 self.location = location 

381 self._update_ids() 

382 

383 def set_channel(self, channel): 

384 self.channel = channel 

385 self._update_ids() 

386 

387 def overlaps(self, tmin, tmax): 

388 ''' 

389 Check if trace has overlap with a given time span. 

390 ''' 

391 return not (tmax < self.tmin or self.tmax < tmin) 

392 

393 def is_relevant(self, tmin, tmax, selector=None): 

394 ''' 

395 Check if trace has overlap with a given time span and matches a 

396 condition callback. (internal use) 

397 ''' 

398 

399 return not (tmax <= self.tmin or self.tmax < tmin) \ 

400 and (selector is None or selector(self)) 

401 

402 def _update_ids(self): 

403 ''' 

404 Update dependent ids. 

405 ''' 

406 

407 self.full_id = ( 

408 self.network, self.station, self.location, self.channel, self.tmin) 

409 self.nslc_id = reuse( 

410 (self.network, self.station, self.location, self.channel)) 

411 

412 def prune_from_reuse_cache(self): 

413 util.deuse(self.nslc_id) 

414 util.deuse(self.network) 

415 util.deuse(self.station) 

416 util.deuse(self.location) 

417 util.deuse(self.channel) 

418 

419 def set_mtime(self, mtime): 

420 ''' 

421 Set modification time of the trace. 

422 ''' 

423 

424 self.mtime = mtime 

425 

426 def get_xdata(self): 

427 ''' 

428 Create array for time axis. 

429 ''' 

430 

431 if self.ydata is None: 

432 raise NoData() 

433 

434 return self.tmin \ 

435 + num.arange(len(self.ydata), dtype=num.float64) * self.deltat 

436 

437 def get_ydata(self): 

438 ''' 

439 Get data array. 

440 ''' 

441 

442 if self.ydata is None: 

443 raise NoData() 

444 

445 return self.ydata 

446 

447 def set_ydata(self, new_ydata): 

448 ''' 

449 Replace data array. 

450 ''' 

451 

452 self.drop_growbuffer() 

453 self.ydata = new_ydata 

454 self.tmax = self.tmin+(len(self.ydata)-1)*self.deltat 

455 

456 def data_len(self): 

457 if self.ydata is not None: 

458 return self.ydata.size 

459 else: 

460 return int(round((self.tmax-self.tmin)/self.deltat)) + 1 

461 

462 def drop_data(self): 

463 ''' 

464 Forget data, make dataless trace. 

465 ''' 

466 

467 self.drop_growbuffer() 

468 self.ydata = None 

469 

470 def drop_growbuffer(self): 

471 ''' 

472 Detach the traces grow buffer. 

473 ''' 

474 

475 self._growbuffer = None 

476 self._pchain = None 

477 

478 def copy(self, data=True): 

479 ''' 

480 Make a deep copy of the trace. 

481 ''' 

482 

483 tracecopy = copy.copy(self) 

484 tracecopy.drop_growbuffer() 

485 if data: 

486 tracecopy.ydata = self.ydata.copy() 

487 tracecopy.meta = copy.deepcopy(self.meta) 

488 return tracecopy 

489 

490 def crop_zeros(self): 

491 ''' 

492 Remove any zeros at beginning and end. 

493 ''' 

494 

495 indices = num.where(self.ydata != 0.0)[0] 

496 if indices.size == 0: 

497 raise NoData() 

498 

499 ibeg = indices[0] 

500 iend = indices[-1]+1 

501 if ibeg == 0 and iend == self.ydata.size-1: 

502 return 

503 

504 self.drop_growbuffer() 

505 self.ydata = self.ydata[ibeg:iend].copy() 

506 self.tmin = self.tmin+ibeg*self.deltat 

507 self.tmax = self.tmin+(len(self.ydata)-1)*self.deltat 

508 self._update_ids() 

509 

510 def append(self, data): 

511 ''' 

512 Append data to the end of the trace. 

513 

514 To make this method efficient when successively very few or even single 

515 samples are appended, a larger grow buffer is allocated upon first 

516 invocation. The traces data is then changed to be a view into the 

517 currently filled portion of the grow buffer array. 

518 ''' 

519 

520 assert self.ydata.dtype == data.dtype 

521 newlen = data.size + self.ydata.size 

522 if self._growbuffer is None or self._growbuffer.size < newlen: 

523 self._growbuffer = num.empty(newlen*2, dtype=self.ydata.dtype) 

524 self._growbuffer[:self.ydata.size] = self.ydata 

525 self._growbuffer[self.ydata.size:newlen] = data 

526 self.ydata = self._growbuffer[:newlen] 

527 self.tmax = self.tmin + (newlen-1)*self.deltat 

528 

529 def chop( 

530 self, tmin, tmax, inplace=True, include_last=False, 

531 snap=(round, round), want_incomplete=True): 

532 

533 ''' 

534 Cut the trace to given time span. 

535 

536 If the ``inplace`` argument is True (the default) the trace is cut in 

537 place, otherwise a new trace with the cut part is returned. By 

538 default, the indices where to start and end the trace data array are 

539 determined by rounding of ``tmin`` and ``tmax`` to sampling instances 

540 using Python's :py:func:`round` function. This behaviour can be changed 

541 with the ``snap`` argument, which takes a tuple of two functions (one 

542 for the lower and one for the upper end) to be used instead of 

543 :py:func:`round`. The last sample is by default not included unless 

544 ``include_last`` is set to True. If the given time span exceeds the 

545 available time span of the trace, the available part is returned, 

546 unless ``want_incomplete`` is set to False - in that case, a 

547 :py:exc:`NoData` exception is raised. This exception is always raised, 

548 when the requested time span does dot overlap with the trace's time 

549 span. 

550 ''' 

551 

552 if want_incomplete: 

553 if tmax <= self.tmin-self.deltat or self.tmax+self.deltat < tmin: 

554 raise NoData() 

555 else: 

556 if tmin < self.tmin or self.tmax < tmax: 

557 raise NoData() 

558 

559 ibeg = max(0, t2ind(tmin-self.tmin, self.deltat, snap[0])) 

560 iplus = 0 

561 if include_last: 

562 iplus = 1 

563 

564 iend = min( 

565 self.data_len(), 

566 t2ind(tmax-self.tmin, self.deltat, snap[1])+iplus) 

567 

568 if ibeg >= iend: 

569 raise NoData() 

570 

571 obj = self 

572 if not inplace: 

573 obj = self.copy(data=False) 

574 

575 self.drop_growbuffer() 

576 if self.ydata is not None: 

577 obj.ydata = self.ydata[ibeg:iend].copy() 

578 else: 

579 obj.ydata = None 

580 

581 obj.tmin = obj.tmin+ibeg*obj.deltat 

582 obj.tmax = obj.tmin+((iend-ibeg)-1)*obj.deltat 

583 

584 obj._update_ids() 

585 

586 return obj 

587 

588 def downsample(self, ndecimate, snap=False, initials=None, demean=False): 

589 ''' 

590 Downsample trace by a given integer factor. 

591 

592 :param ndecimate: decimation factor, avoid values larger than 8 

593 :param snap: whether to put the new sampling instances closest to 

594 multiples of the sampling rate. 

595 :param initials: ``None``, ``True``, or initial conditions for the 

596 anti-aliasing filter, obtained from a previous run. In the latter 

597 two cases the final state of the filter is returned instead of 

598 ``None``. 

599 :param demean: whether to demean the signal before filtering. 

600 ''' 

601 

602 newdeltat = self.deltat*ndecimate 

603 if snap: 

604 ilag = int(round( 

605 (math.ceil(self.tmin / newdeltat) * newdeltat - self.tmin) 

606 / self.deltat)) 

607 else: 

608 ilag = 0 

609 

610 if snap and ilag > 0 and ilag < self.ydata.size: 

611 data = self.ydata.astype(num.float64) 

612 self.tmin += ilag*self.deltat 

613 else: 

614 data = self.ydata.astype(num.float64) 

615 

616 if demean: 

617 data -= num.mean(data) 

618 

619 if data.size != 0: 

620 result = util.decimate( 

621 data, ndecimate, ftype='fir', zi=initials, ioff=ilag) 

622 else: 

623 result = data 

624 

625 if initials is None: 

626 self.ydata, finals = result, None 

627 else: 

628 self.ydata, finals = result 

629 

630 self.deltat = reuse(self.deltat*ndecimate) 

631 self.tmax = self.tmin+(len(self.ydata)-1)*self.deltat 

632 self._update_ids() 

633 

634 return finals 

635 

636 def downsample_to(self, deltat, snap=False, allow_upsample_max=1, 

637 initials=None, demean=False): 

638 

639 ''' 

640 Downsample to given sampling rate. 

641 

642 Tries to downsample the trace to a target sampling interval of 

643 ``deltat``. This runs the :py:meth:`Trace.downsample` one or several 

644 times. If allow_upsample_max is set to a value larger than 1, 

645 intermediate upsampling steps are allowed, in order to increase the 

646 number of possible downsampling ratios. 

647 

648 If the requested ratio is not supported, an exception of type 

649 :py:exc:`pyrocko.util.UnavailableDecimation` is raised. 

650 ''' 

651 

652 ratio = deltat/self.deltat 

653 rratio = round(ratio) 

654 

655 ok = False 

656 for upsratio in range(1, allow_upsample_max+1): 

657 dratio = (upsratio/self.deltat) / (1./deltat) 

658 if abs(dratio - round(dratio)) / dratio < 0.0001 and \ 

659 util.decitab(int(round(dratio))): 

660 

661 ok = True 

662 break 

663 

664 if not ok: 

665 raise util.UnavailableDecimation('ratio = %g' % ratio) 

666 

667 if upsratio > 1: 

668 self.drop_growbuffer() 

669 ydata = self.ydata 

670 self.ydata = num.zeros( 

671 ydata.size*upsratio-(upsratio-1), ydata.dtype) 

672 self.ydata[::upsratio] = ydata 

673 for i in range(1, upsratio): 

674 self.ydata[i::upsratio] = \ 

675 float(i)/upsratio * ydata[:-1] \ 

676 + float(upsratio-i)/upsratio * ydata[1:] 

677 self.deltat = self.deltat/upsratio 

678 

679 ratio = deltat/self.deltat 

680 rratio = round(ratio) 

681 

682 deci_seq = util.decitab(int(rratio)) 

683 finals = [] 

684 for i, ndecimate in enumerate(deci_seq): 

685 if ndecimate != 1: 

686 xinitials = None 

687 if initials is not None: 

688 xinitials = initials[i] 

689 finals.append(self.downsample( 

690 ndecimate, snap=snap, initials=xinitials, demean=demean)) 

691 

692 if initials is not None: 

693 return finals 

694 

695 def resample(self, deltat): 

696 ''' 

697 Resample to given sampling rate ``deltat``. 

698 

699 Resampling is performed in the frequency domain. 

700 ''' 

701 

702 ndata = self.ydata.size 

703 ntrans = nextpow2(ndata) 

704 fntrans2 = ntrans * self.deltat/deltat 

705 ntrans2 = int(round(fntrans2)) 

706 deltat2 = self.deltat * float(ntrans)/float(ntrans2) 

707 ndata2 = int(round(ndata*self.deltat/deltat2)) 

708 if abs(fntrans2 - ntrans2) > 1e-7: 

709 logger.warning( 

710 'resample: requested deltat %g could not be matched exactly: ' 

711 '%g' % (deltat, deltat2)) 

712 

713 data = self.ydata 

714 data_pad = num.zeros(ntrans, dtype=float) 

715 data_pad[:ndata] = data 

716 fdata = num.fft.rfft(data_pad) 

717 fdata2 = num.zeros((ntrans2+1)//2, dtype=fdata.dtype) 

718 n = min(fdata.size, fdata2.size) 

719 fdata2[:n] = fdata[:n] 

720 data2 = num.fft.irfft(fdata2) 

721 data2 = data2[:ndata2] 

722 data2 *= float(ntrans2) / float(ntrans) 

723 self.deltat = deltat2 

724 self.set_ydata(data2) 

725 

726 def resample_simple(self, deltat): 

727 tyear = 3600*24*365. 

728 

729 if deltat == self.deltat: 

730 return 

731 

732 if abs(self.deltat - deltat) * tyear/deltat < deltat: 

733 logger.warning( 

734 'resample_simple: less than one sample would have to be ' 

735 'inserted/deleted per year. Doing nothing.') 

736 return 

737 

738 ninterval = int(round(deltat / (self.deltat - deltat))) 

739 if abs(ninterval) < 20: 

740 logger.error( 

741 'resample_simple: sample insertion/deletion interval less ' 

742 'than 20. results would be erroneous.') 

743 raise ResamplingFailed() 

744 

745 delete = False 

746 if ninterval < 0: 

747 ninterval = - ninterval 

748 delete = True 

749 

750 tyearbegin = util.year_start(self.tmin) 

751 

752 nmin = int(round((self.tmin - tyearbegin)/deltat)) 

753 

754 ibegin = (((nmin-1)//ninterval)+1) * ninterval - nmin 

755 nindices = (len(self.ydata) - ibegin - 1) / ninterval + 1 

756 if nindices > 0: 

757 indices = ibegin + num.arange(nindices) * ninterval 

758 data_split = num.split(self.ydata, indices) 

759 data = [] 

760 for ln, h in zip(data_split[:-1], data_split[1:]): 

761 if delete: 

762 ln = ln[:-1] 

763 

764 data.append(ln) 

765 if not delete: 

766 if ln.size == 0: 

767 v = h[0] 

768 else: 

769 v = 0.5*(ln[-1] + h[0]) 

770 data.append(num.array([v], dtype=ln.dtype)) 

771 

772 data.append(data_split[-1]) 

773 

774 ydata_new = num.concatenate(data) 

775 

776 self.tmin = tyearbegin + nmin * deltat 

777 self.deltat = deltat 

778 self.set_ydata(ydata_new) 

779 

780 def stretch(self, tmin_new, tmax_new): 

781 ''' 

782 Stretch signal while preserving sample rate using sinc interpolation. 

783 

784 :param tmin_new: new time of first sample 

785 :param tmax_new: new time of last sample 

786 

787 This method can be used to correct for a small linear time drift or to 

788 introduce sub-sample time shifts. The amount of stretching is limited 

789 to 10% by the implementation and is expected to be much smaller than 

790 that by the approximations used. 

791 ''' 

792 

793 from pyrocko import signal_ext 

794 

795 i_control = num.array([0, self.ydata.size-1], dtype=num.int64) 

796 t_control = num.array([tmin_new, tmax_new], dtype=float) 

797 

798 r = (tmax_new - tmin_new) / self.deltat + 1.0 

799 n_new = int(round(r)) 

800 if abs(n_new - r) > 0.001: 

801 n_new = int(math.floor(r)) 

802 

803 assert n_new >= 2 

804 

805 tmax_new = tmin_new + (n_new-1) * self.deltat 

806 

807 ydata_new = num.empty(n_new, dtype=float) 

808 signal_ext.antidrift(i_control, t_control, 

809 self.ydata.astype(float), 

810 tmin_new, self.deltat, ydata_new) 

811 

812 self.tmin = tmin_new 

813 self.set_ydata(ydata_new) 

814 self._update_ids() 

815 

816 def nyquist_check(self, frequency, intro='Corner frequency', warn=True, 

817 raise_exception=False): 

818 

819 ''' 

820 Check if a given frequency is above the Nyquist frequency of the trace. 

821 

822 :param intro: string used to introduce the warning/error message 

823 :param warn: whether to emit a warning 

824 :param raise_exception: whether to raise an :py:exc:`AboveNyquist` 

825 exception. 

826 ''' 

827 

828 if frequency >= 0.5/self.deltat: 

829 message = '%s (%g Hz) is equal to or higher than nyquist ' \ 

830 'frequency (%g Hz). (Trace %s)' \ 

831 % (intro, frequency, 0.5/self.deltat, self.name()) 

832 if warn: 

833 logger.warning(message) 

834 if raise_exception: 

835 raise AboveNyquist(message) 

836 

837 def lowpass(self, order, corner, nyquist_warn=True, 

838 nyquist_exception=False, demean=True): 

839 

840 ''' 

841 Apply Butterworth lowpass to the trace. 

842 

843 :param order: order of the filter 

844 :param corner: corner frequency of the filter 

845 

846 Mean is removed before filtering. 

847 ''' 

848 

849 self.nyquist_check( 

850 corner, 'Corner frequency of lowpass', nyquist_warn, 

851 nyquist_exception) 

852 

853 (b, a) = _get_cached_filter_coefs( 

854 order, [corner*2.0*self.deltat], btype='low') 

855 

856 if len(a) != order+1 or len(b) != order+1: 

857 logger.warning( 

858 'Erroneous filter coefficients returned by ' 

859 'scipy.signal.butter(). You may need to downsample the ' 

860 'signal before filtering.') 

861 

862 data = self.ydata.astype(num.float64) 

863 if demean: 

864 data -= num.mean(data) 

865 self.drop_growbuffer() 

866 self.ydata = signal.lfilter(b, a, data) 

867 

868 def highpass(self, order, corner, nyquist_warn=True, 

869 nyquist_exception=False, demean=True): 

870 

871 ''' 

872 Apply butterworth highpass to the trace. 

873 

874 :param order: order of the filter 

875 :param corner: corner frequency of the filter 

876 

877 Mean is removed before filtering. 

878 ''' 

879 

880 self.nyquist_check( 

881 corner, 'Corner frequency of highpass', nyquist_warn, 

882 nyquist_exception) 

883 

884 (b, a) = _get_cached_filter_coefs( 

885 order, [corner*2.0*self.deltat], btype='high') 

886 

887 data = self.ydata.astype(num.float64) 

888 if len(a) != order+1 or len(b) != order+1: 

889 logger.warning( 

890 'Erroneous filter coefficients returned by ' 

891 'scipy.signal.butter(). You may need to downsample the ' 

892 'signal before filtering.') 

893 if demean: 

894 data -= num.mean(data) 

895 self.drop_growbuffer() 

896 self.ydata = signal.lfilter(b, a, data) 

897 

898 def bandpass(self, order, corner_hp, corner_lp, demean=True): 

899 ''' 

900 Apply butterworth bandpass to the trace. 

901 

902 :param order: order of the filter 

903 :param corner_hp: lower corner frequency of the filter 

904 :param corner_lp: upper corner frequency of the filter 

905 

906 Mean is removed before filtering. 

907 ''' 

908 

909 self.nyquist_check(corner_hp, 'Lower corner frequency of bandpass') 

910 self.nyquist_check(corner_lp, 'Higher corner frequency of bandpass') 

911 (b, a) = _get_cached_filter_coefs( 

912 order, 

913 [corner*2.0*self.deltat for corner in (corner_hp, corner_lp)], 

914 btype='band') 

915 data = self.ydata.astype(num.float64) 

916 if demean: 

917 data -= num.mean(data) 

918 self.drop_growbuffer() 

919 self.ydata = signal.lfilter(b, a, data) 

920 

921 def bandstop(self, order, corner_hp, corner_lp, demean=True): 

922 ''' 

923 Apply bandstop (attenuates frequencies in band) to the trace. 

924 

925 :param order: order of the filter 

926 :param corner_hp: lower corner frequency of the filter 

927 :param corner_lp: upper corner frequency of the filter 

928 

929 Mean is removed before filtering. 

930 ''' 

931 

932 self.nyquist_check(corner_hp, 'Lower corner frequency of bandstop') 

933 self.nyquist_check(corner_lp, 'Higher corner frequency of bandstop') 

934 (b, a) = _get_cached_filter_coefs( 

935 order, 

936 [corner*2.0*self.deltat for corner in (corner_hp, corner_lp)], 

937 btype='bandstop') 

938 data = self.ydata.astype(num.float64) 

939 if demean: 

940 data -= num.mean(data) 

941 self.drop_growbuffer() 

942 self.ydata = signal.lfilter(b, a, data) 

943 

944 def envelope(self, inplace=True): 

945 ''' 

946 Calculate the envelope of the trace. 

947 

948 :param inplace: calculate envelope in place 

949 

950 The calculation follows: 

951 

952 .. math:: 

953 

954 Y' = \\sqrt{Y^2+H(Y)^2} 

955 

956 where H is the Hilbert-Transform of the signal Y. 

957 ''' 

958 

959 y = self.ydata.astype(float) 

960 env = num.abs(hilbert(y)) 

961 if inplace: 

962 self.drop_growbuffer() 

963 self.ydata = env 

964 else: 

965 tr = self.copy(data=False) 

966 tr.ydata = env 

967 return tr 

968 

969 def taper(self, taperer, inplace=True, chop=False): 

970 ''' 

971 Apply a :py:class:`Taper` to the trace. 

972 

973 :param taperer: instance of :py:class:`Taper` subclass 

974 :param inplace: apply taper inplace 

975 :param chop: if ``True``: exclude tapered parts from the resulting 

976 trace 

977 ''' 

978 

979 if not inplace: 

980 tr = self.copy() 

981 else: 

982 tr = self 

983 

984 if chop: 

985 i, n = taperer.span(tr.ydata, tr.tmin, tr.deltat) 

986 tr.shift(i*tr.deltat) 

987 tr.set_ydata(tr.ydata[i:i+n]) 

988 

989 taperer(tr.ydata, tr.tmin, tr.deltat) 

990 

991 if not inplace: 

992 return tr 

993 

994 def whiten(self, order=6): 

995 ''' 

996 Whiten signal in time domain using autoregression and recursive filter. 

997 

998 :param order: order of the autoregression process 

999 ''' 

1000 

1001 b, a = self.whitening_coefficients(order) 

1002 self.drop_growbuffer() 

1003 self.ydata = signal.lfilter(b, a, self.ydata) 

1004 

1005 def whitening_coefficients(self, order=6): 

1006 ar = yulewalker(self.ydata, order) 

1007 b, a = [1.] + ar.tolist(), [1.] 

1008 return b, a 

1009 

1010 def ampspec_whiten( 

1011 self, 

1012 width, 

1013 td_taper='auto', 

1014 fd_taper='auto', 

1015 pad_to_pow2=True, 

1016 demean=True): 

1017 

1018 ''' 

1019 Whiten signal via frequency domain using moving average on amplitude 

1020 spectra. 

1021 

1022 :param width: width of smoothing kernel [Hz] 

1023 :param td_taper: time domain taper, object of type :py:class:`Taper` or 

1024 ``None`` or ``'auto'``. 

1025 :param fd_taper: frequency domain taper, object of type 

1026 :py:class:`Taper` or ``None`` or ``'auto'``. 

1027 :param pad_to_pow2: whether to pad the signal with zeros up to a length 

1028 of 2^n 

1029 :param demean: whether to demean the signal before tapering 

1030 

1031 The signal is first demeaned and then tapered using ``td_taper``. Then, 

1032 the spectrum is calculated and inversely weighted with a smoothed 

1033 version of its amplitude spectrum. A moving average is used for the 

1034 smoothing. The smoothed spectrum is then tapered using ``fd_taper``. 

1035 Finally, the smoothed and tapered spectrum is back-transformed into the 

1036 time domain. 

1037 

1038 If ``td_taper`` is set to ``'auto'``, ``CosFader(1.0/width)`` is used. 

1039 If ``fd_taper`` is set to ``'auto'``, ``CosFader(width)`` is used. 

1040 ''' 

1041 

1042 ndata = self.data_len() 

1043 

1044 if pad_to_pow2: 

1045 ntrans = nextpow2(ndata) 

1046 else: 

1047 ntrans = ndata 

1048 

1049 df = 1./(ntrans*self.deltat) 

1050 nw = int(round(width/df)) 

1051 if ndata//2+1 <= nw: 

1052 raise TraceTooShort( 

1053 'Samples in trace: %s, samples needed: %s' % (ndata, nw)) 

1054 

1055 if td_taper == 'auto': 

1056 td_taper = CosFader(1./width) 

1057 

1058 if fd_taper == 'auto': 

1059 fd_taper = CosFader(width) 

1060 

1061 if td_taper: 

1062 self.taper(td_taper) 

1063 

1064 ydata = self.get_ydata().astype(float) 

1065 if demean: 

1066 ydata -= ydata.mean() 

1067 

1068 spec = num.fft.rfft(ydata, ntrans) 

1069 

1070 amp = num.abs(spec) 

1071 nspec = amp.size 

1072 csamp = num.cumsum(amp) 

1073 amp_smoothed = num.empty(nspec, dtype=csamp.dtype) 

1074 n1, n2 = nw//2, nw//2 + nspec - nw 

1075 amp_smoothed[n1:n2] = (csamp[nw:] - csamp[:-nw]) / nw 

1076 amp_smoothed[:n1] = amp_smoothed[n1] 

1077 amp_smoothed[n2:] = amp_smoothed[n2-1] 

1078 

1079 denom = amp_smoothed * amp 

1080 numer = amp 

1081 eps = num.mean(denom) * 1e-9 

1082 if eps == 0.0: 

1083 eps = 1e-9 

1084 

1085 numer += eps 

1086 denom += eps 

1087 spec *= numer/denom 

1088 

1089 if fd_taper: 

1090 fd_taper(spec, 0., df) 

1091 

1092 ydata = num.fft.irfft(spec) 

1093 self.set_ydata(ydata[:ndata]) 

1094 

1095 def _get_cached_freqs(self, nf, deltaf): 

1096 ck = (nf, deltaf) 

1097 if ck not in Trace.cached_frequencies: 

1098 Trace.cached_frequencies[ck] = deltaf * num.arange( 

1099 nf, dtype=float) 

1100 

1101 return Trace.cached_frequencies[ck] 

1102 

1103 def bandpass_fft(self, corner_hp, corner_lp): 

1104 ''' 

1105 Apply boxcar bandbpass to trace (in spectral domain). 

1106 ''' 

1107 

1108 n = len(self.ydata) 

1109 n2 = nextpow2(n) 

1110 data = num.zeros(n2, dtype=num.float64) 

1111 data[:n] = self.ydata 

1112 fdata = num.fft.rfft(data) 

1113 freqs = self._get_cached_freqs(len(fdata), 1./(self.deltat*n2)) 

1114 fdata[0] = 0.0 

1115 fdata *= num.logical_and(corner_hp < freqs, freqs < corner_lp) 

1116 data = num.fft.irfft(fdata) 

1117 self.drop_growbuffer() 

1118 self.ydata = data[:n] 

1119 

1120 def shift(self, tshift): 

1121 ''' 

1122 Time shift the trace. 

1123 ''' 

1124 

1125 self.tmin += tshift 

1126 self.tmax += tshift 

1127 self._update_ids() 

1128 

1129 def snap(self, inplace=True, interpolate=False): 

1130 ''' 

1131 Shift trace samples to nearest even multiples of the sampling rate. 

1132 

1133 :param inplace: (boolean) snap traces inplace 

1134 

1135 If ``inplace`` is ``False`` and the difference of tmin and tmax of 

1136 both, the snapped and the original trace is smaller than 0.01 x deltat, 

1137 :py:func:`snap` returns the unsnapped instance of the original trace. 

1138 ''' 

1139 

1140 tmin = round(self.tmin/self.deltat)*self.deltat 

1141 tmax = tmin + (self.ydata.size-1)*self.deltat 

1142 

1143 if inplace: 

1144 xself = self 

1145 else: 

1146 if abs(self.tmin - tmin) < 1e-2*self.deltat and \ 

1147 abs(self.tmax - tmax) < 1e-2*self.deltat: 

1148 return self 

1149 

1150 xself = self.copy() 

1151 

1152 if interpolate: 

1153 from pyrocko import signal_ext 

1154 n = xself.data_len() 

1155 ydata_new = num.empty(n, dtype=float) 

1156 i_control = num.array([0, n-1], dtype=num.int64) 

1157 tref = tmin 

1158 t_control = num.array( 

1159 [float(xself.tmin-tref), float(xself.tmax-tref)], 

1160 dtype=float) 

1161 

1162 signal_ext.antidrift(i_control, t_control, 

1163 xself.ydata.astype(float), 

1164 float(tmin-tref), xself.deltat, ydata_new) 

1165 

1166 xself.ydata = ydata_new 

1167 

1168 xself.tmin = tmin 

1169 xself.tmax = tmax 

1170 xself._update_ids() 

1171 

1172 return xself 

1173 

1174 def fix_deltat_rounding_errors(self): 

1175 ''' 

1176 Try to undo sampling rate rounding errors. 

1177 

1178 See :py:func:`fix_deltat_rounding_errors`. 

1179 ''' 

1180 

1181 self.deltat = fix_deltat_rounding_errors(self.deltat) 

1182 self.tmax = self.tmin + (self.data_len() - 1) * self.deltat 

1183 

1184 def sta_lta_centered(self, tshort, tlong, quad=True, scalingmethod=1): 

1185 ''' 

1186 Run special STA/LTA filter where the short time window is centered on 

1187 the long time window. 

1188 

1189 :param tshort: length of short time window in [s] 

1190 :param tlong: length of long time window in [s] 

1191 :param quad: whether to square the data prior to applying the STA/LTA 

1192 filter 

1193 :param scalingmethod: integer key to select how output values are 

1194 scaled / normalized (``1``, ``2``, or ``3``) 

1195 

1196 ============= ====================================== =========== 

1197 Scalingmethod Implementation Range 

1198 ============= ====================================== =========== 

1199 ``1`` As/Al* Ts/Tl [0,1] 

1200 ``2`` (As/Al - 1) / (Tl/Ts - 1) [-Ts/Tl,1] 

1201 ``3`` Like ``2`` but clipping range at zero [0,1] 

1202 ============= ====================================== =========== 

1203 

1204 ''' 

1205 

1206 nshort = int(round(tshort/self.deltat)) 

1207 nlong = int(round(tlong/self.deltat)) 

1208 

1209 assert nshort < nlong 

1210 if nlong > len(self.ydata): 

1211 raise TraceTooShort( 

1212 'Samples in trace: %s, samples needed: %s' 

1213 % (len(self.ydata), nlong)) 

1214 

1215 if quad: 

1216 sqrdata = self.ydata**2 

1217 else: 

1218 sqrdata = self.ydata 

1219 

1220 mavg_short = moving_avg(sqrdata, nshort) 

1221 mavg_long = moving_avg(sqrdata, nlong) 

1222 

1223 self.drop_growbuffer() 

1224 

1225 if scalingmethod not in (1, 2, 3): 

1226 raise Exception('Invalid argument to scalingrange argument.') 

1227 

1228 if scalingmethod == 1: 

1229 self.ydata = mavg_short/mavg_long * float(nshort)/float(nlong) 

1230 elif scalingmethod in (2, 3): 

1231 self.ydata = (mavg_short/mavg_long - 1.) \ 

1232 / ((float(nlong)/float(nshort)) - 1) 

1233 

1234 if scalingmethod == 3: 

1235 self.ydata = num.maximum(self.ydata, 0.) 

1236 

1237 def sta_lta_right(self, tshort, tlong, quad=True, scalingmethod=1): 

1238 ''' 

1239 Run special STA/LTA filter where the short time window is overlapping 

1240 with the last part of the long time window. 

1241 

1242 :param tshort: length of short time window in [s] 

1243 :param tlong: length of long time window in [s] 

1244 :param quad: whether to square the data prior to applying the STA/LTA 

1245 filter 

1246 :param scalingmethod: integer key to select how output values are 

1247 scaled / normalized (``1``, ``2``, or ``3``) 

1248 

1249 ============= ====================================== =========== 

1250 Scalingmethod Implementation Range 

1251 ============= ====================================== =========== 

1252 ``1`` As/Al* Ts/Tl [0,1] 

1253 ``2`` (As/Al - 1) / (Tl/Ts - 1) [-Ts/Tl,1] 

1254 ``3`` Like ``2`` but clipping range at zero [0,1] 

1255 ============= ====================================== =========== 

1256 

1257 With ``scalingmethod=1``, the values produced by this variant of the 

1258 STA/LTA are equivalent to 

1259 

1260 .. math:: 

1261 s_i = \\frac{s}{l} \\frac{\\frac{1}{s}\\sum_{j=i}^{i+s-1} f_j} 

1262 {\\frac{1}{l}\\sum_{j=i+s-l}^{i+s-1} f_j} 

1263 

1264 where :math:`f_j` are the input samples, :math:`s` are the number of 

1265 samples in the short time window and :math:`l` are the number of 

1266 samples in the long time window. 

1267 ''' 

1268 

1269 n = self.data_len() 

1270 tmin = self.tmin 

1271 

1272 nshort = max(1, int(round(tshort/self.deltat))) 

1273 nlong = max(1, int(round(tlong/self.deltat))) 

1274 

1275 assert nshort < nlong 

1276 

1277 if nlong > len(self.ydata): 

1278 raise TraceTooShort( 

1279 'Samples in trace: %s, samples needed: %s' 

1280 % (len(self.ydata), nlong)) 

1281 

1282 if quad: 

1283 sqrdata = self.ydata**2 

1284 else: 

1285 sqrdata = self.ydata 

1286 

1287 nshift = int(math.floor(0.5 * (nlong - nshort))) 

1288 if nlong % 2 != 0 and nshort % 2 == 0: 

1289 nshift += 1 

1290 

1291 mavg_short = moving_avg(sqrdata, nshort)[nshift:] 

1292 mavg_long = moving_avg(sqrdata, nlong)[:sqrdata.size-nshift] 

1293 

1294 self.drop_growbuffer() 

1295 

1296 if scalingmethod not in (1, 2, 3): 

1297 raise Exception('Invalid argument to scalingrange argument.') 

1298 

1299 if scalingmethod == 1: 

1300 ydata = mavg_short/mavg_long * nshort/nlong 

1301 elif scalingmethod in (2, 3): 

1302 ydata = (mavg_short/mavg_long - 1.) \ 

1303 / ((float(nlong)/float(nshort)) - 1) 

1304 

1305 if scalingmethod == 3: 

1306 ydata = num.maximum(self.ydata, 0.) 

1307 

1308 self.set_ydata(ydata) 

1309 

1310 self.shift((math.ceil(0.5*nlong) - nshort + 1) * self.deltat) 

1311 

1312 self.chop( 

1313 tmin + (nlong - nshort) * self.deltat, 

1314 tmin + (n - nshort) * self.deltat) 

1315 

1316 def peaks(self, threshold, tsearch, 

1317 deadtime=False, 

1318 nblock_duration_detection=100): 

1319 

1320 ''' 

1321 Detect peaks above a given threshold (method 1). 

1322 

1323 From every instant, where the signal rises above ``threshold``, a time 

1324 length of ``tsearch`` seconds is searched for a maximum. A list with 

1325 tuples (time, value) for each detected peak is returned. The 

1326 ``deadtime`` argument turns on a special deadtime duration detection 

1327 algorithm useful in combination with recursive STA/LTA filters. 

1328 ''' 

1329 

1330 y = self.ydata 

1331 above = num.where(y > threshold, 1, 0) 

1332 deriv = num.zeros(y.size, dtype=num.int8) 

1333 deriv[1:] = above[1:]-above[:-1] 

1334 itrig_positions = num.nonzero(deriv > 0)[0] 

1335 tpeaks = [] 

1336 apeaks = [] 

1337 tzeros = [] 

1338 tzero = self.tmin 

1339 

1340 for itrig_pos in itrig_positions: 

1341 ibeg = itrig_pos 

1342 iend = min( 

1343 len(self.ydata), 

1344 itrig_pos + int(math.ceil(tsearch/self.deltat))) 

1345 ipeak = num.argmax(y[ibeg:iend]) 

1346 tpeak = self.tmin + (ipeak+ibeg)*self.deltat 

1347 apeak = y[ibeg+ipeak] 

1348 

1349 if tpeak < tzero: 

1350 continue 

1351 

1352 if deadtime: 

1353 ibeg = itrig_pos 

1354 iblock = 0 

1355 nblock = nblock_duration_detection 

1356 totalsum = 0. 

1357 while True: 

1358 if ibeg+iblock*nblock >= len(y): 

1359 tzero = self.tmin + (len(y)-1) * self.deltat 

1360 break 

1361 

1362 logy = num.log( 

1363 y[ibeg+iblock*nblock:ibeg+(iblock+1)*nblock]) 

1364 logy[0] += totalsum 

1365 ysum = num.cumsum(logy) 

1366 totalsum = ysum[-1] 

1367 below = num.where(ysum <= 0., 1, 0) 

1368 deriv = num.zeros(ysum.size, dtype=num.int8) 

1369 deriv[1:] = below[1:]-below[:-1] 

1370 izero_positions = num.nonzero(deriv > 0)[0] + iblock*nblock 

1371 if len(izero_positions) > 0: 

1372 tzero = self.tmin + self.deltat * ( 

1373 ibeg + izero_positions[0]) 

1374 break 

1375 iblock += 1 

1376 else: 

1377 tzero = ibeg*self.deltat + self.tmin + tsearch 

1378 

1379 tpeaks.append(tpeak) 

1380 apeaks.append(apeak) 

1381 tzeros.append(tzero) 

1382 

1383 if deadtime: 

1384 return tpeaks, apeaks, tzeros 

1385 else: 

1386 return tpeaks, apeaks 

1387 

1388 def peaks2(self, threshold, tsearch): 

1389 

1390 ''' 

1391 Detect peaks above a given threshold (method 2). 

1392 

1393 This variant of peak detection is a bit more robust (and slower) than 

1394 the one implemented in :py:meth:`Trace.peaks`. First all samples with 

1395 ``a[i-1] < a[i] > a[i+1]`` are masked as potential peaks. From these, 

1396 iteratively the one with the maximum amplitude ``a[j]`` and time 

1397 ``t[j]`` is choosen and potential peaks within 

1398 ``t[j] - tsearch, t[j] + tsearch`` 

1399 are discarded. The algorithm stops, when ``a[j] < threshold`` or when 

1400 no more potential peaks are left. 

1401 ''' 

1402 

1403 a = num.copy(self.ydata) 

1404 

1405 amin = num.min(a) 

1406 

1407 a[0] = amin 

1408 a[1: -1][num.diff(a, 2) <= 0.] = amin 

1409 a[-1] = amin 

1410 

1411 data = [] 

1412 while True: 

1413 imax = num.argmax(a) 

1414 amax = a[imax] 

1415 

1416 if amax < threshold or amax == amin: 

1417 break 

1418 

1419 data.append((self.tmin + imax * self.deltat, amax)) 

1420 

1421 ntsearch = int(round(tsearch / self.deltat)) 

1422 a[max(imax-ntsearch//2, 0):min(imax+ntsearch//2, a.size)] = amin 

1423 

1424 if data: 

1425 data.sort() 

1426 tpeaks, apeaks = list(zip(*data)) 

1427 else: 

1428 tpeaks, apeaks = [], [] 

1429 

1430 return tpeaks, apeaks 

1431 

1432 def extend(self, tmin=None, tmax=None, fillmethod='zeros'): 

1433 ''' 

1434 Extend trace to given span. 

1435 

1436 :param tmin: begin time of new span 

1437 :param tmax: end time of new span 

1438 :param fillmethod: ``'zeros'``, ``'repeat'``, ``'mean'``, or 

1439 ``'median'`` 

1440 ''' 

1441 

1442 nold = self.ydata.size 

1443 

1444 if tmin is not None: 

1445 nl = min(0, int(round((tmin-self.tmin)/self.deltat))) 

1446 else: 

1447 nl = 0 

1448 

1449 if tmax is not None: 

1450 nh = max(nold - 1, int(round((tmax-self.tmin)/self.deltat))) 

1451 else: 

1452 nh = nold - 1 

1453 

1454 n = nh - nl + 1 

1455 data = num.zeros(n, dtype=self.ydata.dtype) 

1456 data[-nl:-nl + nold] = self.ydata 

1457 if self.ydata.size >= 1: 

1458 if fillmethod == 'repeat': 

1459 data[:-nl] = self.ydata[0] 

1460 data[-nl + nold:] = self.ydata[-1] 

1461 elif fillmethod == 'median': 

1462 v = num.median(self.ydata) 

1463 data[:-nl] = v 

1464 data[-nl + nold:] = v 

1465 elif fillmethod == 'mean': 

1466 v = num.mean(self.ydata) 

1467 data[:-nl] = v 

1468 data[-nl + nold:] = v 

1469 

1470 self.drop_growbuffer() 

1471 self.ydata = data 

1472 

1473 self.tmin += nl * self.deltat 

1474 self.tmax = self.tmin + (self.ydata.size - 1) * self.deltat 

1475 

1476 self._update_ids() 

1477 

1478 def transfer(self, 

1479 tfade=0., 

1480 freqlimits=None, 

1481 transfer_function=None, 

1482 cut_off_fading=True, 

1483 demean=True, 

1484 invert=False): 

1485 

1486 ''' 

1487 Return new trace with transfer function applied (convolution). 

1488 

1489 :param tfade: rise/fall time in seconds of taper applied in timedomain 

1490 at both ends of trace. 

1491 :param freqlimits: 4-tuple with corner frequencies in Hz. 

1492 :param transfer_function: FrequencyResponse object; must provide a 

1493 method 'evaluate(freqs)', which returns the transfer function 

1494 coefficients at the frequencies 'freqs'. 

1495 :param cut_off_fading: whether to cut off rise/fall interval in output 

1496 trace. 

1497 :param demean: remove mean before applying transfer function 

1498 :param invert: set to True to do a deconvolution 

1499 ''' 

1500 

1501 if transfer_function is None: 

1502 transfer_function = FrequencyResponse() 

1503 

1504 if self.tmax - self.tmin <= tfade*2.: 

1505 raise TraceTooShort( 

1506 'Trace %s.%s.%s.%s too short for fading length setting. ' 

1507 'trace length = %g, fading length = %g' 

1508 % (self.nslc_id + (self.tmax-self.tmin, tfade))) 

1509 

1510 if freqlimits is None and ( 

1511 transfer_function is None or transfer_function.is_scalar()): 

1512 

1513 # special case for flat responses 

1514 

1515 output = self.copy() 

1516 data = self.ydata 

1517 ndata = data.size 

1518 

1519 if transfer_function is not None: 

1520 c = num.abs(transfer_function.evaluate(num.ones(1))[0]) 

1521 

1522 if invert: 

1523 c = 1.0/c 

1524 

1525 data *= c 

1526 

1527 if tfade != 0.0: 

1528 data *= costaper( 

1529 0., tfade, self.deltat*(ndata-1)-tfade, self.deltat*ndata, 

1530 ndata, self.deltat) 

1531 

1532 output.ydata = data 

1533 

1534 else: 

1535 ndata = self.ydata.size 

1536 ntrans = nextpow2(ndata*1.2) 

1537 coefs = self._get_tapered_coefs( 

1538 ntrans, freqlimits, transfer_function, invert=invert) 

1539 

1540 data = self.ydata 

1541 

1542 data_pad = num.zeros(ntrans, dtype=float) 

1543 data_pad[:ndata] = data 

1544 if demean: 

1545 data_pad[:ndata] -= data.mean() 

1546 

1547 if tfade != 0.0: 

1548 data_pad[:ndata] *= costaper( 

1549 0., tfade, self.deltat*(ndata-1)-tfade, self.deltat*ndata, 

1550 ndata, self.deltat) 

1551 

1552 fdata = num.fft.rfft(data_pad) 

1553 fdata *= coefs 

1554 ddata = num.fft.irfft(fdata) 

1555 output = self.copy() 

1556 output.ydata = ddata[:ndata] 

1557 

1558 if cut_off_fading and tfade != 0.0: 

1559 try: 

1560 output.chop(output.tmin+tfade, output.tmax-tfade, inplace=True) 

1561 except NoData: 

1562 raise TraceTooShort( 

1563 'Trace %s.%s.%s.%s too short for fading length setting. ' 

1564 'trace length = %g, fading length = %g' 

1565 % (self.nslc_id + (self.tmax-self.tmin, tfade))) 

1566 else: 

1567 output.ydata = output.ydata.copy() 

1568 

1569 return output 

1570 

1571 def differentiate(self, n=1, order=4, inplace=True): 

1572 ''' 

1573 Approximate first or second derivative of the trace. 

1574 

1575 :param n: 1 for first derivative, 2 for second 

1576 :param order: order of the approximation 2 and 4 are supported 

1577 :param inplace: if ``True`` the trace is differentiated in place, 

1578 otherwise a new trace object with the derivative is returned. 

1579 

1580 Raises :py:exc:`ValueError` for unsupported `n` or `order`. 

1581 

1582 See :py:func:`~pyrocko.util.diff_fd` for implementation details. 

1583 ''' 

1584 

1585 ddata = util.diff_fd(n, order, self.deltat, self.ydata) 

1586 

1587 if inplace: 

1588 self.ydata = ddata 

1589 else: 

1590 output = self.copy(data=False) 

1591 output.set_ydata(ddata) 

1592 return output 

1593 

1594 def drop_chain_cache(self): 

1595 if self._pchain: 

1596 self._pchain.clear() 

1597 

1598 def init_chain(self): 

1599 self._pchain = pchain.Chain( 

1600 do_downsample, 

1601 do_extend, 

1602 do_pre_taper, 

1603 do_fft, 

1604 do_filter, 

1605 do_ifft) 

1606 

1607 def run_chain(self, tmin, tmax, deltat, setup, nocache): 

1608 if setup.domain == 'frequency_domain': 

1609 _, _, data = self._pchain( 

1610 (self, deltat), 

1611 (tmin, tmax), 

1612 (setup.taper,), 

1613 (setup.filter,), 

1614 (setup.filter,), 

1615 nocache=nocache) 

1616 

1617 return num.abs(data), num.abs(data) 

1618 

1619 else: 

1620 processed = self._pchain( 

1621 (self, deltat), 

1622 (tmin, tmax), 

1623 (setup.taper,), 

1624 (setup.filter,), 

1625 (setup.filter,), 

1626 (), 

1627 nocache=nocache) 

1628 

1629 if setup.domain == 'time_domain': 

1630 data = processed.get_ydata() 

1631 

1632 elif setup.domain == 'envelope': 

1633 processed = processed.envelope(inplace=False) 

1634 

1635 elif setup.domain == 'absolute': 

1636 processed.set_ydata(num.abs(processed.get_ydata())) 

1637 

1638 return processed.get_ydata(), processed 

1639 

1640 def misfit(self, candidate, setup, nocache=False, debug=False): 

1641 ''' 

1642 Calculate misfit and normalization factor against candidate trace. 

1643 

1644 :param candidate: :py:class:`Trace` object 

1645 :param setup: :py:class:`MisfitSetup` object 

1646 :returns: tuple ``(m, n)``, where m is the misfit value and n is the 

1647 normalization divisor 

1648 

1649 If the sampling rates of ``self`` and ``candidate`` differ, the trace 

1650 with the higher sampling rate will be downsampled. 

1651 ''' 

1652 

1653 a = self 

1654 b = candidate 

1655 

1656 for tr in (a, b): 

1657 if not tr._pchain: 

1658 tr.init_chain() 

1659 

1660 deltat = max(a.deltat, b.deltat) 

1661 tmin = min(a.tmin, b.tmin) - deltat 

1662 tmax = max(a.tmax, b.tmax) + deltat 

1663 

1664 adata, aproc = a.run_chain(tmin, tmax, deltat, setup, nocache) 

1665 bdata, bproc = b.run_chain(tmin, tmax, deltat, setup, nocache) 

1666 

1667 if setup.domain != 'cc_max_norm': 

1668 m, n = Lx_norm(bdata, adata, norm=setup.norm) 

1669 else: 

1670 ctr = correlate(aproc, bproc, mode='full', normalization='normal') 

1671 ccmax = ctr.max()[1] 

1672 m = 0.5 - 0.5 * ccmax 

1673 n = 0.5 

1674 

1675 if debug: 

1676 return m, n, aproc, bproc 

1677 else: 

1678 return m, n 

1679 

1680 def spectrum(self, pad_to_pow2=False, tfade=None): 

1681 ''' 

1682 Get FFT spectrum of trace. 

1683 

1684 :param pad_to_pow2: whether to zero-pad the data to next larger 

1685 power-of-two length 

1686 :param tfade: ``None`` or a time length in seconds, to apply cosine 

1687 shaped tapers to both 

1688 

1689 :returns: a tuple with (frequencies, values) 

1690 ''' 

1691 

1692 ndata = self.ydata.size 

1693 

1694 if pad_to_pow2: 

1695 ntrans = nextpow2(ndata) 

1696 else: 

1697 ntrans = ndata 

1698 

1699 if tfade is None: 

1700 ydata = self.ydata 

1701 else: 

1702 ydata = self.ydata * costaper( 

1703 0., tfade, self.deltat*(ndata-1)-tfade, self.deltat*ndata, 

1704 ndata, self.deltat) 

1705 

1706 fydata = num.fft.rfft(ydata, ntrans) 

1707 df = 1./(ntrans*self.deltat) 

1708 fxdata = num.arange(len(fydata))*df 

1709 return fxdata, fydata 

1710 

1711 def multi_filter(self, filter_freqs, bandwidth): 

1712 

1713 class Gauss(FrequencyResponse): 

1714 def __init__(self, f0, a=1.0): 

1715 self._omega0 = 2.*math.pi*f0 

1716 self._a = a 

1717 

1718 def evaluate(self, freqs): 

1719 omega = 2.*math.pi*freqs 

1720 return num.exp(-((omega-self._omega0) 

1721 / (self._a*self._omega0))**2) 

1722 

1723 freqs, coefs = self.spectrum() 

1724 n = self.data_len() 

1725 nfilt = len(filter_freqs) 

1726 signal_tf = num.zeros((nfilt, n)) 

1727 centroid_freqs = num.zeros(nfilt) 

1728 for ifilt, f0 in enumerate(filter_freqs): 

1729 taper = Gauss(f0, a=bandwidth) 

1730 weights = taper.evaluate(freqs) 

1731 nhalf = freqs.size 

1732 analytic_spec = num.zeros(n, dtype=complex) 

1733 analytic_spec[:nhalf] = coefs*weights 

1734 

1735 enorm = num.abs(analytic_spec[:nhalf])**2 

1736 enorm /= num.sum(enorm) 

1737 

1738 if n % 2 == 0: 

1739 analytic_spec[1:nhalf-1] *= 2. 

1740 else: 

1741 analytic_spec[1:nhalf] *= 2. 

1742 

1743 analytic = num.fft.ifft(analytic_spec) 

1744 signal_tf[ifilt, :] = num.abs(analytic) 

1745 

1746 enorm = num.abs(analytic_spec[:nhalf])**2 

1747 enorm /= num.sum(enorm) 

1748 centroid_freqs[ifilt] = num.sum(freqs*enorm) 

1749 

1750 return centroid_freqs, signal_tf 

1751 

1752 def _get_tapered_coefs( 

1753 self, ntrans, freqlimits, transfer_function, invert=False): 

1754 

1755 deltaf = 1./(self.deltat*ntrans) 

1756 nfreqs = ntrans//2 + 1 

1757 transfer = num.ones(nfreqs, dtype=complex) 

1758 hi = snapper(nfreqs, deltaf) 

1759 if freqlimits is not None: 

1760 a, b, c, d = freqlimits 

1761 freqs = num.arange(hi(d)-hi(a), dtype=float)*deltaf \ 

1762 + hi(a)*deltaf 

1763 

1764 if invert: 

1765 coeffs = transfer_function.evaluate(freqs) 

1766 if num.any(coeffs == 0.0): 

1767 raise InfiniteResponse('%s.%s.%s.%s' % self.nslc_id) 

1768 

1769 transfer[hi(a):hi(d)] = 1.0 / transfer_function.evaluate(freqs) 

1770 else: 

1771 transfer[hi(a):hi(d)] = transfer_function.evaluate(freqs) 

1772 

1773 tapered_transfer = costaper(a, b, c, d, nfreqs, deltaf)*transfer 

1774 else: 

1775 if invert: 

1776 raise Exception( 

1777 'transfer: `freqlimits` must be given when `invert` is ' 

1778 'set to `True`') 

1779 

1780 freqs = num.arange(nfreqs) * deltaf 

1781 tapered_transfer = transfer_function.evaluate(freqs) 

1782 

1783 tapered_transfer[0] = 0.0 # don't introduce static offsets 

1784 return tapered_transfer 

1785 

1786 def fill_template(self, template, **additional): 

1787 ''' 

1788 Fill string template with trace metadata. 

1789 

1790 Uses normal python '%(placeholder)s' string templates. The following 

1791 placeholders are considered: ``network``, ``station``, ``location``, 

1792 ``channel``, ``tmin`` (time of first sample), ``tmax`` (time of last 

1793 sample), ``tmin_ms``, ``tmax_ms``, ``tmin_us``, ``tmax_us``, 

1794 ``tmin_year``, ``tmax_year``, ``julianday``. The variants ending with 

1795 ``'_ms'`` include milliseconds, those with ``'_us'`` include 

1796 microseconds, those with ``'_year'`` contain only the year. 

1797 ''' 

1798 

1799 template = template.replace('%n', '%(network)s')\ 

1800 .replace('%s', '%(station)s')\ 

1801 .replace('%l', '%(location)s')\ 

1802 .replace('%c', '%(channel)s')\ 

1803 .replace('%b', '%(tmin)s')\ 

1804 .replace('%e', '%(tmax)s')\ 

1805 .replace('%j', '%(julianday)s') 

1806 

1807 params = dict( 

1808 zip(('network', 'station', 'location', 'channel'), self.nslc_id)) 

1809 params['tmin'] = util.time_to_str( 

1810 self.tmin, format='%Y-%m-%d_%H-%M-%S') 

1811 params['tmax'] = util.time_to_str( 

1812 self.tmax, format='%Y-%m-%d_%H-%M-%S') 

1813 params['tmin_ms'] = util.time_to_str( 

1814 self.tmin, format='%Y-%m-%d_%H-%M-%S.3FRAC') 

1815 params['tmax_ms'] = util.time_to_str( 

1816 self.tmax, format='%Y-%m-%d_%H-%M-%S.3FRAC') 

1817 params['tmin_us'] = util.time_to_str( 

1818 self.tmin, format='%Y-%m-%d_%H-%M-%S.6FRAC') 

1819 params['tmax_us'] = util.time_to_str( 

1820 self.tmax, format='%Y-%m-%d_%H-%M-%S.6FRAC') 

1821 params['tmin_year'] = util.time_to_str( 

1822 self.tmin, format='%Y') 

1823 params['tmax_year'] = util.time_to_str( 

1824 self.tmax, format='%Y') 

1825 params['julianday'] = util.julian_day_of_year(self.tmin) 

1826 params.update(additional) 

1827 return template % params 

1828 

1829 def plot(self): 

1830 ''' 

1831 Show trace with matplotlib. 

1832 

1833 See also: :py:meth:`Trace.snuffle`. 

1834 ''' 

1835 

1836 import pylab 

1837 pylab.plot(self.get_xdata(), self.get_ydata()) 

1838 name = '%s %s %s - %s' % ( 

1839 self.channel, 

1840 self.station, 

1841 time.strftime("%d-%m-%y %H:%M:%S", time.gmtime(self.tmin)), 

1842 time.strftime("%d-%m-%y %H:%M:%S", time.gmtime(self.tmax))) 

1843 

1844 pylab.title(name) 

1845 pylab.show() 

1846 

1847 def snuffle(self, **kwargs): 

1848 ''' 

1849 Show trace in a snuffler window. 

1850 

1851 :param stations: list of `pyrocko.model.Station` objects or ``None`` 

1852 :param events: list of `pyrocko.model.Event` objects or ``None`` 

1853 :param markers: list of `pyrocko.gui.util.Marker` objects or ``None`` 

1854 :param ntracks: float, number of tracks to be shown initially (default: 

1855 12) 

1856 :param follow: time interval (in seconds) for real time follow mode or 

1857 ``None`` 

1858 :param controls: bool, whether to show the main controls (default: 

1859 ``True``) 

1860 :param opengl: bool, whether to use opengl (default: ``False``) 

1861 ''' 

1862 

1863 return snuffle([self], **kwargs) 

1864 

1865 

1866def snuffle(traces, **kwargs): 

1867 ''' 

1868 Show traces in a snuffler window. 

1869 

1870 :param stations: list of `pyrocko.model.Station` objects or ``None`` 

1871 :param events: list of `pyrocko.model.Event` objects or ``None`` 

1872 :param markers: list of `pyrocko.gui.util.Marker` objects or ``None`` 

1873 :param ntracks: float, number of tracks to be shown initially (default: 12) 

1874 :param follow: time interval (in seconds) for real time follow mode or 

1875 ``None`` 

1876 :param controls: bool, whether to show the main controls (default: 

1877 ``True``) 

1878 :param opengl: bool, whether to use opengl (default: ``False``) 

1879 ''' 

1880 

1881 from pyrocko import pile 

1882 from pyrocko.gui import snuffler 

1883 p = pile.Pile() 

1884 if traces: 

1885 trf = pile.MemTracesFile(None, traces) 

1886 p.add_file(trf) 

1887 return snuffler.snuffle(p, **kwargs) 

1888 

1889 

1890class InfiniteResponse(Exception): 

1891 ''' 

1892 This exception is raised by :py:class:`Trace` operations when deconvolution 

1893 of a frequency response (instrument response transfer function) would 

1894 result in a division by zero. 

1895 ''' 

1896 

1897 

1898class MisalignedTraces(Exception): 

1899 ''' 

1900 This exception is raised by some :py:class:`Trace` operations when tmin, 

1901 tmax or number of samples do not match. 

1902 ''' 

1903 

1904 pass 

1905 

1906 

1907class NoData(Exception): 

1908 ''' 

1909 This exception is raised by some :py:class:`Trace` operations when no or 

1910 not enough data is available. 

1911 ''' 

1912 

1913 pass 

1914 

1915 

1916class AboveNyquist(Exception): 

1917 ''' 

1918 This exception is raised by some :py:class:`Trace` operations when given 

1919 frequencies are above the Nyquist frequency. 

1920 ''' 

1921 

1922 pass 

1923 

1924 

1925class TraceTooShort(Exception): 

1926 ''' 

1927 This exception is raised by some :py:class:`Trace` operations when the 

1928 trace is too short. 

1929 ''' 

1930 

1931 pass 

1932 

1933 

1934class ResamplingFailed(Exception): 

1935 pass 

1936 

1937 

1938def minmax(traces, key=None, mode='minmax'): 

1939 

1940 ''' 

1941 Get data range given traces grouped by selected pattern. 

1942 

1943 :param key: a callable which takes as single argument a trace and returns a 

1944 key for the grouping of the results. If this is ``None``, the default, 

1945 ``lambda tr: (tr.network, tr.station, tr.location, tr.channel)`` is 

1946 used. 

1947 :param mode: 'minmax' or floating point number. If this is 'minmax', 

1948 minimum and maximum of the traces are used, if it is a number, mean +- 

1949 standard deviation times ``mode`` is used. 

1950 

1951 :returns: a dict with the combined data ranges. 

1952 

1953 Examples:: 

1954 

1955 ranges = minmax(traces, lambda tr: tr.channel) 

1956 print ranges['N'] # print min & max of all traces with channel == 'N' 

1957 print ranges['E'] # print min & max of all traces with channel == 'E' 

1958 

1959 ranges = minmax(traces, lambda tr: (tr.network, tr.station)) 

1960 print ranges['GR', 'HAM3'] # print min & max of all traces with 

1961 # network == 'GR' and station == 'HAM3' 

1962 

1963 ranges = minmax(traces, lambda tr: None) 

1964 print ranges[None] # prints min & max of all traces 

1965 ''' 

1966 

1967 if key is None: 

1968 key = _default_key 

1969 

1970 ranges = {} 

1971 for trace in traces: 

1972 if isinstance(mode, str) and mode == 'minmax': 

1973 mi, ma = trace.ydata.min(), trace.ydata.max() 

1974 else: 

1975 mean = trace.ydata.mean() 

1976 std = trace.ydata.std() 

1977 mi, ma = mean-std*mode, mean+std*mode 

1978 

1979 k = key(trace) 

1980 if k not in ranges: 

1981 ranges[k] = mi, ma 

1982 else: 

1983 tmi, tma = ranges[k] 

1984 ranges[k] = min(tmi, mi), max(tma, ma) 

1985 

1986 return ranges 

1987 

1988 

1989def minmaxtime(traces, key=None): 

1990 

1991 ''' 

1992 Get time range given traces grouped by selected pattern. 

1993 

1994 :param key: a callable which takes as single argument a trace and returns a 

1995 key for the grouping of the results. If this is ``None``, the default, 

1996 ``lambda tr: (tr.network, tr.station, tr.location, tr.channel)`` is 

1997 used. 

1998 

1999 :returns: a dict with the combined data ranges. 

2000 ''' 

2001 

2002 if key is None: 

2003 key = _default_key 

2004 

2005 ranges = {} 

2006 for trace in traces: 

2007 mi, ma = trace.tmin, trace.tmax 

2008 k = key(trace) 

2009 if k not in ranges: 

2010 ranges[k] = mi, ma 

2011 else: 

2012 tmi, tma = ranges[k] 

2013 ranges[k] = min(tmi, mi), max(tma, ma) 

2014 

2015 return ranges 

2016 

2017 

2018def degapper( 

2019 traces, 

2020 maxgap=5, 

2021 fillmethod='interpolate', 

2022 deoverlap='use_second', 

2023 maxlap=None): 

2024 

2025 ''' 

2026 Try to connect traces and remove gaps. 

2027 

2028 This method will combine adjacent traces, which match in their network, 

2029 station, location and channel attributes. Overlapping parts are handled 

2030 according to the ``deoverlap`` argument. 

2031 

2032 :param traces: input traces, must be sorted by their full_id attribute. 

2033 :param maxgap: maximum number of samples to interpolate. 

2034 :param fillmethod: what to put into the gaps: 'interpolate' or 'zeros'. 

2035 :param deoverlap: how to handle overlaps: 'use_second' to use data from 

2036 second trace (default), 'use_first' to use data from first trace, 

2037 'crossfade_cos' to crossfade with cosine taper, 'add' to add amplitude 

2038 values. 

2039 :param maxlap: maximum number of samples of overlap which are removed 

2040 

2041 :returns: list of traces 

2042 ''' 

2043 

2044 in_traces = traces 

2045 out_traces = [] 

2046 if not in_traces: 

2047 return out_traces 

2048 out_traces.append(in_traces.pop(0)) 

2049 while in_traces: 

2050 

2051 a = out_traces[-1] 

2052 b = in_traces.pop(0) 

2053 

2054 avirt, bvirt = a.ydata is None, b.ydata is None 

2055 assert avirt == bvirt, \ 

2056 'traces given to degapper() must either all have data or have ' \ 

2057 'no data.' 

2058 

2059 virtual = avirt and bvirt 

2060 

2061 if (a.nslc_id == b.nslc_id and a.deltat == b.deltat 

2062 and a.data_len() >= 1 and b.data_len() >= 1 

2063 and (virtual or a.ydata.dtype == b.ydata.dtype)): 

2064 

2065 dist = (b.tmin-(a.tmin+(a.data_len()-1)*a.deltat))/a.deltat 

2066 idist = int(round(dist)) 

2067 if abs(dist - idist) > 0.05 and idist <= maxgap: 

2068 # logger.warning('Cannot degap traces with displaced sampling ' 

2069 # '(%s, %s, %s, %s)' % a.nslc_id) 

2070 pass 

2071 else: 

2072 if 1 < idist <= maxgap: 

2073 if not virtual: 

2074 if fillmethod == 'interpolate': 

2075 filler = a.ydata[-1] + ( 

2076 ((1.0 + num.arange(idist-1, dtype=float)) 

2077 / idist) * (b.ydata[0]-a.ydata[-1]) 

2078 ).astype(a.ydata.dtype) 

2079 elif fillmethod == 'zeros': 

2080 filler = num.zeros(idist-1, dtype=a.ydist.dtype) 

2081 a.ydata = num.concatenate((a.ydata, filler, b.ydata)) 

2082 a.tmax = b.tmax 

2083 if a.mtime and b.mtime: 

2084 a.mtime = max(a.mtime, b.mtime) 

2085 continue 

2086 

2087 elif idist == 1: 

2088 if not virtual: 

2089 a.ydata = num.concatenate((a.ydata, b.ydata)) 

2090 a.tmax = b.tmax 

2091 if a.mtime and b.mtime: 

2092 a.mtime = max(a.mtime, b.mtime) 

2093 continue 

2094 

2095 elif idist <= 0 and (maxlap is None or -maxlap < idist): 

2096 if b.tmax > a.tmax: 

2097 if not virtual: 

2098 na = a.ydata.size 

2099 n = -idist+1 

2100 if deoverlap == 'use_second': 

2101 a.ydata = num.concatenate( 

2102 (a.ydata[:-n], b.ydata)) 

2103 elif deoverlap in ('use_first', 'crossfade_cos'): 

2104 a.ydata = num.concatenate( 

2105 (a.ydata, b.ydata[n:])) 

2106 elif deoverlap == 'add': 

2107 a.ydata[-n:] += b.ydata[:n] 

2108 a.ydata = num.concatenate( 

2109 (a.ydata, b.ydata[n:])) 

2110 else: 

2111 assert False, 'unknown deoverlap method' 

2112 

2113 if deoverlap == 'crossfade_cos': 

2114 n = -idist+1 

2115 taper = 0.5-0.5*num.cos( 

2116 (1.+num.arange(n))/(1.+n)*num.pi) 

2117 a.ydata[na-n:na] *= 1.-taper 

2118 a.ydata[na-n:na] += b.ydata[:n] * taper 

2119 

2120 a.tmax = b.tmax 

2121 if a.mtime and b.mtime: 

2122 a.mtime = max(a.mtime, b.mtime) 

2123 continue 

2124 else: 

2125 # make short second trace vanish 

2126 continue 

2127 

2128 if b.data_len() >= 1: 

2129 out_traces.append(b) 

2130 

2131 for tr in out_traces: 

2132 tr._update_ids() 

2133 

2134 return out_traces 

2135 

2136 

2137def rotate(traces, azimuth, in_channels, out_channels): 

2138 ''' 

2139 2D rotation of traces. 

2140 

2141 :param traces: list of input traces 

2142 :param azimuth: difference of the azimuths of the component directions 

2143 (azimuth of out_channels[0]) - (azimuth of in_channels[0]) 

2144 :param in_channels: names of the input channels (e.g. 'N', 'E') 

2145 :param out_channels: names of the output channels (e.g. 'R', 'T') 

2146 :returns: list of rotated traces 

2147 ''' 

2148 

2149 phi = azimuth/180.*math.pi 

2150 cphi = math.cos(phi) 

2151 sphi = math.sin(phi) 

2152 rotated = [] 

2153 in_channels = tuple(_channels_to_names(in_channels)) 

2154 out_channels = tuple(_channels_to_names(out_channels)) 

2155 for a in traces: 

2156 for b in traces: 

2157 if ((a.channel, b.channel) == in_channels and 

2158 a.nslc_id[:3] == b.nslc_id[:3] and 

2159 abs(a.deltat-b.deltat) < a.deltat*0.001): 

2160 tmin = max(a.tmin, b.tmin) 

2161 tmax = min(a.tmax, b.tmax) 

2162 

2163 if tmin < tmax: 

2164 ac = a.chop(tmin, tmax, inplace=False, include_last=True) 

2165 bc = b.chop(tmin, tmax, inplace=False, include_last=True) 

2166 if abs(ac.tmin - bc.tmin) > ac.deltat*0.01: 

2167 logger.warning( 

2168 'Cannot rotate traces with displaced sampling ' 

2169 '(%s, %s, %s, %s)' % a.nslc_id) 

2170 continue 

2171 

2172 acydata = ac.get_ydata()*cphi+bc.get_ydata()*sphi 

2173 bcydata = -ac.get_ydata()*sphi+bc.get_ydata()*cphi 

2174 ac.set_ydata(acydata) 

2175 bc.set_ydata(bcydata) 

2176 

2177 ac.set_codes(channel=out_channels[0]) 

2178 bc.set_codes(channel=out_channels[1]) 

2179 rotated.append(ac) 

2180 rotated.append(bc) 

2181 

2182 return rotated 

2183 

2184 

2185def rotate_to_rt(n, e, source, receiver, out_channels=('R', 'T')): 

2186 azimuth = orthodrome.azimuth(receiver, source) + 180. 

2187 in_channels = n.channel, e.channel 

2188 out = rotate( 

2189 [n, e], azimuth, 

2190 in_channels=in_channels, 

2191 out_channels=out_channels) 

2192 

2193 assert len(out) == 2 

2194 for tr in out: 

2195 if tr.channel == 'R': 

2196 r = tr 

2197 elif tr.channel == 'T': 

2198 t = tr 

2199 

2200 return r, t 

2201 

2202 

2203def rotate_to_lqt(traces, backazimuth, incidence, in_channels, 

2204 out_channels=('L', 'Q', 'T')): 

2205 ''' 

2206 Rotate traces from ZNE to LQT system. 

2207 

2208 :param traces: list of traces in arbitrary order 

2209 :param backazimuth: backazimuth in degrees clockwise from north 

2210 :param incidence: incidence angle in degrees from vertical 

2211 :param in_channels: input channel names 

2212 :param out_channels: output channel names (default: ('L', 'Q', 'T')) 

2213 :returns: list of transformed traces 

2214 ''' 

2215 i = incidence/180.*num.pi 

2216 b = backazimuth/180.*num.pi 

2217 

2218 ci = num.cos(i) 

2219 cb = num.cos(b) 

2220 si = num.sin(i) 

2221 sb = num.sin(b) 

2222 

2223 rotmat = num.array( 

2224 [[ci, -cb*si, -sb*si], [si, cb*ci, sb*ci], [0., sb, -cb]]) 

2225 return project(traces, rotmat, in_channels, out_channels) 

2226 

2227 

2228def _decompose(a): 

2229 ''' 

2230 Decompose matrix into independent submatrices. 

2231 ''' 

2232 

2233 def depends(iout, a): 

2234 row = a[iout, :] 

2235 return set(num.arange(row.size).compress(row != 0.0)) 

2236 

2237 def provides(iin, a): 

2238 col = a[:, iin] 

2239 return set(num.arange(col.size).compress(col != 0.0)) 

2240 

2241 a = num.asarray(a) 

2242 outs = set(range(a.shape[0])) 

2243 systems = [] 

2244 while outs: 

2245 iout = outs.pop() 

2246 

2247 gout = set() 

2248 for iin in depends(iout, a): 

2249 gout.update(provides(iin, a)) 

2250 

2251 if not gout: 

2252 continue 

2253 

2254 gin = set() 

2255 for iout2 in gout: 

2256 gin.update(depends(iout2, a)) 

2257 

2258 if not gin: 

2259 continue 

2260 

2261 for iout2 in gout: 

2262 if iout2 in outs: 

2263 outs.remove(iout2) 

2264 

2265 gin = list(gin) 

2266 gin.sort() 

2267 gout = list(gout) 

2268 gout.sort() 

2269 

2270 systems.append((gin, gout, a[gout, :][:, gin])) 

2271 

2272 return systems 

2273 

2274 

2275def _channels_to_names(channels): 

2276 names = [] 

2277 for ch in channels: 

2278 if isinstance(ch, model.Channel): 

2279 names.append(ch.name) 

2280 else: 

2281 names.append(ch) 

2282 return names 

2283 

2284 

2285def project(traces, matrix, in_channels, out_channels): 

2286 ''' 

2287 Affine transform of three-component traces. 

2288 

2289 Compute matrix-vector product of three-component traces, to e.g. rotate 

2290 traces into a different basis. The traces are distinguished and ordered by 

2291 their channel attribute. The tranform is applied to overlapping parts of 

2292 any appropriate combinations of the input traces. This should allow this 

2293 function to be robust with data gaps. It also tries to apply the 

2294 tranformation to subsets of the channels, if this is possible, so that, if 

2295 for example a vertical compontent is missing, horizontal components can 

2296 still be rotated. 

2297 

2298 :param traces: list of traces in arbitrary order 

2299 :param matrix: tranformation matrix 

2300 :param in_channels: input channel names 

2301 :param out_channels: output channel names 

2302 :returns: list of transformed traces 

2303 ''' 

2304 

2305 in_channels = tuple(_channels_to_names(in_channels)) 

2306 out_channels = tuple(_channels_to_names(out_channels)) 

2307 systems = _decompose(matrix) 

2308 

2309 # fallback to full matrix if some are not quadratic 

2310 for iins, iouts, submatrix in systems: 

2311 if submatrix.shape[0] != submatrix.shape[1]: 

2312 return _project3(traces, matrix, in_channels, out_channels) 

2313 

2314 projected = [] 

2315 for iins, iouts, submatrix in systems: 

2316 in_cha = tuple([in_channels[iin] for iin in iins]) 

2317 out_cha = tuple([out_channels[iout] for iout in iouts]) 

2318 if submatrix.shape[0] == 1: 

2319 projected.extend(_project1(traces, submatrix, in_cha, out_cha)) 

2320 elif submatrix.shape[1] == 2: 

2321 projected.extend(_project2(traces, submatrix, in_cha, out_cha)) 

2322 else: 

2323 projected.extend(_project3(traces, submatrix, in_cha, out_cha)) 

2324 

2325 return projected 

2326 

2327 

2328def project_dependencies(matrix, in_channels, out_channels): 

2329 ''' 

2330 Figure out what dependencies project() would produce. 

2331 ''' 

2332 

2333 in_channels = tuple(_channels_to_names(in_channels)) 

2334 out_channels = tuple(_channels_to_names(out_channels)) 

2335 systems = _decompose(matrix) 

2336 

2337 subpro = [] 

2338 for iins, iouts, submatrix in systems: 

2339 if submatrix.shape[0] != submatrix.shape[1]: 

2340 subpro.append((matrix, in_channels, out_channels)) 

2341 

2342 if not subpro: 

2343 for iins, iouts, submatrix in systems: 

2344 in_cha = tuple([in_channels[iin] for iin in iins]) 

2345 out_cha = tuple([out_channels[iout] for iout in iouts]) 

2346 subpro.append((submatrix, in_cha, out_cha)) 

2347 

2348 deps = {} 

2349 for mat, in_cha, out_cha in subpro: 

2350 for oc in out_cha: 

2351 if oc not in deps: 

2352 deps[oc] = [] 

2353 

2354 for ic in in_cha: 

2355 deps[oc].append(ic) 

2356 

2357 return deps 

2358 

2359 

2360def _project1(traces, matrix, in_channels, out_channels): 

2361 assert len(in_channels) == 1 

2362 assert len(out_channels) == 1 

2363 assert matrix.shape == (1, 1) 

2364 

2365 projected = [] 

2366 for a in traces: 

2367 if not (a.channel,) == in_channels: 

2368 continue 

2369 

2370 ac = a.copy() 

2371 ac.set_ydata(matrix[0, 0]*a.get_ydata()) 

2372 ac.set_codes(channel=out_channels[0]) 

2373 projected.append(ac) 

2374 

2375 return projected 

2376 

2377 

2378def _project2(traces, matrix, in_channels, out_channels): 

2379 assert len(in_channels) == 2 

2380 assert len(out_channels) == 2 

2381 assert matrix.shape == (2, 2) 

2382 projected = [] 

2383 for a in traces: 

2384 for b in traces: 

2385 if not ((a.channel, b.channel) == in_channels and 

2386 a.nslc_id[:3] == b.nslc_id[:3] and 

2387 abs(a.deltat-b.deltat) < a.deltat*0.001): 

2388 continue 

2389 

2390 tmin = max(a.tmin, b.tmin) 

2391 tmax = min(a.tmax, b.tmax) 

2392 

2393 if tmin > tmax: 

2394 continue 

2395 

2396 ac = a.chop(tmin, tmax, inplace=False, include_last=True) 

2397 bc = b.chop(tmin, tmax, inplace=False, include_last=True) 

2398 if abs(ac.tmin - bc.tmin) > ac.deltat*0.01: 

2399 logger.warning( 

2400 'Cannot project traces with displaced sampling ' 

2401 '(%s, %s, %s, %s)' % a.nslc_id) 

2402 continue 

2403 

2404 acydata = num.dot(matrix[0], (ac.get_ydata(), bc.get_ydata())) 

2405 bcydata = num.dot(matrix[1], (ac.get_ydata(), bc.get_ydata())) 

2406 

2407 ac.set_ydata(acydata) 

2408 bc.set_ydata(bcydata) 

2409 

2410 ac.set_codes(channel=out_channels[0]) 

2411 bc.set_codes(channel=out_channels[1]) 

2412 

2413 projected.append(ac) 

2414 projected.append(bc) 

2415 

2416 return projected 

2417 

2418 

2419def _project3(traces, matrix, in_channels, out_channels): 

2420 assert len(in_channels) == 3 

2421 assert len(out_channels) == 3 

2422 assert matrix.shape == (3, 3) 

2423 projected = [] 

2424 for a in traces: 

2425 for b in traces: 

2426 for c in traces: 

2427 if not ((a.channel, b.channel, c.channel) == in_channels 

2428 and a.nslc_id[:3] == b.nslc_id[:3] 

2429 and b.nslc_id[:3] == c.nslc_id[:3] 

2430 and abs(a.deltat-b.deltat) < a.deltat*0.001 

2431 and abs(b.deltat-c.deltat) < b.deltat*0.001): 

2432 

2433 continue 

2434 

2435 tmin = max(a.tmin, b.tmin, c.tmin) 

2436 tmax = min(a.tmax, b.tmax, c.tmax) 

2437 

2438 if tmin >= tmax: 

2439 continue 

2440 

2441 ac = a.chop(tmin, tmax, inplace=False, include_last=True) 

2442 bc = b.chop(tmin, tmax, inplace=False, include_last=True) 

2443 cc = c.chop(tmin, tmax, inplace=False, include_last=True) 

2444 if (abs(ac.tmin - bc.tmin) > ac.deltat*0.01 

2445 or abs(bc.tmin - cc.tmin) > bc.deltat*0.01): 

2446 

2447 logger.warning( 

2448 'Cannot project traces with displaced sampling ' 

2449 '(%s, %s, %s, %s)' % a.nslc_id) 

2450 continue 

2451 

2452 acydata = num.dot( 

2453 matrix[0], 

2454 (ac.get_ydata(), bc.get_ydata(), cc.get_ydata())) 

2455 bcydata = num.dot( 

2456 matrix[1], 

2457 (ac.get_ydata(), bc.get_ydata(), cc.get_ydata())) 

2458 ccydata = num.dot( 

2459 matrix[2], 

2460 (ac.get_ydata(), bc.get_ydata(), cc.get_ydata())) 

2461 

2462 ac.set_ydata(acydata) 

2463 bc.set_ydata(bcydata) 

2464 cc.set_ydata(ccydata) 

2465 

2466 ac.set_codes(channel=out_channels[0]) 

2467 bc.set_codes(channel=out_channels[1]) 

2468 cc.set_codes(channel=out_channels[2]) 

2469 

2470 projected.append(ac) 

2471 projected.append(bc) 

2472 projected.append(cc) 

2473 

2474 return projected 

2475 

2476 

2477def correlate(a, b, mode='valid', normalization=None, use_fft=False): 

2478 ''' 

2479 Cross correlation of two traces. 

2480 

2481 :param a,b: input traces 

2482 :param mode: ``'valid'``, ``'full'``, or ``'same'`` 

2483 :param normalization: ``'normal'``, ``'gliding'``, or ``None`` 

2484 :param use_fft: bool, whether to do cross correlation in spectral domain 

2485 

2486 :returns: trace containing cross correlation coefficients 

2487 

2488 This function computes the cross correlation between two traces. It 

2489 evaluates the discrete equivalent of 

2490 

2491 .. math:: 

2492 

2493 c(t) = \\int_{-\\infty}^{\\infty} a^{\\ast}(\\tau) b(t+\\tau) d\\tau 

2494 

2495 where the star denotes complex conjugate. Note, that the arguments here are 

2496 swapped when compared with the :py:func:`numpy.correlate` function, 

2497 which is internally called. This function should be safe even with older 

2498 versions of NumPy, where the correlate function has some problems. 

2499 

2500 A trace containing the cross correlation coefficients is returned. The time 

2501 information of the output trace is set so that the returned cross 

2502 correlation can be viewed directly as a function of time lag. 

2503 

2504 Example:: 

2505 

2506 # align two traces a and b containing a time shifted similar signal: 

2507 c = pyrocko.trace.correlate(a,b) 

2508 t, coef = c.max() # get time and value of maximum 

2509 b.shift(-t) # align b with a 

2510 

2511 ''' 

2512 

2513 assert_same_sampling_rate(a, b) 

2514 

2515 ya, yb = a.ydata, b.ydata 

2516 

2517 # need reversed order here: 

2518 yc = numpy_correlate_fixed(yb, ya, mode=mode, use_fft=use_fft) 

2519 kmin, kmax = numpy_correlate_lag_range(yb, ya, mode=mode, use_fft=use_fft) 

2520 

2521 if normalization == 'normal': 

2522 normfac = num.sqrt(num.sum(ya**2))*num.sqrt(num.sum(yb**2)) 

2523 yc = yc/normfac 

2524 

2525 elif normalization == 'gliding': 

2526 if mode != 'valid': 

2527 assert False, 'gliding normalization currently only available ' \ 

2528 'with "valid" mode.' 

2529 

2530 if ya.size < yb.size: 

2531 yshort, ylong = ya, yb 

2532 else: 

2533 yshort, ylong = yb, ya 

2534 

2535 epsilon = 0.00001 

2536 normfac_short = num.sqrt(num.sum(yshort**2)) 

2537 normfac = normfac_short * num.sqrt( 

2538 moving_sum(ylong**2, yshort.size, mode='valid')) \ 

2539 + normfac_short*epsilon 

2540 

2541 if yb.size <= ya.size: 

2542 normfac = normfac[::-1] 

2543 

2544 yc /= normfac 

2545 

2546 c = a.copy() 

2547 c.set_ydata(yc) 

2548 c.set_codes(*merge_codes(a, b, '~')) 

2549 c.shift(-c.tmin + b.tmin-a.tmin + kmin * c.deltat) 

2550 

2551 return c 

2552 

2553 

2554def deconvolve( 

2555 a, b, waterlevel, 

2556 tshift=0., 

2557 pad=0.5, 

2558 fd_taper=None, 

2559 pad_to_pow2=True): 

2560 

2561 same_sampling_rate(a, b) 

2562 assert abs(a.tmin - b.tmin) < a.deltat * 0.001 

2563 deltat = a.deltat 

2564 npad = int(round(a.data_len()*pad + tshift / deltat)) 

2565 

2566 ndata = max(a.data_len(), b.data_len()) 

2567 ndata_pad = ndata + npad 

2568 

2569 if pad_to_pow2: 

2570 ntrans = nextpow2(ndata_pad) 

2571 else: 

2572 ntrans = ndata 

2573 

2574 aspec = num.fft.rfft(a.ydata, ntrans) 

2575 bspec = num.fft.rfft(b.ydata, ntrans) 

2576 

2577 out = aspec * num.conj(bspec) 

2578 

2579 bautocorr = bspec*num.conj(bspec) 

2580 denom = num.maximum(bautocorr, waterlevel * bautocorr.max()) 

2581 

2582 out /= denom 

2583 df = 1/(ntrans*deltat) 

2584 

2585 if fd_taper is not None: 

2586 fd_taper(out, 0.0, df) 

2587 

2588 ydata = num.roll(num.fft.irfft(out), int(round(tshift/deltat))) 

2589 c = a.copy(data=False) 

2590 c.set_ydata(ydata[:ndata]) 

2591 c.set_codes(*merge_codes(a, b, '/')) 

2592 return c 

2593 

2594 

2595def assert_same_sampling_rate(a, b, eps=1.0e-6): 

2596 assert same_sampling_rate(a, b, eps), \ 

2597 'Sampling rates differ: %g != %g' % (a.deltat, b.deltat) 

2598 

2599 

2600def same_sampling_rate(a, b, eps=1.0e-6): 

2601 ''' 

2602 Check if two traces have the same sampling rate. 

2603 

2604 :param a,b: input traces 

2605 :param eps: relative tolerance 

2606 ''' 

2607 

2608 return abs(a.deltat - b.deltat) < (a.deltat + b.deltat)*eps 

2609 

2610 

2611def fix_deltat_rounding_errors(deltat): 

2612 ''' 

2613 Try to undo sampling rate rounding errors. 

2614 

2615 Fix rounding errors of sampling intervals when these are read from single 

2616 precision floating point values. 

2617 

2618 Assumes that the true sampling rate or sampling interval was an integer 

2619 value. No correction will be applied if this would change the sampling 

2620 rate by more than 0.001%. 

2621 ''' 

2622 

2623 if deltat <= 1.0: 

2624 deltat_new = 1.0 / round(1.0 / deltat) 

2625 else: 

2626 deltat_new = round(deltat) 

2627 

2628 if abs(deltat_new - deltat) / deltat > 1e-5: 

2629 deltat_new = deltat 

2630 

2631 return deltat_new 

2632 

2633 

2634def merge_codes(a, b, sep='-'): 

2635 ''' 

2636 Merge network-station-location-channel codes of a pair of traces. 

2637 ''' 

2638 

2639 o = [] 

2640 for xa, xb in zip(a.nslc_id, b.nslc_id): 

2641 if xa == xb: 

2642 o.append(xa) 

2643 else: 

2644 o.append(sep.join((xa, xb))) 

2645 return o 

2646 

2647 

2648class Taper(Object): 

2649 ''' 

2650 Base class for tapers. 

2651 

2652 Does nothing by default. 

2653 ''' 

2654 

2655 def __call__(self, y, x0, dx): 

2656 pass 

2657 

2658 

2659class CosTaper(Taper): 

2660 ''' 

2661 Cosine Taper. 

2662 

2663 :param a: start of fading in 

2664 :param b: end of fading in 

2665 :param c: start of fading out 

2666 :param d: end of fading out 

2667 ''' 

2668 

2669 a = Float.T() 

2670 b = Float.T() 

2671 c = Float.T() 

2672 d = Float.T() 

2673 

2674 def __init__(self, a, b, c, d): 

2675 Taper.__init__(self, a=a, b=b, c=c, d=d) 

2676 

2677 def __call__(self, y, x0, dx): 

2678 apply_costaper(self.a, self.b, self.c, self.d, y, x0, dx) 

2679 

2680 def span(self, y, x0, dx): 

2681 return span_costaper(self.a, self.b, self.c, self.d, y, x0, dx) 

2682 

2683 def time_span(self): 

2684 return self.a, self.d 

2685 

2686 

2687class CosFader(Taper): 

2688 ''' 

2689 Cosine Fader. 

2690 

2691 :param xfade: fade in and fade out time in seconds (optional) 

2692 :param xfrac: fade in and fade out as fraction between 0. and 1. (optional) 

2693 

2694 Only one argument can be set. The other should to be ``None``. 

2695 ''' 

2696 

2697 xfade = Float.T(optional=True) 

2698 xfrac = Float.T(optional=True) 

2699 

2700 def __init__(self, xfade=None, xfrac=None): 

2701 Taper.__init__(self, xfade=xfade, xfrac=xfrac) 

2702 assert (xfade is None) != (xfrac is None) 

2703 self._xfade = xfade 

2704 self._xfrac = xfrac 

2705 

2706 def __call__(self, y, x0, dx): 

2707 

2708 xfade = self._xfade 

2709 

2710 xlen = (y.size - 1)*dx 

2711 if xfade is None: 

2712 xfade = xlen * self._xfrac 

2713 

2714 a = x0 

2715 b = x0 + xfade 

2716 c = x0 + xlen - xfade 

2717 d = x0 + xlen 

2718 

2719 apply_costaper(a, b, c, d, y, x0, dx) 

2720 

2721 def span(self, y, x0, dx): 

2722 return 0, y.size 

2723 

2724 def time_span(self): 

2725 return None, None 

2726 

2727 

2728def none_min(li): 

2729 if None in li: 

2730 return None 

2731 else: 

2732 return min(x for x in li if x is not None) 

2733 

2734 

2735def none_max(li): 

2736 if None in li: 

2737 return None 

2738 else: 

2739 return max(x for x in li if x is not None) 

2740 

2741 

2742class MultiplyTaper(Taper): 

2743 ''' 

2744 Multiplication of several tapers. 

2745 ''' 

2746 

2747 tapers = List.T(Taper.T()) 

2748 

2749 def __init__(self, tapers=None): 

2750 if tapers is None: 

2751 tapers = [] 

2752 

2753 Taper.__init__(self, tapers=tapers) 

2754 

2755 def __call__(self, y, x0, dx): 

2756 for taper in self.tapers: 

2757 taper(y, x0, dx) 

2758 

2759 def span(self, y, x0, dx): 

2760 spans = [] 

2761 for taper in self.tapers: 

2762 spans.append(taper.span(y, x0, dx)) 

2763 

2764 mins, maxs = list(zip(*spans)) 

2765 return min(mins), max(maxs) 

2766 

2767 def time_span(self): 

2768 spans = [] 

2769 for taper in self.tapers: 

2770 spans.append(taper.time_span()) 

2771 

2772 mins, maxs = list(zip(*spans)) 

2773 return none_min(mins), none_max(maxs) 

2774 

2775 

2776class GaussTaper(Taper): 

2777 ''' 

2778 Frequency domain Gaussian filter. 

2779 ''' 

2780 

2781 alpha = Float.T() 

2782 

2783 def __init__(self, alpha): 

2784 Taper.__init__(self, alpha=alpha) 

2785 self._alpha = alpha 

2786 

2787 def __call__(self, y, x0, dx): 

2788 f = x0 + num.arange(y.size)*dx 

2789 y *= num.exp(-num.pi**2 / (self._alpha**2) * f**2) 

2790 

2791 

2792class FrequencyResponse(Object): 

2793 ''' 

2794 Evaluates frequency response at given frequencies. 

2795 ''' 

2796 

2797 def evaluate(self, freqs): 

2798 coefs = num.ones(freqs.size, dtype=complex) 

2799 return coefs 

2800 

2801 def is_scalar(self): 

2802 ''' 

2803 Check if this is a flat response. 

2804 ''' 

2805 

2806 if type(self) == FrequencyResponse: 

2807 return True 

2808 else: 

2809 return False # default for derived classes 

2810 

2811 

2812class Evalresp(FrequencyResponse): 

2813 ''' 

2814 Calls evalresp and generates values of the instrument response transfer 

2815 function. 

2816 

2817 :param respfile: response file in evalresp format 

2818 :param trace: trace for which the response is to be extracted from the file 

2819 :param target: ``'dis'`` for displacement or ``'vel'`` for velocity 

2820 ''' 

2821 

2822 respfile = String.T() 

2823 nslc_id = Tuple.T(4, String.T()) 

2824 target = String.T(default='dis') 

2825 instant = Float.T() 

2826 

2827 def __init__( 

2828 self, respfile, trace=None, target='dis', nslc_id=None, time=None): 

2829 

2830 if trace is not None: 

2831 nslc_id = trace.nslc_id 

2832 time = (trace.tmin + trace.tmax) / 2. 

2833 

2834 FrequencyResponse.__init__( 

2835 self, 

2836 respfile=respfile, 

2837 nslc_id=nslc_id, 

2838 instant=time, 

2839 target=target) 

2840 

2841 def evaluate(self, freqs): 

2842 network, station, location, channel = self.nslc_id 

2843 x = evalresp.evalresp( 

2844 sta_list=station, 

2845 cha_list=channel, 

2846 net_code=network, 

2847 locid=location, 

2848 instant=self.instant, 

2849 freqs=freqs, 

2850 units=self.target.upper(), 

2851 file=self.respfile, 

2852 rtype='CS') 

2853 

2854 transfer = x[0][4] 

2855 return transfer 

2856 

2857 

2858class InverseEvalresp(FrequencyResponse): 

2859 ''' 

2860 Calls evalresp and generates values of the inverse instrument response for 

2861 deconvolution of instrument response. 

2862 

2863 :param respfile: response file in evalresp format 

2864 :param trace: trace for which the response is to be extracted from the file 

2865 :param target: ``'dis'`` for displacement or ``'vel'`` for velocity 

2866 ''' 

2867 

2868 respfile = String.T() 

2869 nslc_id = Tuple.T(4, String.T()) 

2870 target = String.T(default='dis') 

2871 instant = Timestamp.T() 

2872 

2873 def __init__(self, respfile, trace, target='dis'): 

2874 FrequencyResponse.__init__( 

2875 self, 

2876 respfile=respfile, 

2877 nslc_id=trace.nslc_id, 

2878 instant=(trace.tmin + trace.tmax)/2., 

2879 target=target) 

2880 

2881 def evaluate(self, freqs): 

2882 network, station, location, channel = self.nslc_id 

2883 x = evalresp.evalresp(sta_list=station, 

2884 cha_list=channel, 

2885 net_code=network, 

2886 locid=location, 

2887 instant=self.instant, 

2888 freqs=freqs, 

2889 units=self.target.upper(), 

2890 file=self.respfile, 

2891 rtype='CS') 

2892 

2893 transfer = x[0][4] 

2894 return 1./transfer 

2895 

2896 

2897class PoleZeroResponse(FrequencyResponse): 

2898 ''' 

2899 Evaluates frequency response from pole-zero representation. 

2900 

2901 :param zeros: :py:class:`numpy.array` containing complex positions of zeros 

2902 :param poles: :py:class:`numpy.array` containing complex positions of poles 

2903 :param constant: gain as floating point number 

2904 

2905 :: 

2906 

2907 (j*2*pi*f - zeros[0]) * (j*2*pi*f - zeros[1]) * ... 

2908 T(f) = constant * ---------------------------------------------------- 

2909 (j*2*pi*f - poles[0]) * (j*2*pi*f - poles[1]) * ... 

2910 

2911 

2912 The poles and zeros should be given as angular frequencies, not in Hz. 

2913 ''' 

2914 

2915 zeros = List.T(Complex.T()) 

2916 poles = List.T(Complex.T()) 

2917 constant = Complex.T(default=1.0+0j) 

2918 

2919 def __init__(self, zeros=None, poles=None, constant=1.0+0j): 

2920 if zeros is None: 

2921 zeros = [] 

2922 if poles is None: 

2923 poles = [] 

2924 FrequencyResponse.__init__( 

2925 self, zeros=zeros, poles=poles, constant=constant) 

2926 

2927 def evaluate(self, freqs): 

2928 jomeg = 1.0j * 2.*num.pi*freqs 

2929 

2930 a = num.ones(freqs.size, dtype=complex)*self.constant 

2931 for z in self.zeros: 

2932 a *= jomeg-z 

2933 for p in self.poles: 

2934 a /= jomeg-p 

2935 

2936 return a 

2937 

2938 def is_scalar(self): 

2939 return len(self.zeros) == 0 and len(self.poles) == 0 

2940 

2941 

2942class ButterworthResponse(FrequencyResponse): 

2943 ''' 

2944 Butterworth frequency response. 

2945 

2946 :param corner: corner frequency of the response 

2947 :param order: order of the response 

2948 :param type: either ``high`` or ``low`` 

2949 ''' 

2950 

2951 corner = Float.T(default=1.0) 

2952 order = Int.T(default=4) 

2953 type = StringChoice.T(choices=['low', 'high'], default='low') 

2954 

2955 def evaluate(self, freqs): 

2956 b, a = signal.butter( 

2957 int(self.order), float(self.corner), self.type, analog=True) 

2958 w, h = signal.freqs(b, a, freqs) 

2959 return h 

2960 

2961 

2962class SampledResponse(FrequencyResponse): 

2963 ''' 

2964 Interpolates frequency response given at a set of sampled frequencies. 

2965 

2966 :param frequencies,values: frequencies and values of the sampled response 

2967 function. 

2968 :param left,right: values to return when input is out of range. If set to 

2969 ``None`` (the default) the endpoints are returned. 

2970 ''' 

2971 

2972 frequencies = Array.T(shape=(None,), dtype=float, serialize_as='list') 

2973 values = Array.T(shape=(None,), dtype=complex, serialize_as='list') 

2974 left = Complex.T(optional=True) 

2975 right = Complex.T(optional=True) 

2976 

2977 def __init__(self, frequencies, values, left=None, right=None): 

2978 FrequencyResponse.__init__( 

2979 self, 

2980 frequencies=asarray_1d(frequencies, float), 

2981 values=asarray_1d(values, complex)) 

2982 

2983 def evaluate(self, freqs): 

2984 ereal = num.interp( 

2985 freqs, self.frequencies, num.real(self.values), 

2986 left=self.left, right=self.right) 

2987 eimag = num.interp( 

2988 freqs, self.frequencies, num.imag(self.values), 

2989 left=self.left, right=self.right) 

2990 transfer = ereal + 1.0j*eimag 

2991 return transfer 

2992 

2993 def inverse(self): 

2994 ''' 

2995 Get inverse as a new :py:class:`SampledResponse` object. 

2996 ''' 

2997 

2998 def inv_or_none(x): 

2999 if x is not None: 

3000 return 1./x 

3001 

3002 return SampledResponse( 

3003 self.frequencies, 1./self.values, 

3004 left=inv_or_none(self.left), 

3005 right=inv_or_none(self.right)) 

3006 

3007 

3008class IntegrationResponse(FrequencyResponse): 

3009 ''' 

3010 The integration response, optionally multiplied by a constant gain. 

3011 

3012 :param n: exponent (integer) 

3013 :param gain: gain factor (float) 

3014 

3015 :: 

3016 

3017 gain 

3018 T(f) = -------------- 

3019 (j*2*pi * f)^n 

3020 ''' 

3021 

3022 n = Int.T(optional=True, default=1) 

3023 gain = Float.T(optional=True, default=1.0) 

3024 

3025 def __init__(self, n=1, gain=1.0): 

3026 FrequencyResponse.__init__(self, n=n, gain=gain) 

3027 

3028 def evaluate(self, freqs): 

3029 nonzero = freqs != 0.0 

3030 resp = num.empty(freqs.size, dtype=complex) 

3031 resp[nonzero] = self.gain / (1.0j * 2. * num.pi*freqs[nonzero])**self.n 

3032 resp[num.logical_not(nonzero)] = 0.0 

3033 return resp 

3034 

3035 

3036class DifferentiationResponse(FrequencyResponse): 

3037 ''' 

3038 The differentiation response, optionally multiplied by a constant gain. 

3039 

3040 :param n: exponent (integer) 

3041 :param gain: gain factor (float) 

3042 

3043 :: 

3044 

3045 T(f) = gain * (j*2*pi * f)^n 

3046 ''' 

3047 

3048 n = Int.T(optional=True, default=1) 

3049 gain = Float.T(optional=True, default=1.0) 

3050 

3051 def __init__(self, n=1, gain=1.0): 

3052 FrequencyResponse.__init__(self, n=n, gain=gain) 

3053 

3054 def evaluate(self, freqs): 

3055 return self.gain * (1.0j * 2. * num.pi * freqs)**self.n 

3056 

3057 

3058class AnalogFilterResponse(FrequencyResponse): 

3059 ''' 

3060 Frequency response of an analog filter. 

3061 

3062 (see :py:func:`scipy.signal.freqs`). 

3063 ''' 

3064 

3065 b = List.T(Float.T()) 

3066 a = List.T(Float.T()) 

3067 

3068 def __init__(self, b, a): 

3069 FrequencyResponse.__init__(self, b=b, a=a) 

3070 

3071 def evaluate(self, freqs): 

3072 return signal.freqs(self.b, self.a, freqs/(2.*num.pi))[1] 

3073 

3074 

3075class MultiplyResponse(FrequencyResponse): 

3076 ''' 

3077 Multiplication of several :py:class:`FrequencyResponse` objects. 

3078 ''' 

3079 

3080 responses = List.T(FrequencyResponse.T()) 

3081 

3082 def __init__(self, responses=None): 

3083 if responses is None: 

3084 responses = [] 

3085 FrequencyResponse.__init__(self, responses=responses) 

3086 

3087 def evaluate(self, freqs): 

3088 a = num.ones(freqs.size, dtype=complex) 

3089 for resp in self.responses: 

3090 a *= resp.evaluate(freqs) 

3091 

3092 return a 

3093 

3094 def is_scalar(self): 

3095 return all(resp.is_scalar() for resp in self.responses) 

3096 

3097 

3098def asarray_1d(x, dtype): 

3099 if isinstance(x, (list, tuple)) and x and isinstance(x[0], (str, newstr)): 

3100 return num.asarray(list(map(dtype, x)), dtype=dtype) 

3101 else: 

3102 a = num.asarray(x, dtype=dtype) 

3103 if not a.ndim == 1: 

3104 raise ValueError('could not convert to 1D array') 

3105 return a 

3106 

3107 

3108cached_coefficients = {} 

3109 

3110 

3111def _get_cached_filter_coefs(order, corners, btype): 

3112 ck = (order, tuple(corners), btype) 

3113 if ck not in cached_coefficients: 

3114 if len(corners) == 0: 

3115 cached_coefficients[ck] = signal.butter( 

3116 order, corners[0], btype=btype) 

3117 else: 

3118 cached_coefficients[ck] = signal.butter( 

3119 order, corners, btype=btype) 

3120 

3121 return cached_coefficients[ck] 

3122 

3123 

3124class _globals(object): 

3125 _numpy_has_correlate_flip_bug = None 

3126 

3127 

3128def _default_key(tr): 

3129 return (tr.network, tr.station, tr.location, tr.channel) 

3130 

3131 

3132def numpy_has_correlate_flip_bug(): 

3133 ''' 

3134 Check if NumPy's correlate function reveals old behaviour. 

3135 ''' 

3136 

3137 if _globals._numpy_has_correlate_flip_bug is None: 

3138 a = num.array([0, 0, 1, 0, 0, 0, 0]) 

3139 b = num.array([0, 0, 0, 0, 1, 0, 0, 0]) 

3140 ab = num.correlate(a, b, mode='same') 

3141 ba = num.correlate(b, a, mode='same') 

3142 _globals._numpy_has_correlate_flip_bug = num.all(ab == ba) 

3143 

3144 return _globals._numpy_has_correlate_flip_bug 

3145 

3146 

3147def numpy_correlate_fixed(a, b, mode='valid', use_fft=False): 

3148 ''' 

3149 Call :py:func:`numpy.correlate` with fixes. 

3150 

3151 c[k] = sum_i a[i+k] * conj(b[i]) 

3152 

3153 Note that the result produced by newer numpy.correlate is always flipped 

3154 with respect to the formula given in its documentation (if ascending k 

3155 assumed for the output). 

3156 ''' 

3157 

3158 if use_fft: 

3159 if a.size < b.size: 

3160 c = signal.fftconvolve(b[::-1], a, mode=mode) 

3161 else: 

3162 c = signal.fftconvolve(a, b[::-1], mode=mode) 

3163 return c 

3164 

3165 else: 

3166 buggy = numpy_has_correlate_flip_bug() 

3167 

3168 a = num.asarray(a) 

3169 b = num.asarray(b) 

3170 

3171 if buggy: 

3172 b = num.conj(b) 

3173 

3174 c = num.correlate(a, b, mode=mode) 

3175 

3176 if buggy and a.size < b.size: 

3177 return c[::-1] 

3178 else: 

3179 return c 

3180 

3181 

3182def numpy_correlate_emulate(a, b, mode='valid'): 

3183 ''' 

3184 Slow version of :py:func:`numpy.correlate` for comparison. 

3185 ''' 

3186 

3187 a = num.asarray(a) 

3188 b = num.asarray(b) 

3189 kmin = -(b.size-1) 

3190 klen = a.size-kmin 

3191 kmin, kmax = numpy_correlate_lag_range(a, b, mode=mode) 

3192 kmin = int(kmin) 

3193 kmax = int(kmax) 

3194 klen = kmax - kmin + 1 

3195 c = num.zeros(klen, dtype=num.find_common_type((b.dtype, a.dtype), ())) 

3196 for k in range(kmin, kmin+klen): 

3197 imin = max(0, -k) 

3198 ilen = min(b.size, a.size-k) - imin 

3199 c[k-kmin] = num.sum( 

3200 a[imin+k:imin+ilen+k] * num.conj(b[imin:imin+ilen])) 

3201 

3202 return c 

3203 

3204 

3205def numpy_correlate_lag_range(a, b, mode='valid', use_fft=False): 

3206 ''' 

3207 Get range of lags for which :py:func:`numpy.correlate` produces values. 

3208 ''' 

3209 

3210 a = num.asarray(a) 

3211 b = num.asarray(b) 

3212 

3213 kmin = -(b.size-1) 

3214 if mode == 'full': 

3215 klen = a.size-kmin 

3216 elif mode == 'same': 

3217 klen = max(a.size, b.size) 

3218 kmin += (a.size+b.size-1 - max(a.size, b.size)) // 2 + \ 

3219 int(not use_fft and a.size % 2 == 0 and b.size > a.size) 

3220 elif mode == 'valid': 

3221 klen = abs(a.size - b.size) + 1 

3222 kmin += min(a.size, b.size) - 1 

3223 

3224 return kmin, kmin + klen - 1 

3225 

3226 

3227def autocorr(x, nshifts): 

3228 ''' 

3229 Compute biased estimate of the first autocorrelation coefficients. 

3230 

3231 :param x: input array 

3232 :param nshifts: number of coefficients to calculate 

3233 ''' 

3234 

3235 mean = num.mean(x) 

3236 std = num.std(x) 

3237 n = x.size 

3238 xdm = x - mean 

3239 r = num.zeros(nshifts) 

3240 for k in range(nshifts): 

3241 r[k] = 1./((n-num.abs(k))*std) * num.sum(xdm[:n-k] * xdm[k:]) 

3242 

3243 return r 

3244 

3245 

3246def yulewalker(x, order): 

3247 ''' 

3248 Compute autoregression coefficients using Yule-Walker method. 

3249 

3250 :param x: input array 

3251 :param order: number of coefficients to produce 

3252 

3253 A biased estimate of the autocorrelation is used. The Yule-Walker equations 

3254 are solved by :py:func:`numpy.linalg.inv` instead of Levinson-Durbin 

3255 recursion which is normally used. 

3256 ''' 

3257 

3258 gamma = autocorr(x, order+1) 

3259 d = gamma[1:1+order] 

3260 a = num.zeros((order, order)) 

3261 gamma2 = num.concatenate((gamma[::-1], gamma[1:order])) 

3262 for i in range(order): 

3263 ioff = order-i 

3264 a[i, :] = gamma2[ioff:ioff+order] 

3265 

3266 return num.dot(num.linalg.inv(a), -d) 

3267 

3268 

3269def moving_avg(x, n): 

3270 n = int(n) 

3271 cx = x.cumsum() 

3272 nn = len(x) 

3273 y = num.zeros(nn, dtype=cx.dtype) 

3274 y[n//2:n//2+(nn-n)] = (cx[n:]-cx[:-n])/n 

3275 y[:n//2] = y[n//2] 

3276 y[n//2+(nn-n):] = y[n//2+(nn-n)-1] 

3277 return y 

3278 

3279 

3280def moving_sum(x, n, mode='valid'): 

3281 n = int(n) 

3282 cx = x.cumsum() 

3283 nn = len(x) 

3284 

3285 if mode == 'valid': 

3286 if nn-n+1 <= 0: 

3287 return num.zeros(0, dtype=cx.dtype) 

3288 y = num.zeros(nn-n+1, dtype=cx.dtype) 

3289 y[0] = cx[n-1] 

3290 y[1:nn-n+1] = cx[n:nn]-cx[0:nn-n] 

3291 

3292 if mode == 'full': 

3293 y = num.zeros(nn+n-1, dtype=cx.dtype) 

3294 if n <= nn: 

3295 y[0:n] = cx[0:n] 

3296 y[n:nn] = cx[n:nn]-cx[0:nn-n] 

3297 y[nn:nn+n-1] = cx[-1]-cx[nn-n:nn-1] 

3298 else: 

3299 y[0:nn] = cx[0:nn] 

3300 y[nn:n] = cx[nn-1] 

3301 y[n:nn+n-1] = cx[nn-1] - cx[0:nn-1] 

3302 

3303 if mode == 'same': 

3304 n1 = (n-1)//2 

3305 y = num.zeros(nn, dtype=cx.dtype) 

3306 if n <= nn: 

3307 y[0:n-n1] = cx[n1:n] 

3308 y[n-n1:nn-n1] = cx[n:nn]-cx[0:nn-n] 

3309 y[nn-n1:nn] = cx[nn-1] - cx[nn-n:nn-n+n1] 

3310 else: 

3311 y[0:max(0, nn-n1)] = cx[min(n1, nn):nn] 

3312 y[max(nn-n1, 0):min(n-n1, nn)] = cx[nn-1] 

3313 y[min(n-n1, nn):nn] = cx[nn-1] - cx[0:max(0, nn-(n-n1))] 

3314 

3315 return y 

3316 

3317 

3318def nextpow2(i): 

3319 return 2**int(math.ceil(math.log(i)/math.log(2.))) 

3320 

3321 

3322def snapper_w_offset(nmax, offset, delta, snapfun=math.ceil): 

3323 def snap(x): 

3324 return max(0, min(int(snapfun((x-offset)/delta)), nmax)) 

3325 return snap 

3326 

3327 

3328def snapper(nmax, delta, snapfun=math.ceil): 

3329 def snap(x): 

3330 return max(0, min(int(snapfun(x/delta)), nmax)) 

3331 return snap 

3332 

3333 

3334def apply_costaper(a, b, c, d, y, x0, dx): 

3335 hi = snapper_w_offset(y.size, x0, dx) 

3336 y[:hi(a)] = 0. 

3337 y[hi(a):hi(b)] *= 0.5 \ 

3338 - 0.5*num.cos((dx*num.arange(hi(a), hi(b))-(a-x0))/(b-a)*num.pi) 

3339 y[hi(c):hi(d)] *= 0.5 \ 

3340 + 0.5*num.cos((dx*num.arange(hi(c), hi(d))-(c-x0))/(d-c)*num.pi) 

3341 y[hi(d):] = 0. 

3342 

3343 

3344def span_costaper(a, b, c, d, y, x0, dx): 

3345 hi = snapper_w_offset(y.size, x0, dx) 

3346 return hi(a), hi(d) - hi(a) 

3347 

3348 

3349def costaper(a, b, c, d, nfreqs, deltaf): 

3350 hi = snapper(nfreqs, deltaf) 

3351 tap = num.zeros(nfreqs) 

3352 tap[hi(a):hi(b)] = 0.5 \ 

3353 - 0.5*num.cos((deltaf*num.arange(hi(a), hi(b))-a)/(b-a)*num.pi) 

3354 tap[hi(b):hi(c)] = 1. 

3355 tap[hi(c):hi(d)] = 0.5 \ 

3356 + 0.5*num.cos((deltaf*num.arange(hi(c), hi(d))-c)/(d-c)*num.pi) 

3357 

3358 return tap 

3359 

3360 

3361def t2ind(t, tdelta, snap=round): 

3362 return int(snap(t/tdelta)) 

3363 

3364 

3365def hilbert(x, N=None): 

3366 ''' 

3367 Return the hilbert transform of x of length N. 

3368 

3369 (from scipy.signal, but changed to use fft and ifft from numpy.fft) 

3370 ''' 

3371 

3372 x = num.asarray(x) 

3373 if N is None: 

3374 N = len(x) 

3375 if N <= 0: 

3376 raise ValueError("N must be positive.") 

3377 if num.iscomplexobj(x): 

3378 logger.warning('imaginary part of x ignored.') 

3379 x = num.real(x) 

3380 

3381 Xf = num.fft.fft(x, N, axis=0) 

3382 h = num.zeros(N) 

3383 if N % 2 == 0: 

3384 h[0] = h[N//2] = 1 

3385 h[1:N//2] = 2 

3386 else: 

3387 h[0] = 1 

3388 h[1:(N+1)//2] = 2 

3389 

3390 if len(x.shape) > 1: 

3391 h = h[:, num.newaxis] 

3392 x = num.fft.ifft(Xf*h) 

3393 return x 

3394 

3395 

3396def near(a, b, eps): 

3397 return abs(a-b) < eps 

3398 

3399 

3400def coroutine(func): 

3401 def wrapper(*args, **kwargs): 

3402 gen = func(*args, **kwargs) 

3403 next(gen) 

3404 return gen 

3405 

3406 wrapper.__name__ = func.__name__ 

3407 wrapper.__dict__ = func.__dict__ 

3408 wrapper.__doc__ = func.__doc__ 

3409 return wrapper 

3410 

3411 

3412class States(object): 

3413 ''' 

3414 Utility to store channel-specific state in coroutines. 

3415 ''' 

3416 

3417 def __init__(self): 

3418 self._states = {} 

3419 

3420 def get(self, tr): 

3421 k = tr.nslc_id 

3422 if k in self._states: 

3423 tmin, deltat, dtype, value = self._states[k] 

3424 if (near(tmin, tr.tmin, deltat/100.) 

3425 and near(deltat, tr.deltat, deltat/10000.) 

3426 and dtype == tr.ydata.dtype): 

3427 

3428 return value 

3429 

3430 return None 

3431 

3432 def set(self, tr, value): 

3433 k = tr.nslc_id 

3434 if k in self._states and self._states[k][-1] is not value: 

3435 self.free(self._states[k][-1]) 

3436 

3437 self._states[k] = (tr.tmax+tr.deltat, tr.deltat, tr.ydata.dtype, value) 

3438 

3439 def free(self, value): 

3440 pass 

3441 

3442 

3443@coroutine 

3444def co_list_append(list): 

3445 while True: 

3446 list.append((yield)) 

3447 

3448 

3449class ScipyBug(Exception): 

3450 pass 

3451 

3452 

3453@coroutine 

3454def co_lfilter(target, b, a): 

3455 ''' 

3456 Successively filter broken continuous trace data (coroutine). 

3457 

3458 Create coroutine which takes :py:class:`Trace` objects, filters their data 

3459 through :py:func:`scipy.signal.lfilter` and sends new :py:class:`Trace` 

3460 objects containing the filtered data to target. This is useful, if one 

3461 wants to filter a long continuous time series, which is split into many 

3462 successive traces without producing filter artifacts at trace boundaries. 

3463 

3464 Filter states are kept *per channel*, specifically, for each (network, 

3465 station, location, channel) combination occuring in the input traces, a 

3466 separate state is created and maintained. This makes it possible to filter 

3467 multichannel or multistation data with only one :py:func:`co_lfilter` 

3468 instance. 

3469 

3470 Filter state is reset, when gaps occur. 

3471 

3472 Use it like this:: 

3473 

3474 from pyrocko.trace import co_lfilter, co_list_append 

3475 

3476 filtered_traces = [] 

3477 pipe = co_lfilter(co_list_append(filtered_traces), a, b) 

3478 for trace in traces: 

3479 pipe.send(trace) 

3480 

3481 pipe.close() 

3482 

3483 ''' 

3484 

3485 try: 

3486 states = States() 

3487 output = None 

3488 while True: 

3489 input = (yield) 

3490 

3491 zi = states.get(input) 

3492 if zi is None: 

3493 zi = num.zeros(max(len(a), len(b))-1, dtype=float) 

3494 

3495 output = input.copy(data=False) 

3496 try: 

3497 ydata, zf = signal.lfilter(b, a, input.get_ydata(), zi=zi) 

3498 except ValueError: 

3499 raise ScipyBug( 

3500 'signal.lfilter failed: could be related to a bug ' 

3501 'in some older scipy versions, e.g. on opensuse42.1') 

3502 

3503 output.set_ydata(ydata) 

3504 states.set(input, zf) 

3505 target.send(output) 

3506 

3507 except GeneratorExit: 

3508 target.close() 

3509 

3510 

3511def co_antialias(target, q, n=None, ftype='fir'): 

3512 b, a, n = util.decimate_coeffs(q, n, ftype) 

3513 anti = co_lfilter(target, b, a) 

3514 return anti 

3515 

3516 

3517@coroutine 

3518def co_dropsamples(target, q, nfir): 

3519 try: 

3520 states = States() 

3521 while True: 

3522 tr = (yield) 

3523 newdeltat = q * tr.deltat 

3524 ioffset = states.get(tr) 

3525 if ioffset is None: 

3526 # for fir filter, the first nfir samples are pulluted by 

3527 # boundary effects; cut it off. 

3528 # for iir this may be (much) more, we do not correct for that. 

3529 # put sample instances to a time which is a multiple of the 

3530 # new sampling interval. 

3531 newtmin_want = math.ceil( 

3532 (tr.tmin+(nfir+1)*tr.deltat) / newdeltat) * newdeltat \ 

3533 - (nfir/2*tr.deltat) 

3534 ioffset = int(round((newtmin_want - tr.tmin)/tr.deltat)) 

3535 if ioffset < 0: 

3536 ioffset = ioffset % q 

3537 

3538 newtmin_have = tr.tmin + ioffset * tr.deltat 

3539 newtr = tr.copy(data=False) 

3540 newtr.deltat = newdeltat 

3541 # because the fir kernel shifts data by nfir/2 samples: 

3542 newtr.tmin = newtmin_have - (nfir/2*tr.deltat) 

3543 newtr.set_ydata(tr.get_ydata()[ioffset::q].copy()) 

3544 states.set(tr, (ioffset % q - tr.data_len() % q) % q) 

3545 target.send(newtr) 

3546 

3547 except GeneratorExit: 

3548 target.close() 

3549 

3550 

3551def co_downsample(target, q, n=None, ftype='fir'): 

3552 ''' 

3553 Successively downsample broken continuous trace data (coroutine). 

3554 

3555 Create coroutine which takes :py:class:`Trace` objects, downsamples their 

3556 data and sends new :py:class:`Trace` objects containing the downsampled 

3557 data to target. This is useful, if one wants to downsample a long 

3558 continuous time series, which is split into many successive traces without 

3559 producing filter artifacts and gaps at trace boundaries. 

3560 

3561 Filter states are kept *per channel*, specifically, for each (network, 

3562 station, location, channel) combination occuring in the input traces, a 

3563 separate state is created and maintained. This makes it possible to filter 

3564 multichannel or multistation data with only one :py:func:`co_lfilter` 

3565 instance. 

3566 

3567 Filter state is reset, when gaps occur. The sampling instances are choosen 

3568 so that they occur at (or as close as possible) to even multiples of the 

3569 sampling interval of the downsampled trace (based on system time). 

3570 ''' 

3571 

3572 b, a, n = util.decimate_coeffs(q, n, ftype) 

3573 return co_antialias(co_dropsamples(target, q, n), q, n, ftype) 

3574 

3575 

3576@coroutine 

3577def co_downsample_to(target, deltat): 

3578 

3579 decimators = {} 

3580 try: 

3581 while True: 

3582 tr = (yield) 

3583 ratio = deltat / tr.deltat 

3584 rratio = round(ratio) 

3585 if abs(rratio - ratio)/ratio > 0.0001: 

3586 raise util.UnavailableDecimation('ratio = %g' % ratio) 

3587 

3588 deci_seq = tuple(x for x in util.decitab(int(rratio)) if x != 1) 

3589 if deci_seq not in decimators: 

3590 pipe = target 

3591 for q in deci_seq[::-1]: 

3592 pipe = co_downsample(pipe, q) 

3593 

3594 decimators[deci_seq] = pipe 

3595 

3596 decimators[deci_seq].send(tr) 

3597 

3598 except GeneratorExit: 

3599 for g in decimators.values(): 

3600 g.close() 

3601 

3602 

3603class DomainChoice(StringChoice): 

3604 choices = [ 

3605 'time_domain', 

3606 'frequency_domain', 

3607 'envelope', 

3608 'absolute', 

3609 'cc_max_norm'] 

3610 

3611 

3612class MisfitSetup(Object): 

3613 ''' 

3614 Contains misfit setup to be used in :py:func:`trace.misfit` 

3615 

3616 :param description: Description of the setup 

3617 :param norm: L-norm classifier 

3618 :param taper: Object of :py:class:`Taper` 

3619 :param filter: Object of :py:class:`FrequencyResponse` 

3620 :param domain: ['time_domain', 'frequency_domain', 'envelope', 'absolute', 

3621 'cc_max_norm'] 

3622 

3623 Can be dumped to a yaml file. 

3624 ''' 

3625 

3626 xmltagname = 'misfitsetup' 

3627 description = String.T(optional=True) 

3628 norm = Int.T(optional=False) 

3629 taper = Taper.T(optional=False) 

3630 filter = FrequencyResponse.T(optional=True) 

3631 domain = DomainChoice.T(default='time_domain') 

3632 

3633 

3634def equalize_sampling_rates(trace_1, trace_2): 

3635 ''' 

3636 Equalize sampling rates of two traces (reduce higher sampling rate to 

3637 lower). 

3638 

3639 :param trace_1: :py:class:`Trace` object 

3640 :param trace_2: :py:class:`Trace` object 

3641 

3642 Returns a copy of the resampled trace if resampling is needed. 

3643 ''' 

3644 

3645 if same_sampling_rate(trace_1, trace_2): 

3646 return trace_1, trace_2 

3647 

3648 if trace_1.deltat < trace_2.deltat: 

3649 t1_out = trace_1.copy() 

3650 t1_out.downsample_to(deltat=trace_2.deltat, snap=True) 

3651 logger.debug('Trace downsampled (return copy of trace): %s' 

3652 % '.'.join(t1_out.nslc_id)) 

3653 return t1_out, trace_2 

3654 

3655 elif trace_1.deltat > trace_2.deltat: 

3656 t2_out = trace_2.copy() 

3657 t2_out.downsample_to(deltat=trace_1.deltat, snap=True) 

3658 logger.debug('Trace downsampled (return copy of trace): %s' 

3659 % '.'.join(t2_out.nslc_id)) 

3660 return trace_1, t2_out 

3661 

3662 

3663def Lx_norm(u, v, norm=2): 

3664 ''' 

3665 Calculate the misfit denominator *m* and the normalization devisor *n* 

3666 according to norm. 

3667 

3668 The normalization divisor *n* is calculated from ``v``. 

3669 

3670 :param u: :py:class:`numpy.array` 

3671 :param v: :py:class:`numpy.array` 

3672 :param norm: (default = 2) 

3673 

3674 ``u`` and ``v`` must be of same size. 

3675 ''' 

3676 

3677 if norm == 1: 

3678 return ( 

3679 num.sum(num.abs(v-u)), 

3680 num.sum(num.abs(v))) 

3681 

3682 elif norm == 2: 

3683 return ( 

3684 num.sqrt(num.sum((v-u)**2)), 

3685 num.sqrt(num.sum(v**2))) 

3686 

3687 else: 

3688 return ( 

3689 num.power(num.sum(num.abs(num.power(v - u, norm))), 1./norm), 

3690 num.power(num.sum(num.abs(num.power(v, norm))), 1./norm)) 

3691 

3692 

3693def do_downsample(tr, deltat): 

3694 if abs(tr.deltat - deltat) / tr.deltat > 1e-6: 

3695 tr = tr.copy() 

3696 tr.downsample_to(deltat, snap=True, demean=False) 

3697 else: 

3698 if tr.tmin/tr.deltat > 1e-6 or tr.tmax/tr.deltat > 1e-6: 

3699 tr = tr.copy() 

3700 tr.snap() 

3701 return tr 

3702 

3703 

3704def do_extend(tr, tmin, tmax): 

3705 if tmin < tr.tmin or tmax > tr.tmax: 

3706 tr = tr.copy() 

3707 tr.extend(tmin=tmin, tmax=tmax, fillmethod='repeat') 

3708 

3709 return tr 

3710 

3711 

3712def do_pre_taper(tr, taper): 

3713 return tr.taper(taper, inplace=False, chop=True) 

3714 

3715 

3716def do_fft(tr, filter): 

3717 if filter is None: 

3718 return tr 

3719 else: 

3720 ndata = tr.ydata.size 

3721 nfft = nextpow2(ndata) 

3722 padded = num.zeros(nfft, dtype=float) 

3723 padded[:ndata] = tr.ydata 

3724 spectrum = num.fft.rfft(padded) 

3725 df = 1.0 / (tr.deltat * nfft) 

3726 frequencies = num.arange(spectrum.size)*df 

3727 return [tr, frequencies, spectrum] 

3728 

3729 

3730def do_filter(inp, filter): 

3731 if filter is None: 

3732 return inp 

3733 else: 

3734 tr, frequencies, spectrum = inp 

3735 spectrum *= filter.evaluate(frequencies) 

3736 return [tr, frequencies, spectrum] 

3737 

3738 

3739def do_ifft(inp): 

3740 if isinstance(inp, Trace): 

3741 return inp 

3742 else: 

3743 tr, _, spectrum = inp 

3744 ndata = tr.ydata.size 

3745 tr = tr.copy(data=False) 

3746 tr.set_ydata(num.fft.irfft(spectrum)[:ndata]) 

3747 return tr 

3748 

3749 

3750def check_alignment(t1, t2): 

3751 if abs(t1.tmin-t2.tmin) > t1.deltat * 1e-4 or \ 

3752 abs(t1.tmax - t2.tmax) > t1.deltat * 1e-4 or \ 

3753 t1.ydata.shape != t2.ydata.shape: 

3754 raise MisalignedTraces( 

3755 'Cannot calculate misfit of %s and %s due to misaligned ' 

3756 'traces.' % ('.'.join(t1.nslc_id), '.'.join(t2.nslc_id)))