# http://pyrocko.org - GPLv3
#
# The Pyrocko Developers, 21st Century
# ---|P------/S----------~Lg----------
'''
This module provides basic signal processing for seismic traces.
'''
from __future__ import division, absolute_import

import time
import math
import copy
import logging

import numpy as num
from scipy import signal

from . import util, evalresp, orthodrome, pchain, model
from .util import reuse, UnavailableDecimation
from .guts import Object, Float, Int, String, Complex, Tuple, List, \
    StringChoice, Timestamp
from .guts_array import Array

try:
    newstr = unicode
except NameError:
    newstr = str


UnavailableDecimation  # noqa

guts_prefix = 'pf'

logger = logging.getLogger('pyrocko.trace')


class Trace(Object):

    '''
    Create new trace object.

    A ``Trace`` object represents a single continuous strip of evenly sampled
    time series data. It is built from a 1D NumPy array containing the data
    samples and some attributes describing its beginning and ending time, its
    sampling rate and four string identifiers (its network, station, location
    and channel code).

    :param network: network code
    :param station: station code
    :param location: location code
    :param channel: channel code
    :param tmin: system time of first sample in [s]
    :param tmax: system time of last sample in [s] (if set to ``None`` it is
        computed from length of ``ydata``)
    :param deltat: sampling interval in [s]
    :param ydata: 1D numpy array with data samples (can be ``None`` when
        ``tmax`` is not ``None``)
    :param mtime: optional modification time

    :param meta: additional meta information (not used, but maintained by the
        library)

    The length of the network, station, location and channel codes is not
    restricted by this software, but data formats like SAC, Mini-SEED or GSE
    have different limits on the lengths of these codes. The codes set here
    are silently truncated when the trace is stored.
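
    A minimal usage sketch (station, channel and data values are
    illustrative)::

        import numpy as num
        from pyrocko import trace

        tr = trace.Trace(
            station='TEST', channel='Z', deltat=0.5, tmin=0.,
            ydata=num.random.random(100))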
    '''

    network = String.T(default='')
    station = String.T(default='STA')
    location = String.T(default='')
    channel = String.T(default='')

    tmin = Timestamp.T(default=Timestamp.D('1970-01-01 00:00:00'))
    tmax = Timestamp.T()

    deltat = Float.T(default=1.0)
    ydata = Array.T(optional=True, shape=(None,), serialize_as='base64+meta')

    mtime = Timestamp.T(optional=True)

    cached_frequencies = {}

    def __init__(self, network='', station='STA', location='', channel='',
                 tmin=0., tmax=None, deltat=1., ydata=None, mtime=None,
                 meta=None):

        Object.__init__(self, init_props=False)

        time_float = util.get_time_float()

        if not isinstance(tmin, time_float):
            tmin = Trace.tmin.regularize_extra(tmin)

        if tmax is not None and not isinstance(tmax, time_float):
            tmax = Trace.tmax.regularize_extra(tmax)

        if mtime is not None and not isinstance(mtime, time_float):
            mtime = Trace.mtime.regularize_extra(mtime)

        self._growbuffer = None

        tmin = time_float(tmin)
        if tmax is not None:
            tmax = time_float(tmax)

        if mtime is None:
            mtime = time_float(time.time())

        self.network, self.station, self.location, self.channel = [
            reuse(x) for x in (network, station, location, channel)]

        self.tmin = tmin
        self.deltat = deltat

        if tmax is None:
            if ydata is not None:
                self.tmax = self.tmin + (ydata.size-1)*self.deltat
            else:
                raise Exception(
                    'fixme: trace must be created with tmax or ydata')
        else:
            n = int(round((tmax - self.tmin) / self.deltat)) + 1
            self.tmax = self.tmin + (n - 1) * self.deltat

        self.meta = meta
        self.ydata = ydata
        self.mtime = mtime
        self._update_ids()
        self.file = None
        self._pchain = None

    def __str__(self):
        fmt = min(9, max(0, -int(math.floor(math.log10(self.deltat)))))
        s = 'Trace (%s, %s, %s, %s)\n' % self.nslc_id
        s += ' timerange: %s - %s\n' % (
            util.time_to_str(self.tmin, format=fmt),
            util.time_to_str(self.tmax, format=fmt))

        s += ' delta t: %g\n' % self.deltat
        if self.meta:
            for k in sorted(self.meta.keys()):
                s += ' %s: %s\n' % (k, self.meta[k])
        return s

    def __getstate__(self):
        return (self.network, self.station, self.location, self.channel,
                self.tmin, self.tmax, self.deltat, self.mtime,
                self.ydata, self.meta)

    def __setstate__(self, state):
        if len(state) == 10:
            self.network, self.station, self.location, self.channel, \
                self.tmin, self.tmax, self.deltat, self.mtime, \
                self.ydata, self.meta = state

        else:
            # backward compatibility with old behaviour
            self.network, self.station, self.location, self.channel, \
                self.tmin, self.tmax, self.deltat, self.mtime = state
            self.ydata = None
            self.meta = None

        self._growbuffer = None
        self._update_ids()

    def name(self):
        '''
        Get a short string description.
        '''

        s = '%s.%s.%s.%s, %s, %s' % (self.nslc_id + (
            util.time_to_str(self.tmin),
            util.time_to_str(self.tmax)))

        return s

    def __eq__(self, other):
        return (
            isinstance(other, Trace)
            and self.network == other.network
            and self.station == other.station
            and self.location == other.location
            and self.channel == other.channel
            and (abs(self.deltat - other.deltat)
                 < (self.deltat + other.deltat)*1e-6)
            and abs(self.tmin-other.tmin) < self.deltat*0.01
            and abs(self.tmax-other.tmax) < self.deltat*0.01
            and num.all(self.ydata == other.ydata))

    def almost_equal(self, other, rtol=1e-5, atol=0.0):
        return (
            self.network == other.network
            and self.station == other.station
            and self.location == other.location
            and self.channel == other.channel
            and (abs(self.deltat - other.deltat)
                 < (self.deltat + other.deltat)*1e-6)
            and abs(self.tmin-other.tmin) < self.deltat*0.01
            and abs(self.tmax-other.tmax) < self.deltat*0.01
            and num.allclose(self.ydata, other.ydata, rtol=rtol, atol=atol))

    def assert_almost_equal(self, other, rtol=1e-5, atol=0.0):

        assert self.network == other.network, \
            'network codes differ: %s, %s' % (self.network, other.network)
        assert self.station == other.station, \
            'station codes differ: %s, %s' % (self.station, other.station)
        assert self.location == other.location, \
            'location codes differ: %s, %s' % (self.location, other.location)
        assert self.channel == other.channel, 'channel codes differ'
        assert (abs(self.deltat - other.deltat)
                < (self.deltat + other.deltat)*1e-6), \
            'sampling intervals differ %g, %g' % (self.deltat, other.deltat)
        assert abs(self.tmin-other.tmin) < self.deltat*0.01, \
            'start times differ: %s, %s' % (
                util.time_to_str(self.tmin), util.time_to_str(other.tmin))
        assert abs(self.tmax-other.tmax) < self.deltat*0.01, \
            'end times differ: %s, %s' % (
                util.time_to_str(self.tmax), util.time_to_str(other.tmax))

        assert num.allclose(self.ydata, other.ydata, rtol=rtol, atol=atol), \
            'trace values differ'

    def __hash__(self):
        return id(self)

    def __call__(self, t, clip=False, snap=round):
        it = int(snap((t-self.tmin)/self.deltat))
        if clip:
            it = max(0, min(it, self.ydata.size-1))
        else:
            if it < 0 or self.ydata.size <= it:
                raise IndexError()

        return self.tmin+it*self.deltat, self.ydata[it]

    def interpolate(self, t, clip=False):
        '''
        Value of trace between supporting points through linear interpolation.

        :param t: time instant
        :param clip: whether to clip indices to trace ends
        '''

        t0, y0 = self(t, clip=clip, snap=math.floor)
        t1, y1 = self(t, clip=clip, snap=math.ceil)
        if t0 == t1:
            return y0
        else:
            return y0+(t-t0)/(t1-t0)*(y1-y0)

    def index_clip(self, i):
        '''
        Clip index to valid range.
        '''

        return min(max(0, i), self.ydata.size)

    def add(self, other, interpolate=True, left=0., right=0.):
        '''
        Add values of other trace (self += other).

        Add values of ``other`` trace to the values of ``self``, where it
        intersects with ``other``. This method does not change the extent of
        ``self``. If ``interpolate`` is ``True`` (the default), the values of
        ``other`` to be added are interpolated at sampling instants of
        ``self``. Linear interpolation is performed. In this case the sampling
        rate of ``other`` must be equal to or lower than that of ``self``. If
        ``interpolate`` is ``False``, the sampling rates of the two traces must
        match.
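
        A minimal sketch (assumes ``tr`` and ``other`` are overlapping
        :py:class:`Trace` objects)::

            tr.add(other, interpolate=True)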
        '''

        if interpolate:
            assert self.deltat <= other.deltat \
                or same_sampling_rate(self, other)

            other_xdata = other.get_xdata()
            xdata = self.get_xdata()
            self.ydata += num.interp(
                xdata, other_xdata, other.ydata, left=left, right=right)
        else:
            assert self.deltat == other.deltat
            ioff = int(round((other.tmin-self.tmin)/self.deltat))
            ibeg = max(0, ioff)
            iend = min(self.data_len(), ioff+other.data_len())
            self.ydata[ibeg:iend] += other.ydata[ibeg-ioff:iend-ioff]

    def mult(self, other, interpolate=True):
        '''
        Multiply with values of other trace ``(self *= other)``.

        Multiply values of ``self`` by the values of ``other`` trace, where
        it intersects with ``other``. This method does not change the extent
        of ``self``. If ``interpolate`` is ``True`` (the default), the values
        of ``other`` to be multiplied are interpolated at sampling instants
        of ``self``. Linear interpolation is performed. In this case the
        sampling rate of ``other`` must be equal to or lower than that of
        ``self``. If ``interpolate`` is ``False``, the sampling rates of the
        two traces must match.
        '''

        if interpolate:
            assert self.deltat <= other.deltat or \
                same_sampling_rate(self, other)

            other_xdata = other.get_xdata()
            xdata = self.get_xdata()
            self.ydata *= num.interp(
                xdata, other_xdata, other.ydata, left=0., right=0.)
        else:
            assert self.deltat == other.deltat
            ibeg1 = int(round((other.tmin-self.tmin)/self.deltat))
            ibeg2 = int(round((self.tmin-other.tmin)/self.deltat))
            iend1 = int(round((other.tmax-self.tmin)/self.deltat))+1
            iend2 = int(round((self.tmax-other.tmin)/self.deltat))+1

            ibeg1 = self.index_clip(ibeg1)
            iend1 = self.index_clip(iend1)
            ibeg2 = self.index_clip(ibeg2)
            iend2 = self.index_clip(iend2)

            self.ydata[ibeg1:iend1] *= other.ydata[ibeg2:iend2]

    def max(self):
        '''
        Get time and value of data maximum.
        '''

        i = num.argmax(self.ydata)
        return self.tmin + i*self.deltat, self.ydata[i]

    def min(self):
        '''
        Get time and value of data minimum.
        '''

        i = num.argmin(self.ydata)
        return self.tmin + i*self.deltat, self.ydata[i]

    def absmax(self):
        '''
        Get time and value of maximum of the absolute of data.
        '''

        tmi, mi = self.min()
        tma, ma = self.max()
        if abs(mi) > abs(ma):
            return tmi, abs(mi)
        else:
            return tma, abs(ma)

    def set_codes(
            self, network=None, station=None, location=None, channel=None):

        '''
        Set network, station, location, and channel codes.
        '''

        if network is not None:
            self.network = network
        if station is not None:
            self.station = station
        if location is not None:
            self.location = location
        if channel is not None:
            self.channel = channel

        self._update_ids()

    def set_network(self, network):
        self.network = network
        self._update_ids()

    def set_station(self, station):
        self.station = station
        self._update_ids()

    def set_location(self, location):
        self.location = location
        self._update_ids()

    def set_channel(self, channel):
        self.channel = channel
        self._update_ids()

    def overlaps(self, tmin, tmax):
        '''
        Check if trace has overlap with a given time span.
        '''
        return not (tmax < self.tmin or self.tmax < tmin)

    def is_relevant(self, tmin, tmax, selector=None):
        '''
        Check if trace has overlap with a given time span and matches a
        condition callback. (internal use)
        '''

        return not (tmax <= self.tmin or self.tmax < tmin) \
            and (selector is None or selector(self))

    def _update_ids(self):
        '''
        Update dependent ids.
        '''

        self.full_id = (
            self.network, self.station, self.location, self.channel,
            self.tmin)
        self.nslc_id = reuse(
            (self.network, self.station, self.location, self.channel))

    def prune_from_reuse_cache(self):
        util.deuse(self.nslc_id)
        util.deuse(self.network)
        util.deuse(self.station)
        util.deuse(self.location)
        util.deuse(self.channel)

    def set_mtime(self, mtime):
        '''
        Set modification time of the trace.
        '''

        self.mtime = mtime

    def get_xdata(self):
        '''
        Create array for time axis.
        '''

        if self.ydata is None:
            raise NoData()

        return self.tmin \
            + num.arange(len(self.ydata), dtype=num.float64) * self.deltat

    def get_ydata(self):
        '''
        Get data array.
        '''

        if self.ydata is None:
            raise NoData()

        return self.ydata

    def set_ydata(self, new_ydata):
        '''
        Replace data array.
        '''

        self.drop_growbuffer()
        self.ydata = new_ydata
        self.tmax = self.tmin+(len(self.ydata)-1)*self.deltat

    def data_len(self):
        if self.ydata is not None:
            return self.ydata.size
        else:
            return int(round((self.tmax-self.tmin)/self.deltat)) + 1

    def drop_data(self):
        '''
        Forget data, make dataless trace.
        '''

        self.drop_growbuffer()
        self.ydata = None

    def drop_growbuffer(self):
        '''
        Detach the trace's grow buffer.
        '''

        self._growbuffer = None
        self._pchain = None

    def copy(self, data=True):
        '''
        Make a deep copy of the trace.
        '''

        tracecopy = copy.copy(self)
        tracecopy.drop_growbuffer()
        if data:
            tracecopy.ydata = self.ydata.copy()
        tracecopy.meta = copy.deepcopy(self.meta)
        return tracecopy

    def crop_zeros(self):
        '''
        Remove any zeros at beginning and end.
        '''

        indices = num.where(self.ydata != 0.0)[0]
        if indices.size == 0:
            raise NoData()

        ibeg = indices[0]
        iend = indices[-1]+1
        if ibeg == 0 and iend == self.ydata.size:
            return

        self.drop_growbuffer()
        self.ydata = self.ydata[ibeg:iend].copy()
        self.tmin = self.tmin+ibeg*self.deltat
        self.tmax = self.tmin+(len(self.ydata)-1)*self.deltat
        self._update_ids()

    def append(self, data):
        '''
        Append data to the end of the trace.

        To make this method efficient when very few or even single samples
        are appended successively, a larger grow buffer is allocated upon
        first invocation. The trace's data is then changed to be a view into
        the currently filled portion of the grow buffer array.
        '''

        assert self.ydata.dtype == data.dtype
        newlen = data.size + self.ydata.size
        if self._growbuffer is None or self._growbuffer.size < newlen:
            self._growbuffer = num.empty(newlen*2, dtype=self.ydata.dtype)
            self._growbuffer[:self.ydata.size] = self.ydata
        self._growbuffer[self.ydata.size:newlen] = data
        self.ydata = self._growbuffer[:newlen]
        self.tmax = self.tmin + (newlen-1)*self.deltat

    def chop(
            self, tmin, tmax, inplace=True, include_last=False,
            snap=(round, round), want_incomplete=True):

        '''
        Cut the trace to given time span.

        If the ``inplace`` argument is True (the default) the trace is cut in
        place, otherwise a new trace with the cut part is returned. By
        default, the indices where to start and end the trace data array are
        determined by rounding of ``tmin`` and ``tmax`` to sampling instances
        using Python's :py:func:`round` function. This behaviour can be
        changed with the ``snap`` argument, which takes a tuple of two
        functions (one for the lower and one for the upper end) to be used
        instead of :py:func:`round`. The last sample is by default not
        included unless ``include_last`` is set to True. If the given time
        span exceeds the available time span of the trace, the available part
        is returned, unless ``want_incomplete`` is set to False - in that
        case, a :py:exc:`NoData` exception is raised. This exception is
        always raised, when the requested time span does not overlap with the
        trace's time span.
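
        A minimal sketch (time offsets are illustrative)::

            chopped = tr.chop(tr.tmin+10., tr.tmin+20., inplace=False)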
        '''

        if want_incomplete:
            if tmax <= self.tmin-self.deltat or self.tmax+self.deltat < tmin:
                raise NoData()
        else:
            if tmin < self.tmin or self.tmax < tmax:
                raise NoData()

        ibeg = max(0, t2ind(tmin-self.tmin, self.deltat, snap[0]))
        iplus = 0
        if include_last:
            iplus = 1

        iend = min(
            self.data_len(),
            t2ind(tmax-self.tmin, self.deltat, snap[1])+iplus)

        if ibeg >= iend:
            raise NoData()

        obj = self
        if not inplace:
            obj = self.copy(data=False)

        self.drop_growbuffer()
        if self.ydata is not None:
            obj.ydata = self.ydata[ibeg:iend].copy()
        else:
            obj.ydata = None

        obj.tmin = obj.tmin+ibeg*obj.deltat
        obj.tmax = obj.tmin+((iend-ibeg)-1)*obj.deltat

        obj._update_ids()

        return obj

    def downsample(self, ndecimate, snap=False, initials=None, demean=False):
        '''
        Downsample trace by a given integer factor.

        :param ndecimate: decimation factor, avoid values larger than 8
        :param snap: whether to put the new sampling instances closest to
            multiples of the sampling rate.
        :param initials: ``None``, ``True``, or initial conditions for the
            anti-aliasing filter, obtained from a previous run. In the latter
            two cases the final state of the filter is returned instead of
            ``None``.
        :param demean: whether to demean the signal before filtering.
        '''

        newdeltat = self.deltat*ndecimate
        if snap:
            ilag = int(round(
                (math.ceil(self.tmin / newdeltat) * newdeltat - self.tmin)
                / self.deltat))
        else:
            ilag = 0

        if snap and ilag > 0 and ilag < self.ydata.size:
            data = self.ydata.astype(num.float64)
            self.tmin += ilag*self.deltat
        else:
            data = self.ydata.astype(num.float64)

        if demean:
            data -= num.mean(data)

        if data.size != 0:
            result = util.decimate(
                data, ndecimate, ftype='fir', zi=initials, ioff=ilag)
        else:
            result = data

        if initials is None:
            self.ydata, finals = result, None
        else:
            self.ydata, finals = result

        self.deltat = reuse(self.deltat*ndecimate)
        self.tmax = self.tmin+(len(self.ydata)-1)*self.deltat
        self._update_ids()

        return finals

    def downsample_to(self, deltat, snap=False, allow_upsample_max=1,
                      initials=None, demean=False):

        '''
        Downsample to given sampling rate.

        Tries to downsample the trace to a target sampling interval of
        ``deltat``. This runs the :py:meth:`Trace.downsample` one or several
        times. If ``allow_upsample_max`` is set to a value larger than 1,
        intermediate upsampling steps are allowed, in order to increase the
        number of possible downsampling ratios.

        If the requested ratio is not supported, an exception of type
        :py:exc:`pyrocko.util.UnavailableDecimation` is raised.
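
        A minimal sketch (the target sampling interval is illustrative)::

            tr.downsample_to(deltat=2.0)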
        '''

        ratio = deltat/self.deltat
        rratio = round(ratio)

        ok = False
        for upsratio in range(1, allow_upsample_max+1):
            dratio = (upsratio/self.deltat) / (1./deltat)
            if abs(dratio - round(dratio)) / dratio < 0.0001 and \
                    util.decitab(int(round(dratio))):

                ok = True
                break

        if not ok:
            raise util.UnavailableDecimation('ratio = %g' % ratio)

        if upsratio > 1:
            self.drop_growbuffer()
            ydata = self.ydata
            self.ydata = num.zeros(
                ydata.size*upsratio-(upsratio-1), ydata.dtype)
            self.ydata[::upsratio] = ydata
            for i in range(1, upsratio):
                self.ydata[i::upsratio] = \
                    float(i)/upsratio * ydata[:-1] \
                    + float(upsratio-i)/upsratio * ydata[1:]
            self.deltat = self.deltat/upsratio

            ratio = deltat/self.deltat
            rratio = round(ratio)

        deci_seq = util.decitab(int(rratio))
        finals = []
        for i, ndecimate in enumerate(deci_seq):
            if ndecimate != 1:
                xinitials = None
                if initials is not None:
                    xinitials = initials[i]
                finals.append(self.downsample(
                    ndecimate, snap=snap, initials=xinitials, demean=demean))

        if initials is not None:
            return finals

    def resample(self, deltat):
        '''
        Resample to given sampling rate ``deltat``.

        Resampling is performed in the frequency domain.
        '''

        ndata = self.ydata.size
        ntrans = nextpow2(ndata)
        fntrans2 = ntrans * self.deltat/deltat
        ntrans2 = int(round(fntrans2))
        deltat2 = self.deltat * float(ntrans)/float(ntrans2)
        ndata2 = int(round(ndata*self.deltat/deltat2))
        if abs(fntrans2 - ntrans2) > 1e-7:
            logger.warning(
                'resample: requested deltat %g could not be matched exactly: '
                '%g' % (deltat, deltat2))

        data = self.ydata
        data_pad = num.zeros(ntrans, dtype=float)
        data_pad[:ndata] = data
        fdata = num.fft.rfft(data_pad)
        fdata2 = num.zeros((ntrans2+1)//2, dtype=fdata.dtype)
        n = min(fdata.size, fdata2.size)
        fdata2[:n] = fdata[:n]
        data2 = num.fft.irfft(fdata2)
        data2 = data2[:ndata2]
        data2 *= float(ntrans2) / float(ntrans)
        self.deltat = deltat2
        self.set_ydata(data2)

    def resample_simple(self, deltat):
        tyear = 3600*24*365.

        if deltat == self.deltat:
            return

        if abs(self.deltat - deltat) * tyear/deltat < deltat:
            logger.warning(
                'resample_simple: less than one sample would have to be '
                'inserted/deleted per year. Doing nothing.')
            return

        ninterval = int(round(deltat / (self.deltat - deltat)))
        if abs(ninterval) < 20:
            logger.error(
                'resample_simple: sample insertion/deletion interval less '
                'than 20. results would be erroneous.')
            raise ResamplingFailed()

        delete = False
        if ninterval < 0:
            ninterval = - ninterval
            delete = True

        tyearbegin = util.year_start(self.tmin)

        nmin = int(round((self.tmin - tyearbegin)/deltat))

        ibegin = (((nmin-1)//ninterval)+1) * ninterval - nmin
        nindices = (len(self.ydata) - ibegin - 1) // ninterval + 1
        if nindices > 0:
            indices = ibegin + num.arange(nindices) * ninterval
            data_split = num.split(self.ydata, indices)
            data = []
            for ln, h in zip(data_split[:-1], data_split[1:]):
                if delete:
                    ln = ln[:-1]

                data.append(ln)
                if not delete:
                    if ln.size == 0:
                        v = h[0]
                    else:
                        v = 0.5*(ln[-1] + h[0])
                    data.append(num.array([v], dtype=ln.dtype))

            data.append(data_split[-1])

            ydata_new = num.concatenate(data)

            self.tmin = tyearbegin + nmin * deltat
            self.deltat = deltat
            self.set_ydata(ydata_new)

    def stretch(self, tmin_new, tmax_new):
        '''
        Stretch signal while preserving sample rate using sinc interpolation.

        :param tmin_new: new time of first sample
        :param tmax_new: new time of last sample

        This method can be used to correct for a small linear time drift or
        to introduce sub-sample time shifts. The amount of stretching is
        limited to 10% by the implementation and is expected to be much
        smaller than that by the approximations used.
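
        A minimal sketch (the shift value is illustrative)::

            # apply a constant sub-sample time shift of 0.01 s
            tr.stretch(tr.tmin - 0.01, tr.tmax - 0.01)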
        '''

        from pyrocko import signal_ext

        i_control = num.array([0, self.ydata.size-1], dtype=num.int64)
        t_control = num.array([tmin_new, tmax_new], dtype=float)

        r = (tmax_new - tmin_new) / self.deltat + 1.0
        n_new = int(round(r))
        if abs(n_new - r) > 0.001:
            n_new = int(math.floor(r))

        assert n_new >= 2

        tmax_new = tmin_new + (n_new-1) * self.deltat

        ydata_new = num.empty(n_new, dtype=float)
        signal_ext.antidrift(i_control, t_control,
                             self.ydata.astype(float),
                             tmin_new, self.deltat, ydata_new)

        self.tmin = tmin_new
        self.set_ydata(ydata_new)
        self._update_ids()

    def nyquist_check(self, frequency, intro='Corner frequency', warn=True,
                      raise_exception=False):

        '''
        Check if a given frequency is above the Nyquist frequency of the trace.

        :param intro: string used to introduce the warning/error message
        :param warn: whether to emit a warning
        :param raise_exception: whether to raise an :py:exc:`AboveNyquist`
            exception.
        '''

        if frequency >= 0.5/self.deltat:
            message = '%s (%g Hz) is equal to or higher than Nyquist ' \
                'frequency (%g Hz). (Trace %s)' \
                % (intro, frequency, 0.5/self.deltat, self.name())
            if warn:
                logger.warning(message)
            if raise_exception:
                raise AboveNyquist(message)

    def lowpass(self, order, corner, nyquist_warn=True,
                nyquist_exception=False, demean=True):

        '''
        Apply Butterworth lowpass to the trace.

        :param order: order of the filter
        :param corner: corner frequency of the filter

        Mean is removed before filtering.
        '''

        self.nyquist_check(
            corner, 'Corner frequency of lowpass', nyquist_warn,
            nyquist_exception)

        (b, a) = _get_cached_filter_coefs(
            order, [corner*2.0*self.deltat], btype='low')

        if len(a) != order+1 or len(b) != order+1:
            logger.warning(
                'Erroneous filter coefficients returned by '
                'scipy.signal.butter(). You may need to downsample the '
                'signal before filtering.')

        data = self.ydata.astype(num.float64)
        if demean:
            data -= num.mean(data)
        self.drop_growbuffer()
        self.ydata = signal.lfilter(b, a, data)

    def highpass(self, order, corner, nyquist_warn=True,
                 nyquist_exception=False, demean=True):

        '''
        Apply Butterworth highpass to the trace.

        :param order: order of the filter
        :param corner: corner frequency of the filter

        Mean is removed before filtering.
        '''

        self.nyquist_check(
            corner, 'Corner frequency of highpass', nyquist_warn,
            nyquist_exception)

        (b, a) = _get_cached_filter_coefs(
            order, [corner*2.0*self.deltat], btype='high')

        data = self.ydata.astype(num.float64)
        if len(a) != order+1 or len(b) != order+1:
            logger.warning(
                'Erroneous filter coefficients returned by '
                'scipy.signal.butter(). You may need to downsample the '
                'signal before filtering.')
        if demean:
            data -= num.mean(data)
        self.drop_growbuffer()
        self.ydata = signal.lfilter(b, a, data)

    def bandpass(self, order, corner_hp, corner_lp, demean=True):
        '''
        Apply Butterworth bandpass to the trace.

        :param order: order of the filter
        :param corner_hp: lower corner frequency of the filter
        :param corner_lp: upper corner frequency of the filter

        Mean is removed before filtering.
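
        A minimal sketch (corner frequencies in Hz are illustrative)::

            tr.bandpass(order=4, corner_hp=0.02, corner_lp=1.0)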
        '''

        self.nyquist_check(corner_hp, 'Lower corner frequency of bandpass')
        self.nyquist_check(corner_lp, 'Higher corner frequency of bandpass')
        (b, a) = _get_cached_filter_coefs(
            order,
            [corner*2.0*self.deltat for corner in (corner_hp, corner_lp)],
            btype='band')
        data = self.ydata.astype(num.float64)
        if demean:
            data -= num.mean(data)
        self.drop_growbuffer()
        self.ydata = signal.lfilter(b, a, data)

    def bandstop(self, order, corner_hp, corner_lp, demean=True):
        '''
        Apply bandstop (attenuates frequencies in band) to the trace.

        :param order: order of the filter
        :param corner_hp: lower corner frequency of the filter
        :param corner_lp: upper corner frequency of the filter

        Mean is removed before filtering.
        '''

        self.nyquist_check(corner_hp, 'Lower corner frequency of bandstop')
        self.nyquist_check(corner_lp, 'Higher corner frequency of bandstop')
        (b, a) = _get_cached_filter_coefs(
            order,
            [corner*2.0*self.deltat for corner in (corner_hp, corner_lp)],
            btype='bandstop')
        data = self.ydata.astype(num.float64)
        if demean:
            data -= num.mean(data)
        self.drop_growbuffer()
        self.ydata = signal.lfilter(b, a, data)

    def abshilbert(self):
        self.drop_growbuffer()
        self.ydata = num.abs(hilbert(self.ydata))

    def envelope(self, inplace=True):
        '''
        Calculate the envelope of the trace.

        :param inplace: calculate envelope in place

        The calculation follows:

        .. math::

            Y' = \\sqrt{Y^2+H(Y)^2}

        where H is the Hilbert-Transform of the signal Y.
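
        A minimal sketch::

            env = tr.envelope(inplace=False)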
        '''

        if inplace:
            self.drop_growbuffer()
            self.ydata = num.sqrt(self.ydata**2 + hilbert(self.ydata)**2)
        else:
            tr = self.copy(data=False)
            tr.ydata = num.sqrt(self.ydata**2 + hilbert(self.ydata)**2)
            return tr

    def taper(self, taperer, inplace=True, chop=False):
        '''
        Apply a :py:class:`Taper` to the trace.

        :param taperer: instance of :py:class:`Taper` subclass
        :param inplace: apply taper inplace
        :param chop: if ``True``: exclude tapered parts from the resulting
            trace
        '''

        if not inplace:
            tr = self.copy()
        else:
            tr = self

        if chop:
            i, n = taperer.span(tr.ydata, tr.tmin, tr.deltat)
            tr.shift(i*tr.deltat)
            tr.set_ydata(tr.ydata[i:i+n])

        taperer(tr.ydata, tr.tmin, tr.deltat)

        if not inplace:
            return tr

    def whiten(self, order=6):
        '''
        Whiten signal in time domain using autoregression and recursive filter.

        :param order: order of the autoregression process
        '''

        b, a = self.whitening_coefficients(order)
        self.drop_growbuffer()
        self.ydata = signal.lfilter(b, a, self.ydata)

    def whitening_coefficients(self, order=6):
        ar = yulewalker(self.ydata, order)
        b, a = [1.] + ar.tolist(), [1.]
        return b, a

    def ampspec_whiten(
            self,
            width,
            td_taper='auto',
            fd_taper='auto',
            pad_to_pow2=True,
            demean=True):

        '''
        Whiten signal via frequency domain using moving average on amplitude
        spectra.

        :param width: width of smoothing kernel [Hz]
        :param td_taper: time domain taper, object of type :py:class:`Taper`
            or ``None`` or ``'auto'``.
        :param fd_taper: frequency domain taper, object of type
            :py:class:`Taper` or ``None`` or ``'auto'``.
        :param pad_to_pow2: whether to pad the signal with zeros up to a
            length of 2^n
        :param demean: whether to demean the signal before tapering

        The signal is first demeaned and then tapered using ``td_taper``.
        Then, the spectrum is calculated and inversely weighted with a
        smoothed version of its amplitude spectrum. A moving average is used
        for the smoothing. The smoothed spectrum is then tapered using
        ``fd_taper``. Finally, the smoothed and tapered spectrum is
        back-transformed into the time domain.

        If ``td_taper`` is set to ``'auto'``, ``CosFader(1.0/width)`` is used.
        If ``fd_taper`` is set to ``'auto'``, ``CosFader(width)`` is used.
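
        A minimal sketch (the kernel width is illustrative)::

            tr.ampspec_whiten(width=0.1)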
        '''

        ndata = self.data_len()

        if pad_to_pow2:
            ntrans = nextpow2(ndata)
        else:
            ntrans = ndata

        df = 1./(ntrans*self.deltat)
        nw = int(round(width/df))
        if ndata//2+1 <= nw:
            raise TraceTooShort(
                'Samples in trace: %s, samples needed: %s' % (ndata, nw))

        if td_taper == 'auto':
            td_taper = CosFader(1./width)

        if fd_taper == 'auto':
            fd_taper = CosFader(width)

        if td_taper:
            self.taper(td_taper)

        ydata = self.get_ydata().astype(float)
        if demean:
            ydata -= ydata.mean()

        spec = num.fft.rfft(ydata, ntrans)

        amp = num.abs(spec)
        nspec = amp.size
        csamp = num.cumsum(amp)
        amp_smoothed = num.empty(nspec, dtype=csamp.dtype)
        n1, n2 = nw//2, nw//2 + nspec - nw
        amp_smoothed[n1:n2] = (csamp[nw:] - csamp[:-nw]) / nw
        amp_smoothed[:n1] = amp_smoothed[n1]
        amp_smoothed[n2:] = amp_smoothed[n2-1]

        denom = amp_smoothed * amp
        numer = amp
        eps = num.mean(denom) * 1e-9
        if eps == 0.0:
            eps = 1e-9

        numer += eps
        denom += eps
        spec *= numer/denom

        if fd_taper:
            fd_taper(spec, 0., df)

        ydata = num.fft.irfft(spec)
        self.set_ydata(ydata[:ndata])

    def _get_cached_freqs(self, nf, deltaf):
        ck = (nf, deltaf)
        if ck not in Trace.cached_frequencies:
            Trace.cached_frequencies[ck] = deltaf * num.arange(
                nf, dtype=float)

        return Trace.cached_frequencies[ck]

    def bandpass_fft(self, corner_hp, corner_lp):
        '''
        Apply boxcar bandpass to trace (in spectral domain).
        '''

        n = len(self.ydata)
        n2 = nextpow2(n)
        data = num.zeros(n2, dtype=num.float64)
        data[:n] = self.ydata
        fdata = num.fft.rfft(data)
        freqs = self._get_cached_freqs(len(fdata), 1./(self.deltat*n2))
        fdata[0] = 0.0
        fdata *= num.logical_and(corner_hp < freqs, freqs < corner_lp)
        data = num.fft.irfft(fdata)
        self.drop_growbuffer()
        self.ydata = data[:n]

    def shift(self, tshift):
        '''
        Time shift the trace.
        '''

        self.tmin += tshift
        self.tmax += tshift
        self._update_ids()

    def snap(self, inplace=True, interpolate=False):
        '''
        Shift trace samples to nearest even multiples of the sampling rate.

        :param inplace: (boolean) snap traces inplace

        If ``inplace`` is ``False`` and the difference between the snapped
        and the original values of ``tmin`` and ``tmax`` is smaller than
        0.01 x ``deltat``, :py:meth:`snap` returns the unsnapped instance of
        the original trace.
        '''

        tmin = round(self.tmin/self.deltat)*self.deltat
        tmax = tmin + (self.ydata.size-1)*self.deltat

        if inplace:
            xself = self
        else:
            if abs(self.tmin - tmin) < 1e-2*self.deltat and \
                    abs(self.tmax - tmax) < 1e-2*self.deltat:
                return self

            xself = self.copy()

        if interpolate:
            from pyrocko import signal_ext
            n = xself.data_len()
            ydata_new = num.empty(n, dtype=float)
            i_control = num.array([0, n-1], dtype=num.int64)
            tref = tmin
            t_control = num.array(
                [float(xself.tmin-tref), float(xself.tmax-tref)],
                dtype=float)

            signal_ext.antidrift(i_control, t_control,
                                 xself.ydata.astype(float),
                                 float(tmin-tref), xself.deltat, ydata_new)

            xself.ydata = ydata_new

        xself.tmin = tmin
        xself.tmax = tmax
        xself._update_ids()

        return xself

    def fix_deltat_rounding_errors(self):
        '''
        Try to undo sampling rate rounding errors.

        See :py:func:`fix_deltat_rounding_errors`.
        '''

        self.deltat = fix_deltat_rounding_errors(self.deltat)
        self.tmax = self.tmin + (self.data_len() - 1) * self.deltat

    def sta_lta_centered(self, tshort, tlong, quad=True, scalingmethod=1):
        '''
        Run special STA/LTA filter where the short time window is centered on
        the long time window.

        :param tshort: length of short time window in [s]
        :param tlong: length of long time window in [s]
        :param quad: whether to square the data prior to applying the STA/LTA
            filter
        :param scalingmethod: integer key to select how output values are
            scaled / normalized (``1``, ``2``, or ``3``)

        ============= ====================================== ===========
        Scalingmethod Implementation                         Range
        ============= ====================================== ===========
        ``1``         As/Al* Ts/Tl                           [0,1]
        ``2``         (As/Al - 1) / (Tl/Ts - 1)              [-Ts/Tl,1]
        ``3``         Like ``2`` but clipping range at zero  [0,1]
        ============= ====================================== ===========

        '''

        nshort = int(round(tshort/self.deltat))
        nlong = int(round(tlong/self.deltat))

        assert nshort < nlong
        if nlong > len(self.ydata):
            raise TraceTooShort(
                'Samples in trace: %s, samples needed: %s'
                % (len(self.ydata), nlong))

        if quad:
            sqrdata = self.ydata**2
        else:
            sqrdata = self.ydata

        mavg_short = moving_avg(sqrdata, nshort)
        mavg_long = moving_avg(sqrdata, nlong)

        self.drop_growbuffer()

        if scalingmethod not in (1, 2, 3):
            raise Exception('Invalid value for scalingmethod argument.')

        if scalingmethod == 1:
            self.ydata = mavg_short/mavg_long * float(nshort)/float(nlong)
        elif scalingmethod in (2, 3):
            self.ydata = (mavg_short/mavg_long - 1.) \
                / ((float(nlong)/float(nshort)) - 1)

            if scalingmethod == 3:
                self.ydata = num.maximum(self.ydata, 0.)

    def sta_lta_right(self, tshort, tlong, quad=True, scalingmethod=1):
        '''
        Run special STA/LTA filter where the short time window is overlapping
        with the last part of the long time window.

        :param tshort: length of short time window in [s]
        :param tlong: length of long time window in [s]
        :param quad: whether to square the data prior to applying the STA/LTA
            filter
        :param scalingmethod: integer key to select how output values are
            scaled / normalized (``1``, ``2``, or ``3``)

        ============= ====================================== ===========
        Scalingmethod Implementation                         Range
        ============= ====================================== ===========
        ``1``         As/Al* Ts/Tl                           [0,1]
        ``2``         (As/Al - 1) / (Tl/Ts - 1)              [-Ts/Tl,1]
        ``3``         Like ``2`` but clipping range at zero  [0,1]
        ============= ====================================== ===========

        With ``scalingmethod=1``, the values produced by this variant of the
        STA/LTA are equivalent to

        .. math::
            s_i = \\frac{s}{l} \\frac{\\frac{1}{s}\\sum_{j=i}^{i+s-1} f_j}
                              {\\frac{1}{l}\\sum_{j=i+s-l}^{i+s-1} f_j}

        where :math:`f_j` are the input samples, :math:`s` is the number of
        samples in the short time window and :math:`l` is the number of
        samples in the long time window.
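
        A minimal sketch (window lengths in seconds are illustrative)::

            tr.sta_lta_right(tshort=2., tlong=10.)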
        '''

        n = self.data_len()
        tmin = self.tmin

        nshort = max(1, int(round(tshort/self.deltat)))
        nlong = max(1, int(round(tlong/self.deltat)))

        assert nshort < nlong

        if nlong > len(self.ydata):
            raise TraceTooShort(
                'Samples in trace: %s, samples needed: %s'
                % (len(self.ydata), nlong))

        if quad:
            sqrdata = self.ydata**2
        else:
            sqrdata = self.ydata

        nshift = int(math.floor(0.5 * (nlong - nshort)))
        if nlong % 2 != 0 and nshort % 2 == 0:
            nshift += 1

        mavg_short = moving_avg(sqrdata, nshort)[nshift:]
        mavg_long = moving_avg(sqrdata, nlong)[:sqrdata.size-nshift]

        self.drop_growbuffer()

        if scalingmethod not in (1, 2, 3):
            raise Exception('Invalid value for scalingmethod argument.')

        if scalingmethod == 1:
            ydata = mavg_short/mavg_long * nshort/nlong
        elif scalingmethod in (2, 3):
            ydata = (mavg_short/mavg_long - 1.) \
                / ((float(nlong)/float(nshort)) - 1)

            if scalingmethod == 3:
                ydata = num.maximum(ydata, 0.)

        self.set_ydata(ydata)

        self.shift((math.ceil(0.5*nlong) - nshort + 1) * self.deltat)

        self.chop(
            tmin + (nlong - nshort) * self.deltat,
            tmin + (n - nshort) * self.deltat)

    def peaks(self, threshold, tsearch,
              deadtime=False,
              nblock_duration_detection=100):

        '''
        Detect peaks above a given threshold (method 1).

        From every instant, where the signal rises above ``threshold``, a
        time length of ``tsearch`` seconds is searched for a maximum. A list
        with tuples (time, value) for each detected peak is returned. The
        ``deadtime`` argument turns on a special deadtime duration detection
        algorithm useful in combination with recursive STA/LTA filters.
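
        A minimal sketch (threshold and search length are illustrative)::

            tpeaks, apeaks = tr.peaks(threshold=1., tsearch=10.)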
        '''

        y = self.ydata
        above = num.where(y > threshold, 1, 0)
        deriv = num.zeros(y.size, dtype=num.int8)
        deriv[1:] = above[1:]-above[:-1]
        itrig_positions = num.nonzero(deriv > 0)[0]
        tpeaks = []
        apeaks = []
        tzeros = []
        tzero = self.tmin

        for itrig_pos in itrig_positions:
            ibeg = itrig_pos
            iend = min(
                len(self.ydata),
                itrig_pos + int(math.ceil(tsearch/self.deltat)))
            ipeak = num.argmax(y[ibeg:iend])
            tpeak = self.tmin + (ipeak+ibeg)*self.deltat
            apeak = y[ibeg+ipeak]

            if tpeak < tzero:
                continue

            if deadtime:
                ibeg = itrig_pos
                iblock = 0
                nblock = nblock_duration_detection
                totalsum = 0.
                while True:
                    if ibeg+iblock*nblock >= len(y):
                        tzero = self.tmin + (len(y)-1) * self.deltat
                        break

                    logy = num.log(
                        y[ibeg+iblock*nblock:ibeg+(iblock+1)*nblock])
                    logy[0] += totalsum
                    ysum = num.cumsum(logy)
                    totalsum = ysum[-1]
                    below = num.where(ysum <= 0., 1, 0)
                    deriv = num.zeros(ysum.size, dtype=num.int8)
                    deriv[1:] = below[1:]-below[:-1]
                    izero_positions = num.nonzero(deriv > 0)[0] + iblock*nblock
                    if len(izero_positions) > 0:
                        tzero = self.tmin + self.deltat * (
                            ibeg + izero_positions[0])
                        break

                    iblock += 1
            else:
                tzero = ibeg*self.deltat + self.tmin + tsearch

            tpeaks.append(tpeak)
            apeaks.append(apeak)
            tzeros.append(tzero)

        if deadtime:
            return tpeaks, apeaks, tzeros
        else:
            return tpeaks, apeaks

    def peaks2(self, threshold, tsearch):

        '''
        Detect peaks above a given threshold (method 2).

        This variant of peak detection is a bit more robust (and slower) than
        the one implemented in :py:meth:`Trace.peaks`. First all samples with
        ``a[i-1] < a[i] > a[i+1]`` are masked as potential peaks. From these,
        iteratively the one with the maximum amplitude ``a[j]`` and time
        ``t[j]`` is chosen and potential peaks within
        ``t[j] - tsearch, t[j] + tsearch``
        are discarded. The algorithm stops when ``a[j] < threshold`` or when
        no more potential peaks are left.
        '''

        a = num.copy(self.ydata)

        amin = num.min(a)

        a[0] = amin
        a[1: -1][num.diff(a, 2) >= 0.] = amin
        a[-1] = amin

        data = []
        while True:
            imax = num.argmax(a)
            amax = a[imax]

            if amax < threshold or amax == amin:
                break

            data.append((self.tmin + imax * self.deltat, amax))

            ntsearch = int(round(tsearch / self.deltat))
            a[max(imax-ntsearch//2, 0):min(imax+ntsearch//2, a.size)] = amin

        if data:
            data.sort()
            tpeaks, apeaks = list(zip(*data))
        else:
            tpeaks, apeaks = [], []

        return tpeaks, apeaks

    def extend(self, tmin=None, tmax=None, fillmethod='zeros'):
        '''
        Extend trace to given span.

        :param tmin: begin time of new span
        :param tmax: end time of new span
        :param fillmethod: ``'zeros'``, ``'repeat'``, ``'mean'``, or
            ``'median'``
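
        A minimal sketch (pad 10 s of repeated edge values on both sides)::

            tr.extend(tr.tmin-10., tr.tmax+10., fillmethod='repeat')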
        '''

        nold = self.ydata.size

        if tmin is not None:
            nl = min(0, int(round((tmin-self.tmin)/self.deltat)))
        else:
            nl = 0

        if tmax is not None:
            nh = max(nold - 1, int(round((tmax-self.tmin)/self.deltat)))
        else:
            nh = nold - 1

        n = nh - nl + 1
        data = num.zeros(n, dtype=self.ydata.dtype)
        data[-nl:-nl + nold] = self.ydata
        if self.ydata.size >= 1:
            if fillmethod == 'repeat':
                data[:-nl] = self.ydata[0]
                data[-nl + nold:] = self.ydata[-1]
            elif fillmethod == 'median':
                v = num.median(self.ydata)
                data[:-nl] = v
                data[-nl + nold:] = v
            elif fillmethod == 'mean':
                v = num.mean(self.ydata)
                data[:-nl] = v
                data[-nl + nold:] = v

        self.drop_growbuffer()
        self.ydata = data

        self.tmin += nl * self.deltat
        self.tmax = self.tmin + (self.ydata.size - 1) * self.deltat

        self._update_ids()

    def transfer(self,
                 tfade=0.,
                 freqlimits=None,
                 transfer_function=None,
                 cut_off_fading=True,
                 demean=True,
                 invert=False):

        '''
        Return new trace with transfer function applied (convolution).

        :param tfade: rise/fall time in seconds of taper applied in time
            domain at both ends of trace.
        :param freqlimits: 4-tuple with corner frequencies in Hz.
        :param transfer_function: FrequencyResponse object; must provide a
            method 'evaluate(freqs)', which returns the transfer function
            coefficients at the frequencies 'freqs'.
        :param cut_off_fading: whether to cut off rise/fall interval in
            output trace.
        :param demean: remove mean before applying transfer function
        :param invert: set to True to do a deconvolution
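
        A minimal deconvolution sketch (assumes ``resp`` is a
        :py:class:`FrequencyResponse` object; taper and corner frequencies
        are illustrative)::

            restituted = tr.transfer(
                tfade=10., freqlimits=(0.01, 0.02, 5., 10.),
                transfer_function=resp, invert=True)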
        '''

        if transfer_function is None:
            transfer_function = FrequencyResponse()

        if self.tmax - self.tmin <= tfade*2.:
            raise TraceTooShort(
                'Trace %s.%s.%s.%s too short for fading length setting. '
                'trace length = %g, fading length = %g'
                % (self.nslc_id + (self.tmax-self.tmin, tfade)))

        if freqlimits is None and (
                transfer_function is None or transfer_function.is_scalar()):

            # special case for flat responses

            output = self.copy()
            data = self.ydata
            ndata = data.size

            if transfer_function is not None:
                c = num.abs(transfer_function.evaluate(num.ones(1))[0])

                if invert:
                    c = 1.0/c

                data *= c

            if tfade != 0.0:
                data *= costaper(
                    0., tfade, self.deltat*(ndata-1)-tfade, self.deltat*ndata,
                    ndata, self.deltat)

            output.ydata = data

        else:
            ndata = self.ydata.size
            ntrans = nextpow2(ndata*1.2)
            coefs = self._get_tapered_coefs(
                ntrans, freqlimits, transfer_function, invert=invert)

            data = self.ydata

            data_pad = num.zeros(ntrans, dtype=float)
            data_pad[:ndata] = data
            if demean:
                data_pad[:ndata] -= data.mean()

            if tfade != 0.0:
                data_pad[:ndata] *= costaper(
                    0., tfade, self.deltat*(ndata-1)-tfade, self.deltat*ndata,
                    ndata, self.deltat)

            fdata = num.fft.rfft(data_pad)
            fdata *= coefs
            ddata = num.fft.irfft(fdata)
            output = self.copy()
            output.ydata = ddata[:ndata]

        if cut_off_fading and tfade != 0.0:
            try:
                output.chop(output.tmin+tfade, output.tmax-tfade, inplace=True)
            except NoData:
                raise TraceTooShort(
                    'Trace %s.%s.%s.%s too short for fading length setting. '
                    'trace length = %g, fading length = %g'
                    % (self.nslc_id + (self.tmax-self.tmin, tfade)))
        else:
            output.ydata = output.ydata.copy()

        return output

    def differentiate(self, n=1, order=4, inplace=True):
        '''
        Approximate first or second derivative of the trace.

        :param n: 1 for first derivative, 2 for second
        :param order: order of the approximation; 2 and 4 are supported
        :param inplace: if ``True`` the trace is differentiated in place,
            otherwise a new trace object with the derivative is returned.

        Raises :py:exc:`ValueError` for unsupported `n` or `order`.

        See :py:func:`~pyrocko.util.diff_fd` for implementation details.
        '''

        ddata = util.diff_fd(n, order, self.deltat, self.ydata)

        if inplace:
            self.ydata = ddata
        else:
            output = self.copy(data=False)
            output.set_ydata(ddata)
            return output

    def drop_chain_cache(self):
        if self._pchain:
            self._pchain.clear()

    def init_chain(self):
        self._pchain = pchain.Chain(
            do_downsample,
            do_extend,
            do_pre_taper,
            do_fft,
            do_filter,
            do_ifft)

    def run_chain(self, tmin, tmax, deltat, setup, nocache):
        if setup.domain == 'frequency_domain':
            _, _, data = self._pchain(
                (self, deltat),
                (tmin, tmax),
                (setup.taper,),
                (setup.filter,),
                (setup.filter,),
                nocache=nocache)

            return num.abs(data), num.abs(data)

        else:
            processed = self._pchain(
                (self, deltat),
                (tmin, tmax),
                (setup.taper,),
                (setup.filter,),
                (setup.filter,),
                (),
                nocache=nocache)

            if setup.domain == 'time_domain':
                data = processed.get_ydata()

            elif setup.domain == 'envelope':
                processed = processed.envelope(inplace=False)

            elif setup.domain == 'absolute':
                processed.set_ydata(num.abs(processed.get_ydata()))

            return processed.get_ydata(), processed

    def misfit(self, candidate, setup, nocache=False, debug=False):
        '''
        Calculate misfit and normalization factor against candidate trace.

        :param candidate: :py:class:`Trace` object
        :param setup: :py:class:`MisfitSetup` object
        :returns: tuple ``(m, n)``, where m is the misfit value and n is the
            normalization divisor

        If the sampling rates of ``self`` and ``candidate`` differ, the trace
        with the higher sampling rate will be downsampled.
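
        A minimal sketch (assumes ``setup`` is a configured
        :py:class:`MisfitSetup` instance)::

            m, n = reference_tr.misfit(candidate=tr, setup=setup)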
        '''

        a = self
        b = candidate

        for tr in (a, b):
            if not tr._pchain:
                tr.init_chain()

        deltat = max(a.deltat, b.deltat)
        tmin = min(a.tmin, b.tmin) - deltat
        tmax = max(a.tmax, b.tmax) + deltat

        adata, aproc = a.run_chain(tmin, tmax, deltat, setup, nocache)
        bdata, bproc = b.run_chain(tmin, tmax, deltat, setup, nocache)

        if setup.domain != 'cc_max_norm':
            m, n = Lx_norm(bdata, adata, norm=setup.norm)
        else:
            ctr = correlate(aproc, bproc, mode='full', normalization='normal')
            ccmax = ctr.max()[1]
            m = 0.5 - 0.5 * ccmax
            n = 0.5

        if debug:
            return m, n, aproc, bproc
        else:
            return m, n

    def spectrum(self, pad_to_pow2=False, tfade=None):
        '''
        Get FFT spectrum of trace.

        :param pad_to_pow2: whether to zero-pad the data to next larger
            power-of-two length
        :param tfade: ``None`` or a time length in seconds, to apply cosine
            shaped tapers to both ends of the trace

        :returns: a tuple with (frequencies, values)
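
        A minimal sketch::

            freqs, spec = tr.spectrum(pad_to_pow2=True)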
        '''

        ndata = self.ydata.size

        if pad_to_pow2:
            ntrans = nextpow2(ndata)
        else:
            ntrans = ndata

        if tfade is None:
            ydata = self.ydata
        else:
            ydata = self.ydata * costaper(
                0., tfade, self.deltat*(ndata-1)-tfade, self.deltat*ndata,
                ndata, self.deltat)

        fydata = num.fft.rfft(ydata, ntrans)
        df = 1./(ntrans*self.deltat)
        fxdata = num.arange(len(fydata))*df
        return fxdata, fydata

    def multi_filter(self, filter_freqs, bandwidth):

        class Gauss(FrequencyResponse):
            def __init__(self, f0, a=1.0):
                self._omega0 = 2.*math.pi*f0
                self._a = a

            def evaluate(self, freqs):
                omega = 2.*math.pi*freqs
                return num.exp(-((omega-self._omega0)
                                 / (self._a*self._omega0))**2)

        freqs, coefs = self.spectrum()
        n = self.data_len()
        nfilt = len(filter_freqs)
        signal_tf = num.zeros((nfilt, n))
        centroid_freqs = num.zeros(nfilt)
        for ifilt, f0 in enumerate(filter_freqs):
            taper = Gauss(f0, a=bandwidth)
            weights = taper.evaluate(freqs)
            nhalf = freqs.size
            analytic_spec = num.zeros(n, dtype=complex)
            analytic_spec[:nhalf] = coefs*weights

            enorm = num.abs(analytic_spec[:nhalf])**2
            enorm /= num.sum(enorm)

            if n % 2 == 0:
                analytic_spec[1:nhalf-1] *= 2.
            else:
                analytic_spec[1:nhalf] *= 2.

            analytic = num.fft.ifft(analytic_spec)
            signal_tf[ifilt, :] = num.abs(analytic)

            enorm = num.abs(analytic_spec[:nhalf])**2
            enorm /= num.sum(enorm)
            centroid_freqs[ifilt] = num.sum(freqs*enorm)

        return centroid_freqs, signal_tf

    def _get_tapered_coefs(
            self, ntrans, freqlimits, transfer_function, invert=False):

        deltaf = 1./(self.deltat*ntrans)
        nfreqs = ntrans//2 + 1
        transfer = num.ones(nfreqs, dtype=complex)
        hi = snapper(nfreqs, deltaf)
        if freqlimits is not None:
            a, b, c, d = freqlimits
            freqs = num.arange(hi(d)-hi(a), dtype=float)*deltaf \
                + hi(a)*deltaf

            if invert:
                coeffs = transfer_function.evaluate(freqs)
                if num.any(coeffs == 0.0):
                    raise InfiniteResponse('%s.%s.%s.%s' % self.nslc_id)

                transfer[hi(a):hi(d)] = 1.0 / coeffs
            else:
                transfer[hi(a):hi(d)] = transfer_function.evaluate(freqs)

            tapered_transfer = costaper(a, b, c, d, nfreqs, deltaf)*transfer
        else:
            if invert:
                raise Exception(
                    'transfer: `freqlimits` must be given when `invert` is '
                    'set to `True`')

            freqs = num.arange(nfreqs) * deltaf
            tapered_transfer = transfer_function.evaluate(freqs)

        tapered_transfer[0] = 0.0  # don't introduce static offsets
        return tapered_transfer

    def fill_template(self, template, **additional):
        '''
        Fill string template with trace metadata.

        Uses normal python '%(placeholder)s' string templates. The following
        placeholders are considered: ``network``, ``station``, ``location``,
        ``channel``, ``tmin`` (time of first sample), ``tmax`` (time of last
        sample), ``tmin_ms``, ``tmax_ms``, ``tmin_us``, ``tmax_us``,
        ``tmin_year``, ``tmax_year``, ``julianday``. The variants ending with
        ``'_ms'`` include milliseconds, those with ``'_us'`` include
        microseconds, those with ``'_year'`` contain only the year.
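
        A minimal sketch (the filename pattern is illustrative)::

            fn = tr.fill_template(
                '%(network)s.%(station)s.%(channel)s_%(tmin)s.mseed')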
        '''

        template = template.replace('%n', '%(network)s')\
            .replace('%s', '%(station)s')\
            .replace('%l', '%(location)s')\
            .replace('%c', '%(channel)s')\
            .replace('%b', '%(tmin)s')\
            .replace('%e', '%(tmax)s')\
            .replace('%j', '%(julianday)s')

        params = dict(
            zip(('network', 'station', 'location', 'channel'), self.nslc_id))
        params['tmin'] = util.time_to_str(
            self.tmin, format='%Y-%m-%d_%H-%M-%S')
        params['tmax'] = util.time_to_str(
            self.tmax, format='%Y-%m-%d_%H-%M-%S')
        params['tmin_ms'] = util.time_to_str(
            self.tmin, format='%Y-%m-%d_%H-%M-%S.3FRAC')
        params['tmax_ms'] = util.time_to_str(
            self.tmax, format='%Y-%m-%d_%H-%M-%S.3FRAC')
        params['tmin_us'] = util.time_to_str(
            self.tmin, format='%Y-%m-%d_%H-%M-%S.6FRAC')
        params['tmax_us'] = util.time_to_str(
            self.tmax, format='%Y-%m-%d_%H-%M-%S.6FRAC')
        params['tmin_year'] = util.time_to_str(
            self.tmin, format='%Y')
        params['tmax_year'] = util.time_to_str(
            self.tmax, format='%Y')
        params['julianday'] = util.julian_day_of_year(self.tmin)
        params.update(additional)
        return template % params

    def plot(self):
        '''
        Show trace with matplotlib.

        See also: :py:meth:`Trace.snuffle`.
        '''

        import pylab
        pylab.plot(self.get_xdata(), self.get_ydata())
        name = '%s %s %s - %s' % (
            self.channel,
            self.station,
            time.strftime("%d-%m-%y %H:%M:%S", time.gmtime(self.tmin)),
            time.strftime("%d-%m-%y %H:%M:%S", time.gmtime(self.tmax)))

        pylab.title(name)
        pylab.show()

    def snuffle(self, **kwargs):
        '''
        Show trace in a snuffler window.

        :param stations: list of `pyrocko.model.Station` objects or ``None``
        :param events: list of `pyrocko.model.Event` objects or ``None``
        :param markers: list of `pyrocko.gui.util.Marker` objects or ``None``
        :param ntracks: float, number of tracks to be shown initially
            (default: 12)
        :param follow: time interval (in seconds) for real time follow mode
            or ``None``
        :param controls: bool, whether to show the main controls (default:
            ``True``)
        :param opengl: bool, whether to use opengl (default: ``False``)
        '''

        return snuffle([self], **kwargs)


def snuffle(traces, **kwargs):
    '''
    Show traces in a snuffler window.

    :param stations: list of `pyrocko.model.Station` objects or ``None``
    :param events: list of `pyrocko.model.Event` objects or ``None``
    :param markers: list of `pyrocko.gui.util.Marker` objects or ``None``
    :param ntracks: float, number of tracks to be shown initially (default:
        12)
    :param follow: time interval (in seconds) for real time follow mode or
        ``None``
    :param controls: bool, whether to show the main controls (default:
        ``True``)
    :param opengl: bool, whether to use opengl (default: ``False``)
    '''

    from pyrocko import pile
    from pyrocko.gui import snuffler
    p = pile.Pile()
    if traces:
        trf = pile.MemTracesFile(None, traces)
        p.add_file(trf)
    return snuffler.snuffle(p, **kwargs)


class InfiniteResponse(Exception):
    '''
    This exception is raised by :py:class:`Trace` operations when
    deconvolution of a frequency response (instrument response transfer
    function) would result in a division by zero.
    '''


class MisalignedTraces(Exception):
    '''
    This exception is raised by some :py:class:`Trace` operations when tmin,
    tmax or number of samples do not match.
    '''

    pass


class NoData(Exception):
    '''
    This exception is raised by some :py:class:`Trace` operations when no or
    not enough data is available.
    '''

    pass


class AboveNyquist(Exception):
    '''
    This exception is raised by some :py:class:`Trace` operations when given
    frequencies are above the Nyquist frequency.
    '''

    pass


class TraceTooShort(Exception):
    '''
    This exception is raised by some :py:class:`Trace` operations when the
    trace is too short.
    '''

    pass


class ResamplingFailed(Exception):
    pass


def minmax(traces, key=None, mode='minmax'):

    '''
    Get data range given traces grouped by selected pattern.

    :param key: a callable which takes as single argument a trace and returns
        a key for the grouping of the results. If this is ``None``, the
        default, ``lambda tr: (tr.network, tr.station, tr.location,
        tr.channel)`` is used.
    :param mode: ``'minmax'`` or floating point number. If this is
        ``'minmax'``, minimum and maximum of the traces are used, if it is a
        number, mean +- standard deviation times ``mode`` is used.

    :returns: a dict with the combined data ranges.

    Examples::

        ranges = minmax(traces, lambda tr: tr.channel)
        print(ranges['N'])  # min & max of all traces with channel == 'N'
        print(ranges['E'])  # min & max of all traces with channel == 'E'

        ranges = minmax(traces, lambda tr: (tr.network, tr.station))
        print(ranges['GR', 'HAM3'])  # min & max of all traces with
                                     # network == 'GR' and station == 'HAM3'

        ranges = minmax(traces, lambda tr: None)
        print(ranges[None])  # min & max of all traces
    '''

    if key is None:
        key = _default_key

    ranges = {}
    for trace in traces:
        if isinstance(mode, str) and mode == 'minmax':
            mi, ma = trace.ydata.min(), trace.ydata.max()
        else:
            mean = trace.ydata.mean()
            std = trace.ydata.std()
            mi, ma = mean-std*mode, mean+std*mode

        k = key(trace)
        if k not in ranges:
            ranges[k] = mi, ma
        else:
            tmi, tma = ranges[k]
            ranges[k] = min(tmi, mi), max(tma, ma)

    return ranges
1991def minmaxtime(traces, key=None):
1993 '''
1994 Get time range given traces grouped by selected pattern.
    :param key: a callable which takes a trace as its single argument and
        returns a key for grouping the results. If this is ``None``, the
        default ``lambda tr: (tr.network, tr.station, tr.location,
        tr.channel)`` is used.

    :returns: a dict with the combined time ranges.
2002 '''
2004 if key is None:
2005 key = _default_key
2007 ranges = {}
2008 for trace in traces:
2009 mi, ma = trace.tmin, trace.tmax
2010 k = key(trace)
2011 if k not in ranges:
2012 ranges[k] = mi, ma
2013 else:
2014 tmi, tma = ranges[k]
2015 ranges[k] = min(tmi, mi), max(tma, ma)
2017 return ranges
2020def degapper(
2021 traces,
2022 maxgap=5,
2023 fillmethod='interpolate',
2024 deoverlap='use_second',
2025 maxlap=None):
2027 '''
2028 Try to connect traces and remove gaps.
2030 This method will combine adjacent traces, which match in their network,
2031 station, location and channel attributes. Overlapping parts are handled
2032 according to the ``deoverlap`` argument.
2034 :param traces: input traces, must be sorted by their full_id attribute.
2035 :param maxgap: maximum number of samples to interpolate.
2036 :param fillmethod: what to put into the gaps: 'interpolate' or 'zeros'.
2037 :param deoverlap: how to handle overlaps: 'use_second' to use data from
2038 second trace (default), 'use_first' to use data from first trace,
2039 'crossfade_cos' to crossfade with cosine taper, 'add' to add amplitude
2040 values.
2041 :param maxlap: maximum number of samples of overlap which are removed
2043 :returns: list of traces
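
    Example (a minimal sketch; assumes ``traces`` is a list of
    :py:class:`Trace` objects covering adjacent time windows of one
    channel)::

        traces.sort(key=lambda tr: tr.full_id)
        traces = degapper(traces, maxgap=10, fillmethod='interpolate')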
2044 '''
2046 in_traces = traces
2047 out_traces = []
2048 if not in_traces:
2049 return out_traces
2050 out_traces.append(in_traces.pop(0))
2051 while in_traces:
2053 a = out_traces[-1]
2054 b = in_traces.pop(0)
2056 avirt, bvirt = a.ydata is None, b.ydata is None
2057 assert avirt == bvirt, \
2058 'traces given to degapper() must either all have data or have ' \
2059 'no data.'
2061 virtual = avirt and bvirt
2063 if (a.nslc_id == b.nslc_id and a.deltat == b.deltat
2064 and a.data_len() >= 1 and b.data_len() >= 1
2065 and (virtual or a.ydata.dtype == b.ydata.dtype)):
2067 dist = (b.tmin-(a.tmin+(a.data_len()-1)*a.deltat))/a.deltat
2068 idist = int(round(dist))
2069 if abs(dist - idist) > 0.05 and idist <= maxgap:
2070 # logger.warning('Cannot degap traces with displaced sampling '
2071 # '(%s, %s, %s, %s)' % a.nslc_id)
2072 pass
2073 else:
2074 if 1 < idist <= maxgap:
2075 if not virtual:
2076 if fillmethod == 'interpolate':
2077 filler = a.ydata[-1] + (
2078 ((1.0 + num.arange(idist-1, dtype=float))
2079 / idist) * (b.ydata[0]-a.ydata[-1])
2080 ).astype(a.ydata.dtype)
2081 elif fillmethod == 'zeros':
                            filler = num.zeros(idist-1, dtype=a.ydata.dtype)
2083 a.ydata = num.concatenate((a.ydata, filler, b.ydata))
2084 a.tmax = b.tmax
2085 if a.mtime and b.mtime:
2086 a.mtime = max(a.mtime, b.mtime)
2087 continue
2089 elif idist == 1:
2090 if not virtual:
2091 a.ydata = num.concatenate((a.ydata, b.ydata))
2092 a.tmax = b.tmax
2093 if a.mtime and b.mtime:
2094 a.mtime = max(a.mtime, b.mtime)
2095 continue
2097 elif idist <= 0 and (maxlap is None or -maxlap < idist):
2098 if b.tmax > a.tmax:
2099 if not virtual:
2100 na = a.ydata.size
2101 n = -idist+1
2102 if deoverlap == 'use_second':
2103 a.ydata = num.concatenate(
2104 (a.ydata[:-n], b.ydata))
2105 elif deoverlap in ('use_first', 'crossfade_cos'):
2106 a.ydata = num.concatenate(
2107 (a.ydata, b.ydata[n:]))
2108 elif deoverlap == 'add':
2109 a.ydata[-n:] += b.ydata[:n]
2110 a.ydata = num.concatenate(
2111 (a.ydata, b.ydata[n:]))
2112 else:
2113 assert False, 'unknown deoverlap method'
2115 if deoverlap == 'crossfade_cos':
2116 n = -idist+1
2117 taper = 0.5-0.5*num.cos(
2118 (1.+num.arange(n))/(1.+n)*num.pi)
2119 a.ydata[na-n:na] *= 1.-taper
2120 a.ydata[na-n:na] += b.ydata[:n] * taper
2122 a.tmax = b.tmax
2123 if a.mtime and b.mtime:
2124 a.mtime = max(a.mtime, b.mtime)
2125 continue
2126 else:
2127 # make short second trace vanish
2128 continue
2130 if b.data_len() >= 1:
2131 out_traces.append(b)
2133 for tr in out_traces:
2134 tr._update_ids()
2136 return out_traces
2139def rotate(traces, azimuth, in_channels, out_channels):
2140 '''
2141 2D rotation of traces.
2143 :param traces: list of input traces
2144 :param azimuth: difference of the azimuths of the component directions
2145 (azimuth of out_channels[0]) - (azimuth of in_channels[0])
2146 :param in_channels: names of the input channels (e.g. 'N', 'E')
2147 :param out_channels: names of the output channels (e.g. 'R', 'T')
2148 :returns: list of rotated traces
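
    Example (a sketch; assumes ``tr_n`` and ``tr_e`` are overlapping north
    and east component traces of one station)::

        rotated = rotate(
            [tr_n, tr_e], azimuth=30.,
            in_channels=('N', 'E'), out_channels=('R', 'T'))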
2149 '''
2151 phi = azimuth/180.*math.pi
2152 cphi = math.cos(phi)
2153 sphi = math.sin(phi)
2154 rotated = []
2155 in_channels = tuple(_channels_to_names(in_channels))
2156 out_channels = tuple(_channels_to_names(out_channels))
2157 for a in traces:
2158 for b in traces:
2159 if ((a.channel, b.channel) == in_channels and
2160 a.nslc_id[:3] == b.nslc_id[:3] and
2161 abs(a.deltat-b.deltat) < a.deltat*0.001):
2162 tmin = max(a.tmin, b.tmin)
2163 tmax = min(a.tmax, b.tmax)
2165 if tmin < tmax:
2166 ac = a.chop(tmin, tmax, inplace=False, include_last=True)
2167 bc = b.chop(tmin, tmax, inplace=False, include_last=True)
2168 if abs(ac.tmin - bc.tmin) > ac.deltat*0.01:
2169 logger.warning(
2170 'Cannot rotate traces with displaced sampling '
2171 '(%s, %s, %s, %s)' % a.nslc_id)
2172 continue
2174 acydata = ac.get_ydata()*cphi+bc.get_ydata()*sphi
2175 bcydata = -ac.get_ydata()*sphi+bc.get_ydata()*cphi
2176 ac.set_ydata(acydata)
2177 bc.set_ydata(bcydata)
2179 ac.set_codes(channel=out_channels[0])
2180 bc.set_codes(channel=out_channels[1])
2181 rotated.append(ac)
2182 rotated.append(bc)
2184 return rotated
2187def rotate_to_rt(n, e, source, receiver, out_channels=('R', 'T')):
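    '''
    Rotate north and east traces into radial and transverse components.

    The rotation angle is computed from the source and receiver positions
    with :py:func:`pyrocko.orthodrome.azimuth`.

    :param n: north component :py:class:`Trace` object
    :param e: east component :py:class:`Trace` object
    :param source: source of the recorded signal (used to compute the
        rotation angle)
    :param receiver: receiver of the recorded signal (used to compute the
        rotation angle)
    :param out_channels: channel codes of the output traces (default:
        ``('R', 'T')``)
    :returns: radial and transverse :py:class:`Trace` objects
    '''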
2188 azimuth = orthodrome.azimuth(receiver, source) + 180.
2189 in_channels = n.channel, e.channel
2190 out = rotate(
2191 [n, e], azimuth,
2192 in_channels=in_channels,
2193 out_channels=out_channels)
2195 assert len(out) == 2
    r = t = None
    for tr in out:
        if tr.channel == out_channels[0]:
            r = tr
        elif tr.channel == out_channels[1]:
            t = tr

    assert r is not None and t is not None

    return r, t
2205def rotate_to_lqt(traces, backazimuth, incidence, in_channels,
2206 out_channels=('L', 'Q', 'T')):
2207 '''
2208 Rotate traces from ZNE to LQT system.
2210 :param traces: list of traces in arbitrary order
2211 :param backazimuth: backazimuth in degrees clockwise from north
2212 :param incidence: incidence angle in degrees from vertical
2213 :param in_channels: input channel names
2214 :param out_channels: output channel names (default: ('L', 'Q', 'T'))
2215 :returns: list of transformed traces
2216 '''
2217 i = incidence/180.*num.pi
2218 b = backazimuth/180.*num.pi
2220 ci = num.cos(i)
2221 cb = num.cos(b)
2222 si = num.sin(i)
2223 sb = num.sin(b)
2225 rotmat = num.array(
2226 [[ci, -cb*si, -sb*si], [si, cb*ci, sb*ci], [0., sb, -cb]])
2227 return project(traces, rotmat, in_channels, out_channels)
2230def _decompose(a):
2231 '''
2232 Decompose matrix into independent submatrices.
2233 '''
2235 def depends(iout, a):
2236 row = a[iout, :]
2237 return set(num.arange(row.size).compress(row != 0.0))
2239 def provides(iin, a):
2240 col = a[:, iin]
2241 return set(num.arange(col.size).compress(col != 0.0))
2243 a = num.asarray(a)
2244 outs = set(range(a.shape[0]))
2245 systems = []
2246 while outs:
2247 iout = outs.pop()
2249 gout = set()
2250 for iin in depends(iout, a):
2251 gout.update(provides(iin, a))
2253 if not gout:
2254 continue
2256 gin = set()
2257 for iout2 in gout:
2258 gin.update(depends(iout2, a))
2260 if not gin:
2261 continue
2263 for iout2 in gout:
2264 if iout2 in outs:
2265 outs.remove(iout2)
2267 gin = list(gin)
2268 gin.sort()
2269 gout = list(gout)
2270 gout.sort()
2272 systems.append((gin, gout, a[gout, :][:, gin]))
2274 return systems
2277def _channels_to_names(channels):
2278 names = []
2279 for ch in channels:
2280 if isinstance(ch, model.Channel):
2281 names.append(ch.name)
2282 else:
2283 names.append(ch)
2284 return names
2287def project(traces, matrix, in_channels, out_channels):
2288 '''
2289 Affine transform of three-component traces.
    Compute the matrix-vector product of three-component traces, e.g. to
    rotate traces into a different basis. The traces are distinguished and
    ordered by their channel attribute. The transform is applied to
    overlapping parts of any appropriate combinations of the input traces,
    which should make this function robust in the presence of data gaps. It
    also tries to apply the transformation to subsets of the channels where
    possible, so that, if for example a vertical component is missing, the
    horizontal components can still be rotated.
2300 :param traces: list of traces in arbitrary order
    :param matrix: transformation matrix
2302 :param in_channels: input channel names
2303 :param out_channels: output channel names
2304 :returns: list of transformed traces
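
    Example (a sketch; assumes ``traces`` contains overlapping Z, N and E
    component traces): flip the polarity of the vertical component::

        matrix = num.array([
            [-1., 0., 0.],
            [0., 1., 0.],
            [0., 0., 1.]])

        flipped = project(traces, matrix, ('Z', 'N', 'E'), ('Z', 'N', 'E'))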
2305 '''
2307 in_channels = tuple(_channels_to_names(in_channels))
2308 out_channels = tuple(_channels_to_names(out_channels))
2309 systems = _decompose(matrix)
    # fall back to the full matrix if some submatrices are not square
2312 for iins, iouts, submatrix in systems:
2313 if submatrix.shape[0] != submatrix.shape[1]:
2314 return _project3(traces, matrix, in_channels, out_channels)
2316 projected = []
2317 for iins, iouts, submatrix in systems:
2318 in_cha = tuple([in_channels[iin] for iin in iins])
2319 out_cha = tuple([out_channels[iout] for iout in iouts])
2320 if submatrix.shape[0] == 1:
2321 projected.extend(_project1(traces, submatrix, in_cha, out_cha))
2322 elif submatrix.shape[1] == 2:
2323 projected.extend(_project2(traces, submatrix, in_cha, out_cha))
2324 else:
2325 projected.extend(_project3(traces, submatrix, in_cha, out_cha))
2327 return projected
2330def project_dependencies(matrix, in_channels, out_channels):
2331 '''
2332 Figure out what dependencies project() would produce.
2333 '''
2335 in_channels = tuple(_channels_to_names(in_channels))
2336 out_channels = tuple(_channels_to_names(out_channels))
2337 systems = _decompose(matrix)
2339 subpro = []
2340 for iins, iouts, submatrix in systems:
2341 if submatrix.shape[0] != submatrix.shape[1]:
2342 subpro.append((matrix, in_channels, out_channels))
2344 if not subpro:
2345 for iins, iouts, submatrix in systems:
2346 in_cha = tuple([in_channels[iin] for iin in iins])
2347 out_cha = tuple([out_channels[iout] for iout in iouts])
2348 subpro.append((submatrix, in_cha, out_cha))
2350 deps = {}
2351 for mat, in_cha, out_cha in subpro:
2352 for oc in out_cha:
2353 if oc not in deps:
2354 deps[oc] = []
2356 for ic in in_cha:
2357 deps[oc].append(ic)
2359 return deps
2362def _project1(traces, matrix, in_channels, out_channels):
2363 assert len(in_channels) == 1
2364 assert len(out_channels) == 1
2365 assert matrix.shape == (1, 1)
2367 projected = []
2368 for a in traces:
2369 if not (a.channel,) == in_channels:
2370 continue
2372 ac = a.copy()
2373 ac.set_ydata(matrix[0, 0]*a.get_ydata())
2374 ac.set_codes(channel=out_channels[0])
2375 projected.append(ac)
2377 return projected
2380def _project2(traces, matrix, in_channels, out_channels):
2381 assert len(in_channels) == 2
2382 assert len(out_channels) == 2
2383 assert matrix.shape == (2, 2)
2384 projected = []
2385 for a in traces:
2386 for b in traces:
2387 if not ((a.channel, b.channel) == in_channels and
2388 a.nslc_id[:3] == b.nslc_id[:3] and
2389 abs(a.deltat-b.deltat) < a.deltat*0.001):
2390 continue
2392 tmin = max(a.tmin, b.tmin)
2393 tmax = min(a.tmax, b.tmax)
2395 if tmin > tmax:
2396 continue
2398 ac = a.chop(tmin, tmax, inplace=False, include_last=True)
2399 bc = b.chop(tmin, tmax, inplace=False, include_last=True)
2400 if abs(ac.tmin - bc.tmin) > ac.deltat*0.01:
2401 logger.warning(
2402 'Cannot project traces with displaced sampling '
2403 '(%s, %s, %s, %s)' % a.nslc_id)
2404 continue
2406 acydata = num.dot(matrix[0], (ac.get_ydata(), bc.get_ydata()))
2407 bcydata = num.dot(matrix[1], (ac.get_ydata(), bc.get_ydata()))
2409 ac.set_ydata(acydata)
2410 bc.set_ydata(bcydata)
2412 ac.set_codes(channel=out_channels[0])
2413 bc.set_codes(channel=out_channels[1])
2415 projected.append(ac)
2416 projected.append(bc)
2418 return projected
2421def _project3(traces, matrix, in_channels, out_channels):
2422 assert len(in_channels) == 3
2423 assert len(out_channels) == 3
2424 assert matrix.shape == (3, 3)
2425 projected = []
2426 for a in traces:
2427 for b in traces:
2428 for c in traces:
2429 if not ((a.channel, b.channel, c.channel) == in_channels
2430 and a.nslc_id[:3] == b.nslc_id[:3]
2431 and b.nslc_id[:3] == c.nslc_id[:3]
2432 and abs(a.deltat-b.deltat) < a.deltat*0.001
2433 and abs(b.deltat-c.deltat) < b.deltat*0.001):
2435 continue
2437 tmin = max(a.tmin, b.tmin, c.tmin)
2438 tmax = min(a.tmax, b.tmax, c.tmax)
2440 if tmin >= tmax:
2441 continue
2443 ac = a.chop(tmin, tmax, inplace=False, include_last=True)
2444 bc = b.chop(tmin, tmax, inplace=False, include_last=True)
2445 cc = c.chop(tmin, tmax, inplace=False, include_last=True)
2446 if (abs(ac.tmin - bc.tmin) > ac.deltat*0.01
2447 or abs(bc.tmin - cc.tmin) > bc.deltat*0.01):
2449 logger.warning(
2450 'Cannot project traces with displaced sampling '
2451 '(%s, %s, %s, %s)' % a.nslc_id)
2452 continue
2454 acydata = num.dot(
2455 matrix[0],
2456 (ac.get_ydata(), bc.get_ydata(), cc.get_ydata()))
2457 bcydata = num.dot(
2458 matrix[1],
2459 (ac.get_ydata(), bc.get_ydata(), cc.get_ydata()))
2460 ccydata = num.dot(
2461 matrix[2],
2462 (ac.get_ydata(), bc.get_ydata(), cc.get_ydata()))
2464 ac.set_ydata(acydata)
2465 bc.set_ydata(bcydata)
2466 cc.set_ydata(ccydata)
2468 ac.set_codes(channel=out_channels[0])
2469 bc.set_codes(channel=out_channels[1])
2470 cc.set_codes(channel=out_channels[2])
2472 projected.append(ac)
2473 projected.append(bc)
2474 projected.append(cc)
2476 return projected
2479def correlate(a, b, mode='valid', normalization=None, use_fft=False):
2480 '''
2481 Cross correlation of two traces.
2483 :param a,b: input traces
2484 :param mode: ``'valid'``, ``'full'``, or ``'same'``
2485 :param normalization: ``'normal'``, ``'gliding'``, or ``None``
2486 :param use_fft: bool, whether to do cross correlation in spectral domain
2488 :returns: trace containing cross correlation coefficients
2490 This function computes the cross correlation between two traces. It
2491 evaluates the discrete equivalent of
2493 .. math::
2495 c(t) = \\int_{-\\infty}^{\\infty} a^{\\ast}(\\tau) b(t+\\tau) d\\tau
    where the star denotes the complex conjugate. Note that the arguments
    here are swapped compared with the :py:func:`numpy.correlate` function,
    which is called internally. This function is safe to use even with older
    versions of NumPy, where the correlate function had some problems.
2502 A trace containing the cross correlation coefficients is returned. The time
2503 information of the output trace is set so that the returned cross
2504 correlation can be viewed directly as a function of time lag.
2506 Example::
2508 # align two traces a and b containing a time shifted similar signal:
        c = pyrocko.trace.correlate(a, b)
2510 t, coef = c.max() # get time and value of maximum
2511 b.shift(-t) # align b with a
2513 '''
2515 assert_same_sampling_rate(a, b)
2517 ya, yb = a.ydata, b.ydata
2519 # need reversed order here:
2520 yc = numpy_correlate_fixed(yb, ya, mode=mode, use_fft=use_fft)
2521 kmin, kmax = numpy_correlate_lag_range(yb, ya, mode=mode, use_fft=use_fft)
2523 if normalization == 'normal':
2524 normfac = num.sqrt(num.sum(ya**2))*num.sqrt(num.sum(yb**2))
2525 yc = yc/normfac
2527 elif normalization == 'gliding':
2528 if mode != 'valid':
2529 assert False, 'gliding normalization currently only available ' \
2530 'with "valid" mode.'
2532 if ya.size < yb.size:
2533 yshort, ylong = ya, yb
2534 else:
2535 yshort, ylong = yb, ya
2537 epsilon = 0.00001
2538 normfac_short = num.sqrt(num.sum(yshort**2))
2539 normfac = normfac_short * num.sqrt(
2540 moving_sum(ylong**2, yshort.size, mode='valid')) \
2541 + normfac_short*epsilon
2543 if yb.size <= ya.size:
2544 normfac = normfac[::-1]
2546 yc /= normfac
2548 c = a.copy()
2549 c.set_ydata(yc)
2550 c.set_codes(*merge_codes(a, b, '~'))
2551 c.shift(-c.tmin + b.tmin-a.tmin + kmin * c.deltat)
2553 return c
2556def deconvolve(
2557 a, b, waterlevel,
2558 tshift=0.,
2559 pad=0.5,
2560 fd_taper=None,
2561 pad_to_pow2=True):
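    '''
    Waterlevel deconvolution of two traces in the frequency domain.

    The spectrum of ``a`` is divided by the spectrum of ``b``; the
    denominator is clipped at ``waterlevel`` times its maximum to prevent
    blow-up at spectral notches.

    :param a,b: input traces; must have the same sampling rate and onset
        time
    :param waterlevel: fraction of the maximum of the denominator power
        spectrum used as its lower bound
    :param tshift: time shift applied to the deconvolution result [s]
    :param pad: padding length as a fraction of the data length of ``a``
    :param fd_taper: frequency-domain taper applied to the quotient
        spectrum, e.g. a :py:class:`CosTaper` object, or ``None``
    :param pad_to_pow2: whether to extend the transform length to the next
        power of two
    :returns: deconvolved trace
    '''
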
    assert_same_sampling_rate(a, b)
2564 assert abs(a.tmin - b.tmin) < a.deltat * 0.001
2565 deltat = a.deltat
2566 npad = int(round(a.data_len()*pad + tshift / deltat))
2568 ndata = max(a.data_len(), b.data_len())
2569 ndata_pad = ndata + npad
2571 if pad_to_pow2:
2572 ntrans = nextpow2(ndata_pad)
2573 else:
        ntrans = ndata_pad  # keep the padding also without pow2 rounding
2576 aspec = num.fft.rfft(a.ydata, ntrans)
2577 bspec = num.fft.rfft(b.ydata, ntrans)
2579 out = aspec * num.conj(bspec)
2581 bautocorr = bspec*num.conj(bspec)
2582 denom = num.maximum(bautocorr, waterlevel * bautocorr.max())
2584 out /= denom
2585 df = 1/(ntrans*deltat)
2587 if fd_taper is not None:
2588 fd_taper(out, 0.0, df)
2590 ydata = num.roll(num.fft.irfft(out), int(round(tshift/deltat)))
2591 c = a.copy(data=False)
2592 c.set_ydata(ydata[:ndata])
2593 c.set_codes(*merge_codes(a, b, '/'))
2594 return c
2597def assert_same_sampling_rate(a, b, eps=1.0e-6):
2598 assert same_sampling_rate(a, b, eps), \
2599 'Sampling rates differ: %g != %g' % (a.deltat, b.deltat)
2602def same_sampling_rate(a, b, eps=1.0e-6):
2603 '''
2604 Check if two traces have the same sampling rate.
2606 :param a,b: input traces
2607 :param eps: relative tolerance
2608 '''
2610 return abs(a.deltat - b.deltat) < (a.deltat + b.deltat)*eps
2613def fix_deltat_rounding_errors(deltat):
2614 '''
2615 Try to undo sampling rate rounding errors.
2617 Fix rounding errors of sampling intervals when these are read from single
2618 precision floating point values.
2620 Assumes that the true sampling rate or sampling interval was an integer
2621 value. No correction will be applied if this would change the sampling
2622 rate by more than 0.001%.
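
    For example::

        fix_deltat_rounding_errors(0.009999999776482582)  # -> 0.01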
2623 '''
2625 if deltat <= 1.0:
2626 deltat_new = 1.0 / round(1.0 / deltat)
2627 else:
2628 deltat_new = round(deltat)
2630 if abs(deltat_new - deltat) / deltat > 1e-5:
2631 deltat_new = deltat
2633 return deltat_new
2636def merge_codes(a, b, sep='-'):
2637 '''
2638 Merge network-station-location-channel codes of a pair of traces.
2639 '''
2641 o = []
2642 for xa, xb in zip(a.nslc_id, b.nslc_id):
2643 if xa == xb:
2644 o.append(xa)
2645 else:
2646 o.append(sep.join((xa, xb)))
2647 return o
2650class Taper(Object):
2651 '''
2652 Base class for tapers.
2654 Does nothing by default.
2655 '''
2657 def __call__(self, y, x0, dx):
2658 pass
2661class CosTaper(Taper):
2662 '''
2663 Cosine Taper.
2665 :param a: start of fading in
2666 :param b: end of fading in
2667 :param c: start of fading out
2668 :param d: end of fading out
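
    Example (a sketch; assumes ``tr`` is a :py:class:`Trace` object whose
    time span covers the interval from 10 s to 35 s)::

        taper = CosTaper(10., 15., 30., 35.)
        tr.taper(taper)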
2669 '''
2671 a = Float.T()
2672 b = Float.T()
2673 c = Float.T()
2674 d = Float.T()
2676 def __init__(self, a, b, c, d):
2677 Taper.__init__(self, a=a, b=b, c=c, d=d)
2679 def __call__(self, y, x0, dx):
2680 apply_costaper(self.a, self.b, self.c, self.d, y, x0, dx)
2682 def span(self, y, x0, dx):
2683 return span_costaper(self.a, self.b, self.c, self.d, y, x0, dx)
2685 def time_span(self):
2686 return self.a, self.d
2689class CosFader(Taper):
2690 '''
2691 Cosine Fader.
2693 :param xfade: fade in and fade out time in seconds (optional)
2694 :param xfrac: fade in and fade out as fraction between 0. and 1. (optional)
    Only one of the two arguments may be set; the other must be ``None``.
2697 '''
2699 xfade = Float.T(optional=True)
2700 xfrac = Float.T(optional=True)
2702 def __init__(self, xfade=None, xfrac=None):
2703 Taper.__init__(self, xfade=xfade, xfrac=xfrac)
2704 assert (xfade is None) != (xfrac is None)
2705 self._xfade = xfade
2706 self._xfrac = xfrac
2708 def __call__(self, y, x0, dx):
2710 xfade = self._xfade
2712 xlen = (y.size - 1)*dx
2713 if xfade is None:
2714 xfade = xlen * self._xfrac
2716 a = x0
2717 b = x0 + xfade
2718 c = x0 + xlen - xfade
2719 d = x0 + xlen
2721 apply_costaper(a, b, c, d, y, x0, dx)
2723 def span(self, y, x0, dx):
2724 return 0, y.size
2726 def time_span(self):
2727 return None, None
2730def none_min(li):
2731 if None in li:
2732 return None
2733 else:
2734 return min(x for x in li if x is not None)
2737def none_max(li):
2738 if None in li:
2739 return None
2740 else:
2741 return max(x for x in li if x is not None)
2744class MultiplyTaper(Taper):
2745 '''
2746 Multiplication of several tapers.
2747 '''
2749 tapers = List.T(Taper.T())
2751 def __init__(self, tapers=None):
2752 if tapers is None:
2753 tapers = []
2755 Taper.__init__(self, tapers=tapers)
2757 def __call__(self, y, x0, dx):
2758 for taper in self.tapers:
2759 taper(y, x0, dx)
2761 def span(self, y, x0, dx):
2762 spans = []
2763 for taper in self.tapers:
2764 spans.append(taper.span(y, x0, dx))
2766 mins, maxs = list(zip(*spans))
2767 return min(mins), max(maxs)
2769 def time_span(self):
2770 spans = []
2771 for taper in self.tapers:
2772 spans.append(taper.time_span())
2774 mins, maxs = list(zip(*spans))
2775 return none_min(mins), none_max(maxs)
2778class GaussTaper(Taper):
2779 '''
2780 Frequency domain Gaussian filter.
2781 '''
2783 alpha = Float.T()
2785 def __init__(self, alpha):
2786 Taper.__init__(self, alpha=alpha)
2787 self._alpha = alpha
2789 def __call__(self, y, x0, dx):
2790 f = x0 + num.arange(y.size)*dx
2791 y *= num.exp(-num.pi**2 / (self._alpha**2) * f**2)
2794class FrequencyResponse(Object):
2795 '''
2796 Evaluates frequency response at given frequencies.
2797 '''
2799 def evaluate(self, freqs):
2800 coefs = num.ones(freqs.size, dtype=complex)
2801 return coefs
2803 def is_scalar(self):
2804 '''
2805 Check if this is a flat response.
2806 '''
        # derived classes are assumed to be non-flat by default
        return type(self) is FrequencyResponse
2814class Evalresp(FrequencyResponse):
2815 '''
2816 Calls evalresp and generates values of the instrument response transfer
2817 function.
    :param respfile: response file in evalresp format
    :param trace: trace for which the response is to be extracted from the
        file; alternatively, ``nslc_id`` and ``time`` can be given
    :param target: ``'dis'`` for displacement or ``'vel'`` for velocity
    :param nslc_id: network, station, location, channel codes (used when no
        ``trace`` is given)
    :param time: time of interest (used when no ``trace`` is given)
2822 '''
2824 respfile = String.T()
2825 nslc_id = Tuple.T(4, String.T())
2826 target = String.T(default='dis')
2827 instant = Float.T()
2829 def __init__(
2830 self, respfile, trace=None, target='dis', nslc_id=None, time=None):
2832 if trace is not None:
2833 nslc_id = trace.nslc_id
2834 time = (trace.tmin + trace.tmax) / 2.
2836 FrequencyResponse.__init__(
2837 self,
2838 respfile=respfile,
2839 nslc_id=nslc_id,
2840 instant=time,
2841 target=target)
2843 def evaluate(self, freqs):
2844 network, station, location, channel = self.nslc_id
2845 x = evalresp.evalresp(
2846 sta_list=station,
2847 cha_list=channel,
2848 net_code=network,
2849 locid=location,
2850 instant=self.instant,
2851 freqs=freqs,
2852 units=self.target.upper(),
2853 file=self.respfile,
2854 rtype='CS')
2856 transfer = x[0][4]
2857 return transfer
2860class InverseEvalresp(FrequencyResponse):
2861 '''
2862 Calls evalresp and generates values of the inverse instrument response for
2863 deconvolution of instrument response.
2865 :param respfile: response file in evalresp format
2866 :param trace: trace for which the response is to be extracted from the file
2867 :param target: ``'dis'`` for displacement or ``'vel'`` for velocity
2868 '''
2870 respfile = String.T()
2871 nslc_id = Tuple.T(4, String.T())
2872 target = String.T(default='dis')
2873 instant = Timestamp.T()
2875 def __init__(self, respfile, trace, target='dis'):
2876 FrequencyResponse.__init__(
2877 self,
2878 respfile=respfile,
2879 nslc_id=trace.nslc_id,
2880 instant=(trace.tmin + trace.tmax)/2.,
2881 target=target)
2883 def evaluate(self, freqs):
2884 network, station, location, channel = self.nslc_id
2885 x = evalresp.evalresp(sta_list=station,
2886 cha_list=channel,
2887 net_code=network,
2888 locid=location,
2889 instant=self.instant,
2890 freqs=freqs,
2891 units=self.target.upper(),
2892 file=self.respfile,
2893 rtype='CS')
2895 transfer = x[0][4]
2896 return 1./transfer
2899class PoleZeroResponse(FrequencyResponse):
2900 '''
2901 Evaluates frequency response from pole-zero representation.
2903 :param zeros: :py:class:`numpy.array` containing complex positions of zeros
2904 :param poles: :py:class:`numpy.array` containing complex positions of poles
2905 :param constant: gain as floating point number
2907 ::
2909 (j*2*pi*f - zeros[0]) * (j*2*pi*f - zeros[1]) * ...
2910 T(f) = constant * ----------------------------------------------------
2911 (j*2*pi*f - poles[0]) * (j*2*pi*f - poles[1]) * ...
2914 The poles and zeros should be given as angular frequencies, not in Hz.
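
    Example: a single pole at the origin with unit gain yields an
    integration response (a sketch)::

        resp = PoleZeroResponse(poles=[0.0j], constant=1.0+0j)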
2915 '''
2917 zeros = List.T(Complex.T())
2918 poles = List.T(Complex.T())
2919 constant = Complex.T(default=1.0+0j)
2921 def __init__(self, zeros=None, poles=None, constant=1.0+0j):
2922 if zeros is None:
2923 zeros = []
2924 if poles is None:
2925 poles = []
2926 FrequencyResponse.__init__(
2927 self, zeros=zeros, poles=poles, constant=constant)
2929 def evaluate(self, freqs):
2930 jomeg = 1.0j * 2.*num.pi*freqs
2932 a = num.ones(freqs.size, dtype=complex)*self.constant
2933 for z in self.zeros:
2934 a *= jomeg-z
2935 for p in self.poles:
2936 a /= jomeg-p
2938 return a
2940 def is_scalar(self):
2941 return len(self.zeros) == 0 and len(self.poles) == 0
2944class ButterworthResponse(FrequencyResponse):
2945 '''
2946 Butterworth frequency response.
2948 :param corner: corner frequency of the response
2949 :param order: order of the response
2950 :param type: either ``high`` or ``low``
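
    Example (a sketch): a 4th-order lowpass with corner frequency of 1.0::

        resp = ButterworthResponse(corner=1.0, order=4, type='low')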
2951 '''
2953 corner = Float.T(default=1.0)
2954 order = Int.T(default=4)
2955 type = StringChoice.T(choices=['low', 'high'], default='low')
2957 def evaluate(self, freqs):
2958 b, a = signal.butter(
2959 int(self.order), float(self.corner), self.type, analog=True)
2960 w, h = signal.freqs(b, a, freqs)
2961 return h
2964class SampledResponse(FrequencyResponse):
2965 '''
2966 Interpolates frequency response given at a set of sampled frequencies.
2968 :param frequencies,values: frequencies and values of the sampled response
2969 function.
2970 :param left,right: values to return when input is out of range. If set to
2971 ``None`` (the default) the endpoints are returned.
2972 '''
2974 frequencies = Array.T(shape=(None,), dtype=float, serialize_as='list')
2975 values = Array.T(shape=(None,), dtype=complex, serialize_as='list')
2976 left = Complex.T(optional=True)
2977 right = Complex.T(optional=True)
    def __init__(self, frequencies, values, left=None, right=None):
        FrequencyResponse.__init__(
            self,
            frequencies=asarray_1d(frequencies, float),
            values=asarray_1d(values, complex),
            left=left,
            right=right)
2985 def evaluate(self, freqs):
2986 ereal = num.interp(
2987 freqs, self.frequencies, num.real(self.values),
2988 left=self.left, right=self.right)
2989 eimag = num.interp(
2990 freqs, self.frequencies, num.imag(self.values),
2991 left=self.left, right=self.right)
2992 transfer = ereal + 1.0j*eimag
2993 return transfer
2995 def inverse(self):
2996 '''
2997 Get inverse as a new :py:class:`SampledResponse` object.
2998 '''
3000 def inv_or_none(x):
3001 if x is not None:
3002 return 1./x
3004 return SampledResponse(
3005 self.frequencies, 1./self.values,
3006 left=inv_or_none(self.left),
3007 right=inv_or_none(self.right))
3010class IntegrationResponse(FrequencyResponse):
3011 '''
3012 The integration response, optionally multiplied by a constant gain.
3014 :param n: exponent (integer)
3015 :param gain: gain factor (float)
3017 ::
3019 gain
3020 T(f) = --------------
3021 (j*2*pi * f)^n
3022 '''
3024 n = Int.T(optional=True, default=1)
3025 gain = Float.T(optional=True, default=1.0)
3027 def __init__(self, n=1, gain=1.0):
3028 FrequencyResponse.__init__(self, n=n, gain=gain)
3030 def evaluate(self, freqs):
3031 nonzero = freqs != 0.0
3032 resp = num.empty(freqs.size, dtype=complex)
3033 resp[nonzero] = self.gain / (1.0j * 2. * num.pi*freqs[nonzero])**self.n
3034 resp[num.logical_not(nonzero)] = 0.0
3035 return resp
3038class DifferentiationResponse(FrequencyResponse):
3039 '''
3040 The differentiation response, optionally multiplied by a constant gain.
3042 :param n: exponent (integer)
3043 :param gain: gain factor (float)
3045 ::
3047 T(f) = gain * (j*2*pi * f)^n
3048 '''
3050 n = Int.T(optional=True, default=1)
3051 gain = Float.T(optional=True, default=1.0)
3053 def __init__(self, n=1, gain=1.0):
3054 FrequencyResponse.__init__(self, n=n, gain=gain)
3056 def evaluate(self, freqs):
3057 return self.gain * (1.0j * 2. * num.pi * freqs)**self.n
3060class AnalogFilterResponse(FrequencyResponse):
3061 '''
3062 Frequency response of an analog filter.
3064 (see :py:func:`scipy.signal.freqs`).
3065 '''
3067 b = List.T(Float.T())
3068 a = List.T(Float.T())
3070 def __init__(self, b, a):
3071 FrequencyResponse.__init__(self, b=b, a=a)
3073 def evaluate(self, freqs):
3074 return signal.freqs(self.b, self.a, freqs/(2.*num.pi))[1]
3077class MultiplyResponse(FrequencyResponse):
3078 '''
3079 Multiplication of several :py:class:`FrequencyResponse` objects.
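
    Example (a sketch; ``tr`` and the response file name are assumed): the
    product of an inverse instrument response and a differentiation::

        resp = MultiplyResponse(responses=[
            InverseEvalresp('RESP.NET.STA.LOC.CHA', tr),
            DifferentiationResponse()])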
3080 '''
3082 responses = List.T(FrequencyResponse.T())
3084 def __init__(self, responses=None):
3085 if responses is None:
3086 responses = []
3087 FrequencyResponse.__init__(self, responses=responses)
3089 def evaluate(self, freqs):
3090 a = num.ones(freqs.size, dtype=complex)
3091 for resp in self.responses:
3092 a *= resp.evaluate(freqs)
3094 return a
3096 def is_scalar(self):
3097 return all(resp.is_scalar() for resp in self.responses)
3100def asarray_1d(x, dtype):
3101 if isinstance(x, (list, tuple)) and x and isinstance(x[0], (str, newstr)):
3102 return num.asarray(list(map(dtype, x)), dtype=dtype)
3103 else:
3104 a = num.asarray(x, dtype=dtype)
3105 if not a.ndim == 1:
3106 raise ValueError('could not convert to 1D array')
3107 return a
3110cached_coefficients = {}
3113def _get_cached_filter_coefs(order, corners, btype):
3114 ck = (order, tuple(corners), btype)
3115 if ck not in cached_coefficients:
        if len(corners) == 1:
            cached_coefficients[ck] = signal.butter(
                order, corners[0], btype=btype)
3119 else:
3120 cached_coefficients[ck] = signal.butter(
3121 order, corners, btype=btype)
3123 return cached_coefficients[ck]
3126class _globals(object):
3127 _numpy_has_correlate_flip_bug = None
3130def _default_key(tr):
3131 return (tr.network, tr.station, tr.location, tr.channel)
3134def numpy_has_correlate_flip_bug():
3135 '''
3136 Check if NumPy's correlate function reveals old behaviour.
3137 '''
3139 if _globals._numpy_has_correlate_flip_bug is None:
3140 a = num.array([0, 0, 1, 0, 0, 0, 0])
3141 b = num.array([0, 0, 0, 0, 1, 0, 0, 0])
3142 ab = num.correlate(a, b, mode='same')
3143 ba = num.correlate(b, a, mode='same')
3144 _globals._numpy_has_correlate_flip_bug = num.all(ab == ba)
3146 return _globals._numpy_has_correlate_flip_bug
3149def numpy_correlate_fixed(a, b, mode='valid', use_fft=False):
3150 '''
3151 Call :py:func:`numpy.correlate` with fixes.
    ::

        c[k] = sum_i a[i+k] * conj(b[i])

3155 Note that the result produced by newer numpy.correlate is always flipped
3156 with respect to the formula given in its documentation (if ascending k
3157 assumed for the output).
3158 '''
3160 if use_fft:
3161 if a.size < b.size:
3162 c = signal.fftconvolve(b[::-1], a, mode=mode)
3163 else:
3164 c = signal.fftconvolve(a, b[::-1], mode=mode)
3165 return c
3167 else:
3168 buggy = numpy_has_correlate_flip_bug()
3170 a = num.asarray(a)
3171 b = num.asarray(b)
3173 if buggy:
3174 b = num.conj(b)
3176 c = num.correlate(a, b, mode=mode)
3178 if buggy and a.size < b.size:
3179 return c[::-1]
3180 else:
3181 return c
3184def numpy_correlate_emulate(a, b, mode='valid'):
3185 '''
3186 Slow version of :py:func:`numpy.correlate` for comparison.
3187 '''
3189 a = num.asarray(a)
3190 b = num.asarray(b)
3193 kmin, kmax = numpy_correlate_lag_range(a, b, mode=mode)
3194 kmin = int(kmin)
3195 kmax = int(kmax)
3196 klen = kmax - kmin + 1
3197 c = num.zeros(klen, dtype=num.find_common_type((b.dtype, a.dtype), ()))
3198 for k in range(kmin, kmin+klen):
3199 imin = max(0, -k)
3200 ilen = min(b.size, a.size-k) - imin
3201 c[k-kmin] = num.sum(
3202 a[imin+k:imin+ilen+k] * num.conj(b[imin:imin+ilen]))
3204 return c
3207def numpy_correlate_lag_range(a, b, mode='valid', use_fft=False):
3208 '''
3209 Get range of lags for which :py:func:`numpy.correlate` produces values.
3210 '''
3212 a = num.asarray(a)
3213 b = num.asarray(b)
3215 kmin = -(b.size-1)
3216 if mode == 'full':
3217 klen = a.size-kmin
3218 elif mode == 'same':
3219 klen = max(a.size, b.size)
3220 kmin += (a.size+b.size-1 - max(a.size, b.size)) // 2 + \
3221 int(not use_fft and a.size % 2 == 0 and b.size > a.size)
3222 elif mode == 'valid':
3223 klen = abs(a.size - b.size) + 1
3224 kmin += min(a.size, b.size) - 1
3226 return kmin, kmin + klen - 1
3229def autocorr(x, nshifts):
3230 '''
    Compute a biased estimate of the first ``nshifts`` autocorrelation
    coefficients.
3233 :param x: input array
3234 :param nshifts: number of coefficients to calculate
3235 '''
3237 mean = num.mean(x)
3238 std = num.std(x)
3239 n = x.size
3240 xdm = x - mean
3241 r = num.zeros(nshifts)
3242 for k in range(nshifts):
3243 r[k] = 1./((n-num.abs(k))*std) * num.sum(xdm[:n-k] * xdm[k:])
3245 return r
3248def yulewalker(x, order):
3249 '''
3250 Compute autoregression coefficients using Yule-Walker method.
3252 :param x: input array
3253 :param order: number of coefficients to produce
    A biased estimate of the autocorrelation is used. The Yule-Walker
    equations are solved by :py:func:`numpy.linalg.inv` instead of the
    Levinson-Durbin recursion which is normally used.
3258 '''
3260 gamma = autocorr(x, order+1)
3261 d = gamma[1:1+order]
3262 a = num.zeros((order, order))
3263 gamma2 = num.concatenate((gamma[::-1], gamma[1:order]))
3264 for i in range(order):
3265 ioff = order-i
3266 a[i, :] = gamma2[ioff:ioff+order]
3268 return num.dot(num.linalg.inv(a), -d)
3271def moving_avg(x, n):
3272 n = int(n)
3273 cx = x.cumsum()
3274 nn = len(x)
3275 y = num.zeros(nn, dtype=cx.dtype)
3276 y[n//2:n//2+(nn-n)] = (cx[n:]-cx[:-n])/n
3277 y[:n//2] = y[n//2]
3278 y[n//2+(nn-n):] = y[n//2+(nn-n)-1]
3279 return y
3282def moving_sum(x, n, mode='valid'):
3283 n = int(n)
3284 cx = x.cumsum()
3285 nn = len(x)
3287 if mode == 'valid':
3288 if nn-n+1 <= 0:
3289 return num.zeros(0, dtype=cx.dtype)
3290 y = num.zeros(nn-n+1, dtype=cx.dtype)
3291 y[0] = cx[n-1]
3292 y[1:nn-n+1] = cx[n:nn]-cx[0:nn-n]
3294 if mode == 'full':
3295 y = num.zeros(nn+n-1, dtype=cx.dtype)
3296 if n <= nn:
3297 y[0:n] = cx[0:n]
3298 y[n:nn] = cx[n:nn]-cx[0:nn-n]
3299 y[nn:nn+n-1] = cx[-1]-cx[nn-n:nn-1]
3300 else:
3301 y[0:nn] = cx[0:nn]
3302 y[nn:n] = cx[nn-1]
3303 y[n:nn+n-1] = cx[nn-1] - cx[0:nn-1]
3305 if mode == 'same':
3306 n1 = (n-1)//2
3307 y = num.zeros(nn, dtype=cx.dtype)
3308 if n <= nn:
3309 y[0:n-n1] = cx[n1:n]
3310 y[n-n1:nn-n1] = cx[n:nn]-cx[0:nn-n]
3311 y[nn-n1:nn] = cx[nn-1] - cx[nn-n:nn-n+n1]
3312 else:
3313 y[0:max(0, nn-n1)] = cx[min(n1, nn):nn]
3314 y[max(nn-n1, 0):min(n-n1, nn)] = cx[nn-1]
3315 y[min(n-n1, nn):nn] = cx[nn-1] - cx[0:max(0, nn-(n-n1))]
3317 return y
3320def nextpow2(i):
3321 return 2**int(math.ceil(math.log(i)/math.log(2.)))
3324def snapper_w_offset(nmax, offset, delta, snapfun=math.ceil):
3325 def snap(x):
3326 return max(0, min(int(snapfun((x-offset)/delta)), nmax))
3327 return snap
3330def snapper(nmax, delta, snapfun=math.ceil):
3331 def snap(x):
3332 return max(0, min(int(snapfun(x/delta)), nmax))
3333 return snap
3336def apply_costaper(a, b, c, d, y, x0, dx):
3337 hi = snapper_w_offset(y.size, x0, dx)
3338 y[:hi(a)] = 0.
3339 y[hi(a):hi(b)] *= 0.5 \
3340 - 0.5*num.cos((dx*num.arange(hi(a), hi(b))-(a-x0))/(b-a)*num.pi)
3341 y[hi(c):hi(d)] *= 0.5 \
3342 + 0.5*num.cos((dx*num.arange(hi(c), hi(d))-(c-x0))/(d-c)*num.pi)
3343 y[hi(d):] = 0.
3346def span_costaper(a, b, c, d, y, x0, dx):
3347 hi = snapper_w_offset(y.size, x0, dx)
3348 return hi(a), hi(d) - hi(a)
3351def costaper(a, b, c, d, nfreqs, deltaf):
3352 hi = snapper(nfreqs, deltaf)
3353 tap = num.zeros(nfreqs)
3354 tap[hi(a):hi(b)] = 0.5 \
3355 - 0.5*num.cos((deltaf*num.arange(hi(a), hi(b))-a)/(b-a)*num.pi)
3356 tap[hi(b):hi(c)] = 1.
3357 tap[hi(c):hi(d)] = 0.5 \
3358 + 0.5*num.cos((deltaf*num.arange(hi(c), hi(d))-c)/(d-c)*num.pi)
3360 return tap
3363def t2ind(t, tdelta, snap=round):
3364 return int(snap(t/tdelta))
3367def hilbert(x, N=None):
3368 '''
    Return the analytic signal of x of length N; its imaginary part is the
    Hilbert transform of x.

    (adapted from scipy.signal, but changed to use fft and ifft from
    numpy.fft)
3372 '''
3374 x = num.asarray(x)
3375 if N is None:
3376 N = len(x)
3377 if N <= 0:
3378 raise ValueError("N must be positive.")
3379 if num.iscomplexobj(x):
3380 logger.warning('imaginary part of x ignored.')
3381 x = num.real(x)
3382 Xf = num.fft.fft(x, N, axis=0)
3383 h = num.zeros(N)
3384 if N % 2 == 0:
3385 h[0] = h[N//2] = 1
3386 h[1:N//2] = 2
3387 else:
3388 h[0] = 1
3389 h[1:(N+1)//2] = 2
3391 if len(x.shape) > 1:
3392 h = h[:, num.newaxis]
3393 x = num.fft.ifft(Xf*h)
3394 return x
3397def near(a, b, eps):
3398 return abs(a-b) < eps
3401def coroutine(func):
3402 def wrapper(*args, **kwargs):
3403 gen = func(*args, **kwargs)
3404 next(gen)
3405 return gen
3407 wrapper.__name__ = func.__name__
3408 wrapper.__dict__ = func.__dict__
3409 wrapper.__doc__ = func.__doc__
3410 return wrapper
3413class States(object):
3414 '''
3415 Utility to store channel-specific state in coroutines.
3416 '''
3418 def __init__(self):
3419 self._states = {}
3421 def get(self, tr):
3422 k = tr.nslc_id
3423 if k in self._states:
3424 tmin, deltat, dtype, value = self._states[k]
3425 if (near(tmin, tr.tmin, deltat/100.)
3426 and near(deltat, tr.deltat, deltat/10000.)
3427 and dtype == tr.ydata.dtype):
3429 return value
3431 return None
3433 def set(self, tr, value):
3434 k = tr.nslc_id
3435 if k in self._states and self._states[k][-1] is not value:
3436 self.free(self._states[k][-1])
3438 self._states[k] = (tr.tmax+tr.deltat, tr.deltat, tr.ydata.dtype, value)
3440 def free(self, value):
3441 pass
3444@coroutine
3445def co_list_append(list):
3446 while True:
3447 list.append((yield))
3450class ScipyBug(Exception):
3451 pass
3454@coroutine
3455def co_lfilter(target, b, a):
3456 '''
3457 Successively filter broken continuous trace data (coroutine).
3459 Create coroutine which takes :py:class:`Trace` objects, filters their data
3460 through :py:func:`scipy.signal.lfilter` and sends new :py:class:`Trace`
3461 objects containing the filtered data to target. This is useful, if one
3462 wants to filter a long continuous time series, which is split into many
3463 successive traces without producing filter artifacts at trace boundaries.
3465 Filter states are kept *per channel*, specifically, for each (network,
    station, location, channel) combination occurring in the input traces, a
3467 separate state is created and maintained. This makes it possible to filter
3468 multichannel or multistation data with only one :py:func:`co_lfilter`
3469 instance.
    Filter state is reset when gaps occur.
3473 Use it like this::
3475 from pyrocko.trace import co_lfilter, co_list_append
3477 filtered_traces = []
        pipe = co_lfilter(co_list_append(filtered_traces), b, a)
3479 for trace in traces:
3480 pipe.send(trace)
3482 pipe.close()
3484 '''
3486 try:
3487 states = States()
3488 output = None
3489 while True:
3490 input = (yield)
3492 zi = states.get(input)
3493 if zi is None:
3494 zi = num.zeros(max(len(a), len(b))-1, dtype=float)
3496 output = input.copy(data=False)
3497 try:
3498 ydata, zf = signal.lfilter(b, a, input.get_ydata(), zi=zi)
3499 except ValueError:
3500 raise ScipyBug(
3501 'signal.lfilter failed: could be related to a bug '
3502 'in some older scipy versions, e.g. on opensuse42.1')
3504 output.set_ydata(ydata)
3505 states.set(input, zf)
3506 target.send(output)
3508 except GeneratorExit:
3509 target.close()
3512def co_antialias(target, q, n=None, ftype='fir'):
3513 b, a, n = util.decimate_coeffs(q, n, ftype)
3514 anti = co_lfilter(target, b, a)
3515 return anti
3518@coroutine
3519def co_dropsamples(target, q, nfir):
3520 try:
3521 states = States()
3522 while True:
3523 tr = (yield)
3524 newdeltat = q * tr.deltat
3525 ioffset = states.get(tr)
3526 if ioffset is None:
                # for fir filters, the first nfir samples are polluted by
                # boundary effects; cut them off.
3529 # for iir this may be (much) more, we do not correct for that.
3530 # put sample instances to a time which is a multiple of the
3531 # new sampling interval.
3532 newtmin_want = math.ceil(
3533 (tr.tmin+(nfir+1)*tr.deltat) / newdeltat) * newdeltat \
3534 - (nfir/2*tr.deltat)
3535 ioffset = int(round((newtmin_want - tr.tmin)/tr.deltat))
3536 if ioffset < 0:
3537 ioffset = ioffset % q
3539 newtmin_have = tr.tmin + ioffset * tr.deltat
3540 newtr = tr.copy(data=False)
3541 newtr.deltat = newdeltat
3542 # because the fir kernel shifts data by nfir/2 samples:
3543 newtr.tmin = newtmin_have - (nfir/2*tr.deltat)
3544 newtr.set_ydata(tr.get_ydata()[ioffset::q].copy())
3545 states.set(tr, (ioffset % q - tr.data_len() % q) % q)
3546 target.send(newtr)
3548 except GeneratorExit:
3549 target.close()
3552def co_downsample(target, q, n=None, ftype='fir'):
3553 '''
3554 Successively downsample broken continuous trace data (coroutine).
3556 Create coroutine which takes :py:class:`Trace` objects, downsamples their
3557 data and sends new :py:class:`Trace` objects containing the downsampled
3558 data to target. This is useful, if one wants to downsample a long
3559 continuous time series, which is split into many successive traces without
3560 producing filter artifacts and gaps at trace boundaries.
3562 Filter states are kept *per channel*, specifically, for each (network,
    station, location, channel) combination occurring in the input traces, a
    separate state is created and maintained. This makes it possible to
    downsample multichannel or multistation data with only one
    :py:func:`co_downsample` instance.
    Filter state is reset when gaps occur. The sampling instances are chosen
    so that they occur at (or as close as possible to) integer multiples of
    the sampling interval of the downsampled trace (based on system time).
3571 '''
3573 b, a, n = util.decimate_coeffs(q, n, ftype)
3574 return co_antialias(co_dropsamples(target, q, n), q, n, ftype)
3577@coroutine
3578def co_downsample_to(target, deltat):
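    '''
    Successively downsample broken continuous trace data to a fixed sampling
    interval (coroutine).

    Works like :py:func:`co_downsample`, but decimation pipelines are set up
    automatically from the ratio between ``deltat`` and the sampling
    interval of each incoming trace. The ratio must (nearly) be a decimation
    factor supported by :py:func:`pyrocko.util.decitab`, otherwise
    :py:exc:`pyrocko.util.UnavailableDecimation` is raised.

    :param target: coroutine which receives the downsampled traces
    :param deltat: target sampling interval [s]
    '''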
3580 decimators = {}
3581 try:
3582 while True:
3583 tr = (yield)
3584 ratio = deltat / tr.deltat
3585 rratio = round(ratio)
3586 if abs(rratio - ratio)/ratio > 0.0001:
3587 raise util.UnavailableDecimation('ratio = %g' % ratio)
3589 deci_seq = tuple(x for x in util.decitab(int(rratio)) if x != 1)
3590 if deci_seq not in decimators:
3591 pipe = target
3592 for q in deci_seq[::-1]:
3593 pipe = co_downsample(pipe, q)
3595 decimators[deci_seq] = pipe
3597 decimators[deci_seq].send(tr)
3599 except GeneratorExit:
3600 for g in decimators.values():
3601 g.close()
3604class DomainChoice(StringChoice):
3605 choices = [
3606 'time_domain',
3607 'frequency_domain',
3608 'envelope',
3609 'absolute',
3610 'cc_max_norm']
3613class MisfitSetup(Object):
3614 '''
    Contains the misfit setup to be used in :py:meth:`Trace.misfit`.
3617 :param description: Description of the setup
3618 :param norm: L-norm classifier
3619 :param taper: Object of :py:class:`Taper`
3620 :param filter: Object of :py:class:`FrequencyResponse`
3621 :param domain: ['time_domain', 'frequency_domain', 'envelope', 'absolute',
3622 'cc_max_norm']
    Can be dumped to a YAML file.
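
    Example (a sketch)::

        setup = MisfitSetup(
            description='time domain misfit with L2 norm',
            norm=2,
            taper=CosTaper(0., 5., 45., 50.),
            domain='time_domain')

        print(setup.dump())  # YAML representation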
3625 '''
3627 xmltagname = 'misfitsetup'
3628 description = String.T(optional=True)
3629 norm = Int.T(optional=False)
3630 taper = Taper.T(optional=False)
3631 filter = FrequencyResponse.T(optional=True)
3632 domain = DomainChoice.T(default='time_domain')
3635def equalize_sampling_rates(trace_1, trace_2):
3636 '''
3637 Equalize sampling rates of two traces (reduce higher sampling rate to
3638 lower).
3640 :param trace_1: :py:class:`Trace` object
3641 :param trace_2: :py:class:`Trace` object
    :returns: the two traces; if resampling was needed, the trace with the
        higher sampling rate is replaced by a downsampled copy.
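
    Example (a sketch; assumes ``tr1`` and ``tr2`` are :py:class:`Trace`
    objects with different sampling rates)::

        tr1, tr2 = equalize_sampling_rates(tr1, tr2)
        assert same_sampling_rate(tr1, tr2)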
3644 '''
3646 if same_sampling_rate(trace_1, trace_2):
3647 return trace_1, trace_2
3649 if trace_1.deltat < trace_2.deltat:
3650 t1_out = trace_1.copy()
3651 t1_out.downsample_to(deltat=trace_2.deltat, snap=True)
3652 logger.debug('Trace downsampled (return copy of trace): %s'
3653 % '.'.join(t1_out.nslc_id))
3654 return t1_out, trace_2
3656 elif trace_1.deltat > trace_2.deltat:
3657 t2_out = trace_2.copy()
3658 t2_out.downsample_to(deltat=trace_1.deltat, snap=True)
3659 logger.debug('Trace downsampled (return copy of trace): %s'
3660 % '.'.join(t2_out.nslc_id))
3661 return trace_1, t2_out
3664def Lx_norm(u, v, norm=2):
3665 '''
    Calculate the misfit *m* and the normalization divisor *n* according to
    the given norm.
3669 The normalization divisor *n* is calculated from ``v``.
3671 :param u: :py:class:`numpy.array`
3672 :param v: :py:class:`numpy.array`
3673 :param norm: (default = 2)
    ``u`` and ``v`` must be of the same size.
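
    For example, with ``norm=1``::

        m, n = Lx_norm(num.array([1., 2.]), num.array([2., 4.]), norm=1)
        # m == 3.0 (= |2-1| + |4-2|),  n == 6.0 (= |2| + |4|)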
3676 '''
3678 if norm == 1:
3679 return (
3680 num.sum(num.abs(v-u)),
3681 num.sum(num.abs(v)))
3683 elif norm == 2:
3684 return (
3685 num.sqrt(num.sum((v-u)**2)),
3686 num.sqrt(num.sum(v**2)))
3688 else:
3689 return (
3690 num.power(num.sum(num.abs(num.power(v - u, norm))), 1./norm),
3691 num.power(num.sum(num.abs(num.power(v, norm))), 1./norm))
3694def do_downsample(tr, deltat):
3695 if abs(tr.deltat - deltat) / tr.deltat > 1e-6:
3696 tr = tr.copy()
3697 tr.downsample_to(deltat, snap=True, demean=False)
3698 else:
3699 if tr.tmin/tr.deltat > 1e-6 or tr.tmax/tr.deltat > 1e-6:
3700 tr = tr.copy()
3701 tr.snap()
3702 return tr
3705def do_extend(tr, tmin, tmax):
3706 if tmin < tr.tmin or tmax > tr.tmax:
3707 tr = tr.copy()
3708 tr.extend(tmin=tmin, tmax=tmax, fillmethod='repeat')
3710 return tr
3713def do_pre_taper(tr, taper):
3714 return tr.taper(taper, inplace=False, chop=True)
3717def do_fft(tr, filter):
3718 if filter is None:
3719 return tr
3720 else:
3721 ndata = tr.ydata.size
3722 nfft = nextpow2(ndata)
3723 padded = num.zeros(nfft, dtype=float)
3724 padded[:ndata] = tr.ydata
3725 spectrum = num.fft.rfft(padded)
3726 df = 1.0 / (tr.deltat * nfft)
3727 frequencies = num.arange(spectrum.size)*df
3728 return [tr, frequencies, spectrum]
3731def do_filter(inp, filter):
3732 if filter is None:
3733 return inp
3734 else:
3735 tr, frequencies, spectrum = inp
3736 spectrum *= filter.evaluate(frequencies)
3737 return [tr, frequencies, spectrum]
3740def do_ifft(inp):
3741 if isinstance(inp, Trace):
3742 return inp
3743 else:
3744 tr, _, spectrum = inp
3745 ndata = tr.ydata.size
3746 tr = tr.copy(data=False)
3747 tr.set_ydata(num.fft.irfft(spectrum)[:ndata])
3748 return tr
3751def check_alignment(t1, t2):
3752 if abs(t1.tmin-t2.tmin) > t1.deltat * 1e-4 or \
3753 abs(t1.tmax - t2.tmax) > t1.deltat * 1e-4 or \
3754 t1.ydata.shape != t2.ydata.shape:
3755 raise MisalignedTraces(
3756 'Cannot calculate misfit of %s and %s due to misaligned '
3757 'traces.' % ('.'.join(t1.nslc_id), '.'.join(t2.nslc_id)))