# https://pyrocko.org - GPLv3
#
# The Pyrocko Developers, 21st Century
# ---|P------/S----------~Lg----------

'''
This module provides basic signal processing for seismic traces.
'''

import time
import math
import copy
import logging
import hashlib
from collections import defaultdict

import numpy as num
from scipy import signal

from pyrocko import util, orthodrome, pchain, model
from pyrocko.util import reuse
from pyrocko.guts import Object, Float, Int, String, List, \
    StringChoice, Timestamp
from pyrocko.guts_array import Array
from pyrocko.model import squirrel_content

# backward compatibility
from pyrocko.util import UnavailableDecimation  # noqa
from pyrocko.response import (  # noqa
    FrequencyResponse, Evalresp, InverseEvalresp, PoleZeroResponse,
    ButterworthResponse, SampledResponse, IntegrationResponse,
    DifferentiationResponse, MultiplyResponse)


guts_prefix = 'pf'

logger = logging.getLogger('pyrocko.trace')


@squirrel_content
class Trace(Object):

    '''
    Create new trace object.

    A ``Trace`` object represents a single continuous strip of evenly sampled
    time series data. It is built from a 1D NumPy array containing the data
    samples and some attributes describing its beginning and ending time, its
    sampling rate and four string identifiers (its network, station, location
    and channel code).

    :param network: network code
    :param station: station code
    :param location: location code
    :param channel: channel code
    :param extra: extra code
    :param tmin: system time of first sample in [s]
    :param tmax: system time of last sample in [s] (if set to ``None`` it is
        computed from length of ``ydata``)
    :param deltat: sampling interval in [s]
    :param ydata: 1D numpy array with data samples (can be ``None`` when
        ``tmax`` is not ``None``)
    :param mtime: optional modification time

    :param meta: additional meta information (not used, but maintained by the
        library)

    The length of the network, station, location and channel codes is not
    restricted by this software, but data formats like SAC, Mini-SEED or GSE
    have different limits on the lengths of these codes. The codes set here
    are silently truncated when the trace is stored.
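
    Usage sketch (synthetic data; station code, channel code and the
    one-second sine are arbitrary values for illustration)::

        import numpy as num
        from pyrocko import trace

        tr = trace.Trace(
            station='TEST', channel='BHZ', tmin=0.0, deltat=0.01,
            ydata=num.sin(2.0 * num.pi * num.arange(1000) * 0.01))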
    '''

    network = String.T(default='', help='Deployment/network code (1-8)')
    station = String.T(default='STA', help='Station code (1-5)')
    location = String.T(default='', help='Location code (0-2)')
    channel = String.T(default='', help='Channel code (3)')
    extra = String.T(default='', help='Extra/custom code')

    tmin = Timestamp.T(default=Timestamp.D('1970-01-01 00:00:00'))
    tmax = Timestamp.T()

    deltat = Float.T(default=1.0)
    ydata = Array.T(optional=True, shape=(None,), serialize_as='base64+meta')

    mtime = Timestamp.T(optional=True)

    cached_frequencies = {}

    def __init__(self, network='', station='STA', location='', channel='',
                 tmin=0., tmax=None, deltat=1., ydata=None, mtime=None,
                 meta=None, extra=''):

        Object.__init__(self, init_props=False)

        time_float = util.get_time_float()

        if not isinstance(tmin, time_float):
            tmin = Trace.tmin.regularize_extra(tmin)

        if tmax is not None and not isinstance(tmax, time_float):
            tmax = Trace.tmax.regularize_extra(tmax)

        if mtime is not None and not isinstance(mtime, time_float):
            mtime = Trace.mtime.regularize_extra(mtime)

        if ydata is not None and not isinstance(ydata, num.ndarray):
            ydata = Trace.ydata.regularize_extra(ydata)

        self._growbuffer = None

        tmin = time_float(tmin)
        if tmax is not None:
            tmax = time_float(tmax)

        if mtime is None:
            mtime = time_float(time.time())

        self.network, self.station, self.location, self.channel, \
            self.extra = [
                reuse(x) for x in (
                    network, station, location, channel, extra)]

        self.tmin = tmin
        self.deltat = deltat

        if tmax is None:
            if ydata is not None:
                self.tmax = self.tmin + (ydata.size-1)*self.deltat
            else:
                raise Exception(
                    'fixme: trace must be created with tmax or ydata')
        else:
            n = int(round((tmax - self.tmin) / self.deltat)) + 1
            self.tmax = self.tmin + (n - 1) * self.deltat

        self.meta = meta
        self.ydata = ydata
        self.mtime = mtime
        self._update_ids()
        self.file = None
        self._pchain = None

    def __str__(self):
        fmt = min(9, max(0, -int(math.floor(math.log10(self.deltat)))))
        s = 'Trace (%s, %s, %s, %s)\n' % self.nslc_id
        s += '  timerange: %s - %s\n' % (
            util.time_to_str(self.tmin, format=fmt),
            util.time_to_str(self.tmax, format=fmt))

        s += '  delta t: %g\n' % self.deltat
        if self.meta:
            for k in sorted(self.meta.keys()):
                s += '  %s: %s\n' % (k, self.meta[k])
        return s

    @property
    def codes(self):
        from pyrocko.squirrel import model
        return model.CodesNSLCE(
            self.network, self.station, self.location, self.channel,
            self.extra)

    @property
    def time_span(self):
        return self.tmin, self.tmax

    @property
    def summary_entries(self):
        return (
            self.__class__.__name__,
            str(self.codes),
            self.str_time_span,
            '%g' % (1.0/self.deltat))

    @property
    def summary(self):
        return util.fmt_summary(self.summary_entries, (10, 20, 55, 0))

    def __getstate__(self):
        return (self.network, self.station, self.location, self.channel,
                self.tmin, self.tmax, self.deltat, self.mtime,
                self.ydata, self.meta, self.extra)

    def __setstate__(self, state):
        if len(state) == 11:
            self.network, self.station, self.location, self.channel, \
                self.tmin, self.tmax, self.deltat, self.mtime, \
                self.ydata, self.meta, self.extra = state

        elif len(state) == 12:
            # backward compatibility 0
            self.network, self.station, self.location, self.channel, \
                self.tmin, self.tmax, self.deltat, self.mtime, \
                self.ydata, self.meta, _, self.extra = state

        elif len(state) == 10:
            # backward compatibility 1
            self.network, self.station, self.location, self.channel, \
                self.tmin, self.tmax, self.deltat, self.mtime, \
                self.ydata, self.meta = state

            self.extra = ''

        else:
            # backward compatibility 2
            self.network, self.station, self.location, self.channel, \
                self.tmin, self.tmax, self.deltat, self.mtime = state
            self.ydata = None
            self.meta = None
            self.extra = ''

        self._growbuffer = None
        self._update_ids()

    def hash(self, unsafe=False):
        sha1 = hashlib.sha1()
        for code in self.nslc_id:
            sha1.update(code.encode())
        sha1.update(self.tmin.hex().encode())
        sha1.update(self.tmax.hex().encode())
        sha1.update(self.deltat.hex().encode())

        if self.ydata is not None:
            if not unsafe:
                sha1.update(memoryview(self.ydata))
            else:
                sha1.update(memoryview(self.ydata[:32]))
                sha1.update(memoryview(self.ydata[-32:]))

        return sha1.hexdigest()

    def name(self):
        '''
        Get a short string description.
        '''

        s = '%s.%s.%s.%s, %s, %s' % (self.nslc_id + (
            util.time_to_str(self.tmin),
            util.time_to_str(self.tmax)))

        return s

    def __eq__(self, other):
        return (
            isinstance(other, Trace)
            and self.network == other.network
            and self.station == other.station
            and self.location == other.location
            and self.channel == other.channel
            and (abs(self.deltat - other.deltat)
                 < (self.deltat + other.deltat)*1e-6)
            and abs(self.tmin-other.tmin) < self.deltat*0.01
            and abs(self.tmax-other.tmax) < self.deltat*0.01
            and num.all(self.ydata == other.ydata))

    def almost_equal(self, other, rtol=1e-5, atol=0.0):
        return (
            self.network == other.network
            and self.station == other.station
            and self.location == other.location
            and self.channel == other.channel
            and (abs(self.deltat - other.deltat)
                 < (self.deltat + other.deltat)*1e-6)
            and abs(self.tmin-other.tmin) < self.deltat*0.01
            and abs(self.tmax-other.tmax) < self.deltat*0.01
            and num.allclose(self.ydata, other.ydata, rtol=rtol, atol=atol))

    def assert_almost_equal(self, other, rtol=1e-5, atol=0.0):

        assert self.network == other.network, \
            'network codes differ: %s, %s' % (self.network, other.network)
        assert self.station == other.station, \
            'station codes differ: %s, %s' % (self.station, other.station)
        assert self.location == other.location, \
            'location codes differ: %s, %s' % (self.location, other.location)
        assert self.channel == other.channel, 'channel codes differ'
        assert (abs(self.deltat - other.deltat)
                < (self.deltat + other.deltat)*1e-6), \
            'sampling intervals differ %g, %g' % (self.deltat, other.deltat)
        assert abs(self.tmin-other.tmin) < self.deltat*0.01, \
            'start times differ: %s, %s' % (
                util.time_to_str(self.tmin), util.time_to_str(other.tmin))
        assert abs(self.tmax-other.tmax) < self.deltat*0.01, \
            'end times differ: %s, %s' % (
                util.time_to_str(self.tmax), util.time_to_str(other.tmax))

        assert num.allclose(self.ydata, other.ydata, rtol=rtol, atol=atol), \
            'trace values differ'

    def __hash__(self):
        return id(self)

    def __call__(self, t, clip=False, snap=round):
        it = int(snap((t-self.tmin)/self.deltat))
        if clip:
            it = max(0, min(it, self.ydata.size-1))
        else:
            if it < 0 or self.ydata.size <= it:
                raise IndexError()

        return self.tmin+it*self.deltat, self.ydata[it]

    def interpolate(self, t, clip=False):
        '''
        Value of trace between supporting points through linear interpolation.

        :param t: time instant
        :param clip: whether to clip indices to trace ends
        '''

        t0, y0 = self(t, clip=clip, snap=math.floor)
        t1, y1 = self(t, clip=clip, snap=math.ceil)
        if t0 == t1:
            return y0
        else:
            return y0+(t-t0)/(t1-t0)*(y1-y0)

    def index_clip(self, i):
        '''
        Clip index to valid range.
        '''

        return min(max(0, i), self.ydata.size)

    def add(self, other, interpolate=True, left=0., right=0.):
        '''
        Add values of other trace (self += other).

        Add values of ``other`` trace to the values of ``self``, where it
        intersects with ``other``. This method does not change the extent of
        ``self``. If ``interpolate`` is ``True`` (the default), the values of
        ``other`` to be added are interpolated at sampling instants of
        ``self``. Linear interpolation is performed. In this case the sampling
        rate of ``other`` must be equal to or lower than that of ``self``. If
        ``interpolate`` is ``False``, the sampling rates of the two traces
        must match.
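
        Sketch of a typical use (``a`` and ``b`` are assumed to be
        overlapping :py:class:`Trace` objects with identical sampling
        rates)::

            a.add(b, interpolate=False)  # stack b onto a where they overlap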
        '''

        if interpolate:
            assert self.deltat <= other.deltat \
                or same_sampling_rate(self, other)

            other_xdata = other.get_xdata()
            xdata = self.get_xdata()
            self.ydata += num.interp(
                xdata, other_xdata, other.ydata, left=left, right=right)
        else:
            assert self.deltat == other.deltat
            ioff = int(round((other.tmin-self.tmin)/self.deltat))
            ibeg = max(0, ioff)
            iend = min(self.data_len(), ioff+other.data_len())
            self.ydata[ibeg:iend] += other.ydata[ibeg-ioff:iend-ioff]

    def mult(self, other, interpolate=True):
        '''
        Multiply with values of other trace ``(self *= other)``.

        Multiply values of ``self`` with the values of ``other`` trace, where
        it intersects with ``other``. This method does not change the extent
        of ``self``. If ``interpolate`` is ``True`` (the default), the values
        of ``other`` to be multiplied are interpolated at sampling instants
        of ``self``. Linear interpolation is performed. In this case the
        sampling rate of ``other`` must be equal to or lower than that of
        ``self``. If ``interpolate`` is ``False``, the sampling rates of the
        two traces must match.
        '''

        if interpolate:
            assert self.deltat <= other.deltat or \
                same_sampling_rate(self, other)

            other_xdata = other.get_xdata()
            xdata = self.get_xdata()
            self.ydata *= num.interp(
                xdata, other_xdata, other.ydata, left=0., right=0.)
        else:
            assert self.deltat == other.deltat
            ibeg1 = int(round((other.tmin-self.tmin)/self.deltat))
            ibeg2 = int(round((self.tmin-other.tmin)/self.deltat))
            iend1 = int(round((other.tmax-self.tmin)/self.deltat))+1
            iend2 = int(round((self.tmax-other.tmin)/self.deltat))+1

            ibeg1 = self.index_clip(ibeg1)
            iend1 = self.index_clip(iend1)
            ibeg2 = self.index_clip(ibeg2)
            iend2 = self.index_clip(iend2)

            self.ydata[ibeg1:iend1] *= other.ydata[ibeg2:iend2]

    def max(self):
        '''
        Get time and value of data maximum.
        '''

        i = num.argmax(self.ydata)
        return self.tmin + i*self.deltat, self.ydata[i]

    def min(self):
        '''
        Get time and value of data minimum.
        '''

        i = num.argmin(self.ydata)
        return self.tmin + i*self.deltat, self.ydata[i]

    def absmax(self):
        '''
        Get time and value of maximum of the absolute of data.
        '''

        tmi, mi = self.min()
        tma, ma = self.max()
        if abs(mi) > abs(ma):
            return tmi, abs(mi)
        else:
            return tma, abs(ma)

    def set_codes(
            self, network=None, station=None, location=None, channel=None,
            extra=None):

        '''
        Set network, station, location, and channel codes.
        '''

        if network is not None:
            self.network = network
        if station is not None:
            self.station = station
        if location is not None:
            self.location = location
        if channel is not None:
            self.channel = channel
        if extra is not None:
            self.extra = extra

        self._update_ids()

    def set_network(self, network):
        self.network = network
        self._update_ids()

    def set_station(self, station):
        self.station = station
        self._update_ids()

    def set_location(self, location):
        self.location = location
        self._update_ids()

    def set_channel(self, channel):
        self.channel = channel
        self._update_ids()

    def overlaps(self, tmin, tmax):
        '''
        Check if trace has overlap with a given time span.
        '''
        return not (tmax < self.tmin or self.tmax < tmin)

    def is_relevant(self, tmin, tmax, selector=None):
        '''
        Check if trace has overlap with a given time span and matches a
        condition callback. (internal use)
        '''

        return not (tmax <= self.tmin or self.tmax < tmin) \
            and (selector is None or selector(self))

    def _update_ids(self):
        '''
        Update dependent ids.
        '''

        self.full_id = (
            self.network, self.station, self.location, self.channel,
            self.tmin)
        self.nslc_id = reuse(
            (self.network, self.station, self.location, self.channel))

    def prune_from_reuse_cache(self):
        util.deuse(self.nslc_id)
        util.deuse(self.network)
        util.deuse(self.station)
        util.deuse(self.location)
        util.deuse(self.channel)

    def set_mtime(self, mtime):
        '''
        Set modification time of the trace.
        '''

        self.mtime = mtime

    def get_xdata(self):
        '''
        Create array for time axis.
        '''

        if self.ydata is None:
            raise NoData()

        return self.tmin \
            + num.arange(len(self.ydata), dtype=num.float64) * self.deltat

    def get_ydata(self):
        '''
        Get data array.
        '''

        if self.ydata is None:
            raise NoData()

        return self.ydata

    def set_ydata(self, new_ydata):
        '''
        Replace data array.
        '''

        self.drop_growbuffer()
        self.ydata = new_ydata
        self.tmax = self.tmin+(len(self.ydata)-1)*self.deltat

    def data_len(self):
        if self.ydata is not None:
            return self.ydata.size
        else:
            return int(round((self.tmax-self.tmin)/self.deltat)) + 1

    def drop_data(self):
        '''
        Forget data, make dataless trace.
        '''

        self.drop_growbuffer()
        self.ydata = None

    def drop_growbuffer(self):
        '''
        Detach the trace's grow buffer.
        '''

        self._growbuffer = None
        self._pchain = None

    def copy(self, data=True):
        '''
        Make a deep copy of the trace.
        '''

        tracecopy = copy.copy(self)
        tracecopy.drop_growbuffer()
        if data:
            tracecopy.ydata = self.ydata.copy()

        tracecopy.meta = copy.deepcopy(self.meta)
        return tracecopy

    def crop_zeros(self):
        '''
        Remove any zeros at beginning and end.
        '''

        indices = num.where(self.ydata != 0.0)[0]
        if indices.size == 0:
            raise NoData()

        ibeg = indices[0]
        iend = indices[-1]+1
        if ibeg == 0 and iend == self.ydata.size:
            return

        self.drop_growbuffer()
        self.ydata = self.ydata[ibeg:iend].copy()
        self.tmin = self.tmin+ibeg*self.deltat
        self.tmax = self.tmin+(len(self.ydata)-1)*self.deltat
        self._update_ids()

    def append(self, data):
        '''
        Append data to the end of the trace.

        To make this method efficient when successively very few or even
        single samples are appended, a larger grow buffer is allocated upon
        first invocation. The trace's data is then changed to be a view into
        the currently filled portion of the grow buffer array.
        '''

        assert self.ydata.dtype == data.dtype
        newlen = data.size + self.ydata.size
        if self._growbuffer is None or self._growbuffer.size < newlen:
            self._growbuffer = num.empty(newlen*2, dtype=self.ydata.dtype)
            self._growbuffer[:self.ydata.size] = self.ydata
        self._growbuffer[self.ydata.size:newlen] = data
        self.ydata = self._growbuffer[:newlen]
        self.tmax = self.tmin + (newlen-1)*self.deltat

    def chop(
            self, tmin, tmax, inplace=True, include_last=False,
            snap=(round, round), want_incomplete=True):

        '''
        Cut the trace to given time span.

        If the ``inplace`` argument is True (the default) the trace is cut in
        place, otherwise a new trace with the cut part is returned. By
        default, the indices where to start and end the trace data array are
        determined by rounding of ``tmin`` and ``tmax`` to sampling instants
        using Python's :py:func:`round` function. This behaviour can be
        changed with the ``snap`` argument, which takes a tuple of two
        functions (one for the lower and one for the upper end) to be used
        instead of :py:func:`round`. The last sample is by default not
        included unless ``include_last`` is set to True. If the given time
        span exceeds the available time span of the trace, the available part
        is returned, unless ``want_incomplete`` is set to False - in that
        case, a :py:exc:`NoData` exception is raised. This exception is
        always raised when the requested time span does not overlap with the
        trace's time span.
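
        Sketch of a typical use (cuts out one minute of data, leaving ``tr``
        untouched)::

            snippet = tr.chop(tr.tmin + 60., tr.tmin + 120., inplace=False)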
        '''

        if want_incomplete:
            if tmax <= self.tmin-self.deltat or self.tmax+self.deltat < tmin:
                raise NoData()
        else:
            if tmin < self.tmin or self.tmax < tmax:
                raise NoData()

        ibeg = max(0, t2ind(tmin-self.tmin, self.deltat, snap[0]))
        iplus = 0
        if include_last:
            iplus = 1

        iend = min(
            self.data_len(),
            t2ind(tmax-self.tmin, self.deltat, snap[1])+iplus)

        if ibeg >= iend:
            raise NoData()

        obj = self
        if not inplace:
            obj = self.copy(data=False)

        self.drop_growbuffer()
        if self.ydata is not None:
            obj.ydata = self.ydata[ibeg:iend].copy()
        else:
            obj.ydata = None

        obj.tmin = obj.tmin+ibeg*obj.deltat
        obj.tmax = obj.tmin+((iend-ibeg)-1)*obj.deltat

        obj._update_ids()

        return obj

    def downsample(self, ndecimate, snap=False, initials=None, demean=False,
                   ftype='fir-remez'):
        '''
        Downsample trace by a given integer factor.

        Antialiasing filter details:

        * ``iir``: A Chebyshev type I digital filter of order 8 with maximum
          ripple of 0.05 dB in the passband.
        * ``fir``: A FIR filter using a Hamming window and 31 taps and a
          well-behaved phase response.
        * ``fir-remez``: A FIR filter based on the Remez exchange algorithm
          with ``45*ndecimate`` taps and a cutoff at 75% Nyquist frequency.

        Comparison of the digital filters:

        .. figure:: ../../static/downsampling-filter-comparison.png
            :width: 60%
            :alt: Comparison of the downsampling filters.

        :param ndecimate: decimation factor, avoid values larger than 8
        :param snap: whether to put the new sampling instants closest to
            multiples of the sampling rate.
        :param initials: ``None``, ``True``, or initial conditions for the
            anti-aliasing filter, obtained from a previous run. In the latter
            two cases the final state of the filter is returned instead of
            ``None``.
        :param demean: whether to demean the signal before filtering.
        :param ftype: which antialiasing filter to use, choose from
            ``'iir'``, ``'fir'``, ``'fir-remez'``. Default is ``'fir-remez'``.
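
        Sketch of a typical use (reduces a 100 Hz trace, ``deltat=0.01``, to
        25 Hz)::

            tr.downsample(4, demean=True)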
        '''
        newdeltat = self.deltat*ndecimate
        if snap:
            ilag = int(round(
                (math.ceil(self.tmin / newdeltat) * newdeltat - self.tmin)
                / self.deltat))
        else:
            ilag = 0

        if snap and ilag > 0 and ilag < self.ydata.size:
            data = self.ydata.astype(num.float64)
            self.tmin += ilag*self.deltat
        else:
            data = self.ydata.astype(num.float64)

        if demean:
            data -= num.mean(data)

        if data.size != 0:
            result = util.decimate(
                data, ndecimate, ftype=ftype, zi=initials, ioff=ilag)
        else:
            result = data

        if initials is None:
            self.ydata, finals = result, None
        else:
            self.ydata, finals = result

        self.deltat = reuse(self.deltat*ndecimate)
        self.tmax = self.tmin+(len(self.ydata)-1)*self.deltat
        self._update_ids()

        return finals

    def downsample_to(self, deltat, snap=False, allow_upsample_max=1,
                      initials=None, demean=False, ftype='fir-remez'):

        '''
        Downsample to given sampling rate.

        Tries to downsample the trace to a target sampling interval of
        ``deltat``. This runs :py:meth:`Trace.downsample` one or several
        times. If ``allow_upsample_max`` is set to a value larger than 1,
        intermediate upsampling steps are allowed, in order to increase the
        number of possible downsampling ratios.

        If the requested ratio is not supported, an exception of type
        :py:exc:`pyrocko.util.UnavailableDecimation` is raised.

        :param deltat: desired sampling interval in [s]
        :param allow_upsample_max: maximum upsampling of the signal to
            achieve the desired ``deltat``. Default is ``1``.
        :param snap: whether to put the new sampling instants closest to
            multiples of the sampling rate.
        :param initials: ``None``, ``True``, or initial conditions for the
            anti-aliasing filter, obtained from a previous run. In the latter
            two cases the final state of the filter is returned instead of
            ``None``.
        :param demean: whether to demean the signal before filtering.
        :param ftype: which antialiasing filter to use, choose from
            ``'iir'``, ``'fir'``, ``'fir-remez'``. Default is ``'fir-remez'``.

        See also: :py:meth:`Trace.downsample`.
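
        Sketch of a typical use (``traces`` is assumed to be a list of
        :py:class:`Trace` objects with mixed sampling rates)::

            for tr in traces:
                tr.downsample_to(0.5, snap=True, demean=True)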
        '''

        ratio = deltat/self.deltat
        rratio = round(ratio)

        ok = False
        for upsratio in range(1, allow_upsample_max+1):
            dratio = (upsratio/self.deltat) / (1./deltat)
            if abs(dratio - round(dratio)) / dratio < 0.0001 and \
                    util.decitab(int(round(dratio))):

                ok = True
                break

        if not ok:
            raise util.UnavailableDecimation('ratio = %g' % ratio)

        if upsratio > 1:
            self.drop_growbuffer()
            ydata = self.ydata
            self.ydata = num.zeros(
                ydata.size*upsratio-(upsratio-1), ydata.dtype)
            self.ydata[::upsratio] = ydata
            for i in range(1, upsratio):
                self.ydata[i::upsratio] = \
                    float(i)/upsratio * ydata[:-1] \
                    + float(upsratio-i)/upsratio * ydata[1:]
            self.deltat = self.deltat/upsratio

            ratio = deltat/self.deltat
            rratio = round(ratio)

        deci_seq = util.decitab(int(rratio))
        finals = []
        for i, ndecimate in enumerate(deci_seq):
            if ndecimate != 1:
                xinitials = None
                if initials is not None:
                    xinitials = initials[i]
                finals.append(self.downsample(
                    ndecimate, snap=snap, initials=xinitials, demean=demean,
                    ftype=ftype))

        if initials is not None:
            return finals

    def resample(self, deltat):
        '''
        Resample to given sampling rate ``deltat``.

        Resampling is performed in the frequency domain.
        '''

        ndata = self.ydata.size
        ntrans = nextpow2(ndata)
        fntrans2 = ntrans * self.deltat/deltat
        ntrans2 = int(round(fntrans2))
        deltat2 = self.deltat * float(ntrans)/float(ntrans2)
        ndata2 = int(round(ndata*self.deltat/deltat2))
        if abs(fntrans2 - ntrans2) > 1e-7:
            logger.warning(
                'resample: requested deltat %g could not be matched exactly: '
                '%g' % (deltat, deltat2))

        data = self.ydata
        data_pad = num.zeros(ntrans, dtype=float)
        data_pad[:ndata] = data
        fdata = num.fft.rfft(data_pad)
        fdata2 = num.zeros((ntrans2+1)//2, dtype=fdata.dtype)
        n = min(fdata.size, fdata2.size)
        fdata2[:n] = fdata[:n]
        data2 = num.fft.irfft(fdata2)
        data2 = data2[:ndata2]
        data2 *= float(ntrans2) / float(ntrans)
        self.deltat = deltat2
        self.set_ydata(data2)

    def resample_simple(self, deltat):
        tyear = 3600*24*365.

        if deltat == self.deltat:
            return

        if abs(self.deltat - deltat) * tyear/deltat < deltat:
            logger.warning(
                'resample_simple: less than one sample would have to be '
                'inserted/deleted per year. Doing nothing.')
            return

        ninterval = int(round(deltat / (self.deltat - deltat)))
        if abs(ninterval) < 20:
            logger.error(
                'resample_simple: sample insertion/deletion interval less '
                'than 20. Results would be erroneous.')
            raise ResamplingFailed()

        delete = False
        if ninterval < 0:
            ninterval = - ninterval
            delete = True

        tyearbegin = util.year_start(self.tmin)

        nmin = int(round((self.tmin - tyearbegin)/deltat))

        ibegin = (((nmin-1)//ninterval)+1) * ninterval - nmin
        nindices = (len(self.ydata) - ibegin - 1) // ninterval + 1
        if nindices > 0:
            indices = ibegin + num.arange(nindices) * ninterval
            data_split = num.split(self.ydata, indices)
            data = []
            for ln, h in zip(data_split[:-1], data_split[1:]):
                if delete:
                    ln = ln[:-1]

                data.append(ln)
                if not delete:
                    if ln.size == 0:
                        v = h[0]
                    else:
                        v = 0.5*(ln[-1] + h[0])
                    data.append(num.array([v], dtype=ln.dtype))

            data.append(data_split[-1])

            ydata_new = num.concatenate(data)

        self.tmin = tyearbegin + nmin * deltat
        self.deltat = deltat
        self.set_ydata(ydata_new)

    def stretch(self, tmin_new, tmax_new):
        '''
        Stretch signal while preserving sample rate using sinc interpolation.

        :param tmin_new: new time of first sample
        :param tmax_new: new time of last sample

        This method can be used to correct for a small linear time drift or
        to introduce sub-sample time shifts. The amount of stretching is
        limited to 10% by the implementation and is expected to be much
        smaller than that by the approximations used.
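
        Sketch of a typical use (applies a sub-sample shift of a tenth of a
        sample while keeping the sampling rate)::

            tr.stretch(tr.tmin + 0.1 * tr.deltat, tr.tmax + 0.1 * tr.deltat)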
        '''

        from pyrocko import signal_ext

        i_control = num.array([0, self.ydata.size-1], dtype=num.int64)
        t_control = num.array([tmin_new, tmax_new], dtype=float)

        r = (tmax_new - tmin_new) / self.deltat + 1.0
        n_new = int(round(r))
        if abs(n_new - r) > 0.001:
            n_new = int(math.floor(r))

        assert n_new >= 2

        tmax_new = tmin_new + (n_new-1) * self.deltat

        ydata_new = num.empty(n_new, dtype=float)
        signal_ext.antidrift(i_control, t_control,
                             self.ydata.astype(float),
                             tmin_new, self.deltat, ydata_new)

        self.tmin = tmin_new
        self.set_ydata(ydata_new)
        self._update_ids()

    def nyquist_check(self, frequency, intro='Corner frequency', warn=True,
                      raise_exception=False):

        '''
        Check if a given frequency is above the Nyquist frequency of the
        trace.

        :param intro: string used to introduce the warning/error message
        :param warn: whether to emit a warning
        :param raise_exception: whether to raise an :py:exc:`AboveNyquist`
            exception.
        '''

        if frequency >= 0.5/self.deltat:
            message = '%s (%g Hz) is equal to or higher than Nyquist ' \
                'frequency (%g Hz). (Trace %s)' \
                % (intro, frequency, 0.5/self.deltat, self.name())
            if warn:
                logger.warning(message)
            if raise_exception:
                raise AboveNyquist(message)

    def lowpass(self, order, corner, nyquist_warn=True,
                nyquist_exception=False, demean=True):

        '''
        Apply Butterworth lowpass to the trace.

        :param order: order of the filter
        :param corner: corner frequency of the filter

        Mean is removed before filtering.
        '''

        self.nyquist_check(
            corner, 'Corner frequency of lowpass', nyquist_warn,
            nyquist_exception)

        (b, a) = _get_cached_filter_coefs(
            order, [corner*2.0*self.deltat], btype='low')

        if len(a) != order+1 or len(b) != order+1:
            logger.warning(
                'Erroneous filter coefficients returned by '
                'scipy.signal.butter(). You may need to downsample the '
                'signal before filtering.')

        data = self.ydata.astype(num.float64)
        if demean:
            data -= num.mean(data)
        self.drop_growbuffer()
        self.ydata = signal.lfilter(b, a, data)

    def highpass(self, order, corner, nyquist_warn=True,
                 nyquist_exception=False, demean=True):

        '''
        Apply Butterworth highpass to the trace.

        :param order: order of the filter
        :param corner: corner frequency of the filter

        Mean is removed before filtering.
        '''

        self.nyquist_check(
            corner, 'Corner frequency of highpass', nyquist_warn,
            nyquist_exception)

        (b, a) = _get_cached_filter_coefs(
            order, [corner*2.0*self.deltat], btype='high')

        data = self.ydata.astype(num.float64)
        if len(a) != order+1 or len(b) != order+1:
            logger.warning(
                'Erroneous filter coefficients returned by '
                'scipy.signal.butter(). You may need to downsample the '
                'signal before filtering.')
        if demean:
            data -= num.mean(data)
        self.drop_growbuffer()
        self.ydata = signal.lfilter(b, a, data)

    def bandpass(self, order, corner_hp, corner_lp, demean=True):
        '''
        Apply Butterworth bandpass to the trace.

        :param order: order of the filter
        :param corner_hp: lower corner frequency of the filter
        :param corner_lp: upper corner frequency of the filter

        Mean is removed before filtering.
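
        Sketch of a typical use (4th order bandpass between 0.5 and 5 Hz;
        the values are hypothetical)::

            tr.bandpass(4, 0.5, 5.0)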
        '''

        self.nyquist_check(corner_hp, 'Lower corner frequency of bandpass')
        self.nyquist_check(corner_lp, 'Higher corner frequency of bandpass')
        (b, a) = _get_cached_filter_coefs(
            order,
            [corner*2.0*self.deltat for corner in (corner_hp, corner_lp)],
            btype='band')
        data = self.ydata.astype(num.float64)
        if demean:
            data -= num.mean(data)
        self.drop_growbuffer()
        self.ydata = signal.lfilter(b, a, data)

    def bandstop(self, order, corner_hp, corner_lp, demean=True):
        '''
        Apply bandstop (attenuates frequencies in band) to the trace.

        :param order: order of the filter
        :param corner_hp: lower corner frequency of the filter
        :param corner_lp: upper corner frequency of the filter

        Mean is removed before filtering.
        '''

        self.nyquist_check(corner_hp, 'Lower corner frequency of bandstop')
        self.nyquist_check(corner_lp, 'Higher corner frequency of bandstop')
        (b, a) = _get_cached_filter_coefs(
            order,
            [corner*2.0*self.deltat for corner in (corner_hp, corner_lp)],
            btype='bandstop')
        data = self.ydata.astype(num.float64)
        if demean:
            data -= num.mean(data)
        self.drop_growbuffer()
        self.ydata = signal.lfilter(b, a, data)

    def envelope(self, inplace=True):
        '''
        Calculate the envelope of the trace.

        :param inplace: calculate envelope in place

        The calculation follows:

        .. math::

            Y' = \\sqrt{Y^2+H(Y)^2}

        where H is the Hilbert-Transform of the signal Y.
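
        Sketch of a typical use::

            env = tr.envelope(inplace=False)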
        '''

        y = self.ydata.astype(float)
        env = num.abs(hilbert(y))
        if inplace:
            self.drop_growbuffer()
            self.ydata = env
        else:
            tr = self.copy(data=False)
            tr.ydata = env
            return tr

    def taper(self, taperer, inplace=True, chop=False):
        '''
        Apply a :py:class:`Taper` to the trace.

        :param taperer: instance of :py:class:`Taper` subclass
        :param inplace: apply taper inplace
        :param chop: if ``True``: exclude tapered parts from the resulting
            trace
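
        Sketch of a typical use (applies a 5 s cosine fade-in/fade-out; the
        fade length is hypothetical)::

            tr.taper(CosFader(xfade=5.0))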
        '''

        if not inplace:
            tr = self.copy()
        else:
            tr = self

        if chop:
            i, n = taperer.span(tr.ydata, tr.tmin, tr.deltat)
            tr.shift(i*tr.deltat)
            tr.set_ydata(tr.ydata[i:i+n])

        taperer(tr.ydata, tr.tmin, tr.deltat)

        if not inplace:
            return tr

    def whiten(self, order=6):
        '''
        Whiten signal in time domain using autoregression and recursive
        filter.

        :param order: order of the autoregression process
        '''

        b, a = self.whitening_coefficients(order)
        self.drop_growbuffer()
        self.ydata = signal.lfilter(b, a, self.ydata)

    def whitening_coefficients(self, order=6):
        ar = yulewalker(self.ydata, order)
        b, a = [1.] + ar.tolist(), [1.]
        return b, a

    def ampspec_whiten(
            self,
            width,
            td_taper='auto',
            fd_taper='auto',
            pad_to_pow2=True,
            demean=True):

        '''
        Whiten signal via frequency domain using moving average on amplitude
        spectra.

        :param width: width of smoothing kernel [Hz]
        :param td_taper: time domain taper, object of type :py:class:`Taper`
            or ``None`` or ``'auto'``.
        :param fd_taper: frequency domain taper, object of type
            :py:class:`Taper` or ``None`` or ``'auto'``.
        :param pad_to_pow2: whether to pad the signal with zeros up to a
            length of 2^n
        :param demean: whether to demean the signal before tapering

        The signal is first demeaned and then tapered using ``td_taper``.
        Then, the spectrum is calculated and inversely weighted with a
        smoothed version of its amplitude spectrum. A moving average is used
        for the smoothing. The smoothed spectrum is then tapered using
        ``fd_taper``. Finally, the smoothed and tapered spectrum is
        back-transformed into the time domain.

        If ``td_taper`` is set to ``'auto'``, ``CosFader(1.0/width)`` is
        used. If ``fd_taper`` is set to ``'auto'``, ``CosFader(width)`` is
        used.
        '''

        ndata = self.data_len()

        if pad_to_pow2:
            ntrans = nextpow2(ndata)
        else:
            ntrans = ndata

        df = 1./(ntrans*self.deltat)
        nw = int(round(width/df))
        if ndata//2+1 <= nw:
            raise TraceTooShort(
                'Samples in trace: %s, samples needed: %s' % (ndata, nw))

        if td_taper == 'auto':
            td_taper = CosFader(1./width)

        if fd_taper == 'auto':
            fd_taper = CosFader(width)

        if td_taper:
            self.taper(td_taper)

        ydata = self.get_ydata().astype(float)
        if demean:
            ydata -= ydata.mean()

        spec = num.fft.rfft(ydata, ntrans)

        amp = num.abs(spec)
        nspec = amp.size
        csamp = num.cumsum(amp)
        amp_smoothed = num.empty(nspec, dtype=csamp.dtype)
        n1, n2 = nw//2, nw//2 + nspec - nw
        amp_smoothed[n1:n2] = (csamp[nw:] - csamp[:-nw]) / nw
        amp_smoothed[:n1] = amp_smoothed[n1]
        amp_smoothed[n2:] = amp_smoothed[n2-1]

        denom = amp_smoothed * amp
        numer = amp
        eps = num.mean(denom) * 1e-9
        if eps == 0.0:
            eps = 1e-9

        numer += eps
        denom += eps
        spec *= numer/denom

        if fd_taper:
            fd_taper(spec, 0., df)

        ydata = num.fft.irfft(spec)
        self.set_ydata(ydata[:ndata])

    def _get_cached_freqs(self, nf, deltaf):
        ck = (nf, deltaf)
        if ck not in Trace.cached_frequencies:
            Trace.cached_frequencies[ck] = deltaf * num.arange(
                nf, dtype=float)

        return Trace.cached_frequencies[ck]

    def bandpass_fft(self, corner_hp, corner_lp):
        '''
        Apply boxcar bandpass to trace (in spectral domain).
        '''

        n = len(self.ydata)
        n2 = nextpow2(n)
        data = num.zeros(n2, dtype=num.float64)
        data[:n] = self.ydata
        fdata = num.fft.rfft(data)
        freqs = self._get_cached_freqs(len(fdata), 1./(self.deltat*n2))
        fdata[0] = 0.0
        fdata *= num.logical_and(corner_hp < freqs, freqs < corner_lp)
        data = num.fft.irfft(fdata)
        self.drop_growbuffer()
        self.ydata = data[:n]

    def shift(self, tshift):
        '''
        Time shift the trace.
        '''

        self.tmin += tshift
        self.tmax += tshift
        self._update_ids()

    def snap(self, inplace=True, interpolate=False):
        '''
        Shift trace samples to nearest even multiples of the sampling rate.

        :param inplace: (boolean) snap traces inplace

        If ``inplace`` is ``False`` and the shifts in ``tmin`` and ``tmax``
        would both be smaller than 0.01 x ``deltat``, :py:meth:`snap` returns
        the unsnapped instance of the original trace.
        '''

        tmin = round(self.tmin/self.deltat)*self.deltat
        tmax = tmin + (self.ydata.size-1)*self.deltat

        if inplace:
            xself = self
        else:
            if abs(self.tmin - tmin) < 1e-2*self.deltat and \
                    abs(self.tmax - tmax) < 1e-2*self.deltat:
                return self

            xself = self.copy()

        if interpolate:
            from pyrocko import signal_ext
            n = xself.data_len()
            ydata_new = num.empty(n, dtype=float)
            i_control = num.array([0, n-1], dtype=num.int64)
            tref = tmin
            t_control = num.array(
                [float(xself.tmin-tref), float(xself.tmax-tref)],
                dtype=float)

            signal_ext.antidrift(i_control, t_control,
                                 xself.ydata.astype(float),
                                 float(tmin-tref), xself.deltat, ydata_new)

            xself.ydata = ydata_new

        xself.tmin = tmin
        xself.tmax = tmax
        xself._update_ids()

        return xself

    def fix_deltat_rounding_errors(self):
        '''
        Try to undo sampling rate rounding errors.

        See :py:func:`fix_deltat_rounding_errors`.
        '''

        self.deltat = fix_deltat_rounding_errors(self.deltat)
        self.tmax = self.tmin + (self.data_len() - 1) * self.deltat

    def sta_lta_centered(self, tshort, tlong, quad=True, scalingmethod=1):
        '''
        Run special STA/LTA filter where the short time window is centered on
        the long time window.

        :param tshort: length of short time window in [s]
        :param tlong: length of long time window in [s]
        :param quad: whether to square the data prior to applying the STA/LTA
            filter
        :param scalingmethod: integer key to select how output values are
            scaled / normalized (``1``, ``2``, or ``3``)

        ============= ====================================== ===========
        Scalingmethod Implementation                         Range
        ============= ====================================== ===========
        ``1``         As/Al* Ts/Tl                           [0,1]
        ``2``         (As/Al - 1) / (Tl/Ts - 1)              [-Ts/Tl,1]
        ``3``         Like ``2`` but clipping range at zero  [0,1]
        ============= ====================================== ===========
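
        Sketch of a typical use (1 s short and 10 s long window on squared
        data; the window lengths are hypothetical)::

            tr.sta_lta_centered(1.0, 10.0)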
        '''

        nshort = int(round(tshort/self.deltat))
        nlong = int(round(tlong/self.deltat))

        assert nshort < nlong
        if nlong > len(self.ydata):
            raise TraceTooShort(
                'Samples in trace: %s, samples needed: %s'
                % (len(self.ydata), nlong))

        if quad:
            sqrdata = self.ydata**2
        else:
            sqrdata = self.ydata

        mavg_short = moving_avg(sqrdata, nshort)
        mavg_long = moving_avg(sqrdata, nlong)

        self.drop_growbuffer()

        if scalingmethod not in (1, 2, 3):
            raise Exception('Invalid value for scalingmethod argument.')

        if scalingmethod == 1:
            self.ydata = mavg_short/mavg_long * float(nshort)/float(nlong)
        elif scalingmethod in (2, 3):
            self.ydata = (mavg_short/mavg_long - 1.) \
                / ((float(nlong)/float(nshort)) - 1)

            if scalingmethod == 3:
                self.ydata = num.maximum(self.ydata, 0.)

    def sta_lta_right(self, tshort, tlong, quad=True, scalingmethod=1):
        '''
        Run special STA/LTA filter where the short time window is overlapping
        with the last part of the long time window.

        :param tshort: length of short time window in [s]
        :param tlong: length of long time window in [s]
        :param quad: whether to square the data prior to applying the STA/LTA
            filter
        :param scalingmethod: integer key to select how output values are
            scaled / normalized (``1``, ``2``, or ``3``)

        ============= ====================================== ===========
        Scalingmethod Implementation                         Range
        ============= ====================================== ===========
        ``1``         As/Al* Ts/Tl                           [0,1]
        ``2``         (As/Al - 1) / (Tl/Ts - 1)              [-Ts/Tl,1]
        ``3``         Like ``2`` but clipping range at zero  [0,1]
        ============= ====================================== ===========

        With ``scalingmethod=1``, the values produced by this variant of the
        STA/LTA are equivalent to

        .. math::

            s_i = \\frac{s}{l}
                \\frac{\\frac{1}{s}\\sum_{j=i}^{i+s-1} f_j}
                      {\\frac{1}{l}\\sum_{j=i+s-l}^{i+s-1} f_j}

        where :math:`f_j` are the input samples, :math:`s` is the number of
        samples in the short time window and :math:`l` is the number of
        samples in the long time window.
        '''

        n = self.data_len()
        tmin = self.tmin

        nshort = max(1, int(round(tshort/self.deltat)))
        nlong = max(1, int(round(tlong/self.deltat)))

        assert nshort < nlong

        if nlong > len(self.ydata):
            raise TraceTooShort(
                'Samples in trace: %s, samples needed: %s'
                % (len(self.ydata), nlong))

        if quad:
            sqrdata = self.ydata**2
        else:
            sqrdata = self.ydata

        nshift = int(math.floor(0.5 * (nlong - nshort)))
        if nlong % 2 != 0 and nshort % 2 == 0:
            nshift += 1

        mavg_short = moving_avg(sqrdata, nshort)[nshift:]
        mavg_long = moving_avg(sqrdata, nlong)[:sqrdata.size-nshift]

        self.drop_growbuffer()

        if scalingmethod not in (1, 2, 3):
            raise Exception('Invalid value for scalingmethod argument.')

        if scalingmethod == 1:
            ydata = mavg_short/mavg_long * nshort/nlong
        elif scalingmethod in (2, 3):
            ydata = (mavg_short/mavg_long - 1.) \
                / ((float(nlong)/float(nshort)) - 1)

            if scalingmethod == 3:
                ydata = num.maximum(ydata, 0.)

        self.set_ydata(ydata)

        self.shift((math.ceil(0.5*nlong) - nshort + 1) * self.deltat)

        self.chop(
            tmin + (nlong - nshort) * self.deltat,
            tmin + (n - nshort) * self.deltat)

    def peaks(self, threshold, tsearch,
              deadtime=False,
              nblock_duration_detection=100):

        '''
        Detect peaks above a given threshold (method 1).

        From every instant where the signal rises above ``threshold``, a time
        length of ``tsearch`` seconds is searched for a maximum. A list with
        tuples (time, value) for each detected peak is returned. The
        ``deadtime`` argument turns on a special deadtime duration detection
        algorithm useful in combination with recursive STA/LTA filters.
        '''

        y = self.ydata
        above = num.where(y > threshold, 1, 0)
        deriv = num.zeros(y.size, dtype=num.int8)
        deriv[1:] = above[1:]-above[:-1]
        itrig_positions = num.nonzero(deriv > 0)[0]
        tpeaks = []
        apeaks = []
        tzeros = []
        tzero = self.tmin

        for itrig_pos in itrig_positions:
            ibeg = itrig_pos
            iend = min(
                len(self.ydata),
                itrig_pos + int(math.ceil(tsearch/self.deltat)))
            ipeak = num.argmax(y[ibeg:iend])
            tpeak = self.tmin + (ipeak+ibeg)*self.deltat
            apeak = y[ibeg+ipeak]

            if tpeak < tzero:
                continue

            if deadtime:
                ibeg = itrig_pos
                iblock = 0
                nblock = nblock_duration_detection
                totalsum = 0.
                while True:
                    if ibeg+iblock*nblock >= len(y):
                        tzero = self.tmin + (len(y)-1) * self.deltat
                        break

                    logy = num.log(
                        y[ibeg+iblock*nblock:ibeg+(iblock+1)*nblock])
                    logy[0] += totalsum
                    ysum = num.cumsum(logy)
                    totalsum = ysum[-1]
                    below = num.where(ysum <= 0., 1, 0)
                    deriv = num.zeros(ysum.size, dtype=num.int8)
                    deriv[1:] = below[1:]-below[:-1]
                    izero_positions = \
                        num.nonzero(deriv > 0)[0] + iblock*nblock
                    if len(izero_positions) > 0:
                        tzero = self.tmin + self.deltat * (
                            ibeg + izero_positions[0])
                        break

                    iblock += 1
            else:
                tzero = ibeg*self.deltat + self.tmin + tsearch

            tpeaks.append(tpeak)
            apeaks.append(apeak)
            tzeros.append(tzero)

        if deadtime:
            return tpeaks, apeaks, tzeros
        else:
            return tpeaks, apeaks

    def peaks2(self, threshold, tsearch):

        '''
        Detect peaks above a given threshold (method 2).

        This variant of peak detection is a bit more robust (and slower) than
        the one implemented in :py:meth:`Trace.peaks`. First all samples with
        ``a[i-1] < a[i] > a[i+1]`` are masked as potential peaks. From these,
        iteratively the one with the maximum amplitude ``a[j]`` and time
        ``t[j]`` is chosen and potential peaks within
        ``t[j] - tsearch, t[j] + tsearch``
        are discarded. The algorithm stops when ``a[j] < threshold`` or when
        no more potential peaks are left.
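
        Sketch of a typical use (``tr`` is assumed to hold a characteristic
        function, e.g. an STA/LTA output; the values are hypothetical)::

            tpeaks, apeaks = tr.peaks2(threshold=0.5, tsearch=10.0)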
        '''

        a = num.copy(self.ydata)

        amin = num.min(a)

        # keep only strict local maxima as peak candidates; all other
        # samples are flattened to the data minimum
        a[0] = amin
        a[1:-1][num.diff(a, 2) >= 0.] = amin
        a[-1] = amin

        data = []
        while True:
            imax = num.argmax(a)
            amax = a[imax]

            if amax < threshold or amax == amin:
                break

            data.append((self.tmin + imax * self.deltat, amax))

            ntsearch = int(round(tsearch / self.deltat))
            a[max(imax-ntsearch//2, 0):min(imax+ntsearch//2, a.size)] = amin

        if data:
            data.sort()
            tpeaks, apeaks = list(zip(*data))
        else:
            tpeaks, apeaks = [], []

        return tpeaks, apeaks

    def extend(self, tmin=None, tmax=None, fillmethod='zeros'):
        '''
        Extend trace to given span.

        :param tmin: begin time of new span
        :param tmax: end time of new span
        :param fillmethod: ``'zeros'``, ``'repeat'``, ``'mean'``, or
            ``'median'``
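
        Sketch of a typical use (pads 10 s of repeated edge samples on both
        sides)::

            tr.extend(tr.tmin - 10., tr.tmax + 10., fillmethod='repeat')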
        '''

        nold = self.ydata.size

        if tmin is not None:
            nl = min(0, int(round((tmin-self.tmin)/self.deltat)))
        else:
            nl = 0

        if tmax is not None:
            nh = max(nold - 1, int(round((tmax-self.tmin)/self.deltat)))
        else:
            nh = nold - 1

        n = nh - nl + 1
        data = num.zeros(n, dtype=self.ydata.dtype)
        data[-nl:-nl + nold] = self.ydata
        if self.ydata.size >= 1:
            if fillmethod == 'repeat':
                data[:-nl] = self.ydata[0]
                data[-nl + nold:] = self.ydata[-1]
            elif fillmethod == 'median':
                v = num.median(self.ydata)
                data[:-nl] = v
                data[-nl + nold:] = v
            elif fillmethod == 'mean':
                v = num.mean(self.ydata)
                data[:-nl] = v
                data[-nl + nold:] = v

        self.drop_growbuffer()
        self.ydata = data

        self.tmin += nl * self.deltat
        self.tmax = self.tmin + (self.ydata.size - 1) * self.deltat

        self._update_ids()

    def transfer(self,
                 tfade=0.,
                 freqlimits=None,
                 transfer_function=None,
                 cut_off_fading=True,
                 demean=True,
                 invert=False):

        '''
        Return new trace with transfer function applied (convolution).

        :param tfade: rise/fall time in seconds of taper applied in time
            domain at both ends of trace.
        :param freqlimits: 4-tuple with corner frequencies in Hz.
        :param transfer_function: FrequencyResponse object; must provide a
            method ``evaluate(freqs)``, which returns the transfer function
            coefficients at the frequencies ``freqs``.
        :param cut_off_fading: whether to cut off rise/fall interval in
            output trace.
        :param demean: remove mean before applying transfer function
        :param invert: set to True to do a deconvolution
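
        Sketch of a typical restitution (``resp`` is assumed to be a
        frequency response object for the recording instrument; the corner
        frequencies are hypothetical)::

            restituted = tr.transfer(
                tfade=10.,
                freqlimits=(0.01, 0.02, 5., 10.),
                transfer_function=resp,
                invert=True)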
        '''

        if transfer_function is None:
            transfer_function = FrequencyResponse()

        if self.tmax - self.tmin <= tfade*2.:
            raise TraceTooShort(
                'Trace %s.%s.%s.%s too short for fading length setting. '
                'trace length = %g, fading length = %g'
                % (self.nslc_id + (self.tmax-self.tmin, tfade)))

        if freqlimits is None and (
                transfer_function is None or transfer_function.is_scalar()):

            # special case for flat responses

            output = self.copy()
            data = self.ydata.copy()
            ndata = data.size

            if transfer_function is not None:
                c = num.abs(transfer_function.evaluate(num.ones(1))[0])

                if invert:
                    c = 1.0/c

                data *= c

            if tfade != 0.0:
                data *= costaper(
                    0., tfade, self.deltat*(ndata-1)-tfade, self.deltat*ndata,
                    ndata, self.deltat)

            output.ydata = data

        else:
            ndata = self.ydata.size
            ntrans = nextpow2(ndata*1.2)
            coefs = self._get_tapered_coefs(
                ntrans, freqlimits, transfer_function, invert=invert)

            data = self.ydata

            data_pad = num.zeros(ntrans, dtype=float)
            data_pad[:ndata] = data
            if demean:
                data_pad[:ndata] -= data.mean()

            if tfade != 0.0:
                data_pad[:ndata] *= costaper(
                    0., tfade, self.deltat*(ndata-1)-tfade, self.deltat*ndata,
                    ndata, self.deltat)

            fdata = num.fft.rfft(data_pad)
            fdata *= coefs
            ddata = num.fft.irfft(fdata)
            output = self.copy()
            output.ydata = ddata[:ndata]

        if cut_off_fading and tfade != 0.0:
            try:
                output.chop(output.tmin+tfade, output.tmax-tfade, inplace=True)
            except NoData:
                raise TraceTooShort(
                    'Trace %s.%s.%s.%s too short for fading length setting. '
                    'trace length = %g, fading length = %g'
                    % (self.nslc_id + (self.tmax-self.tmin, tfade)))
        else:
            output.ydata = output.ydata.copy()

        return output

    def differentiate(self, n=1, order=4, inplace=True):
        '''
        Approximate first or second derivative of the trace.

        :param n: 1 for first derivative, 2 for second
        :param order: order of the approximation, 2 and 4 are supported
        :param inplace: if ``True`` the trace is differentiated in place,
            otherwise a new trace object with the derivative is returned.

        Raises :py:exc:`ValueError` for unsupported ``n`` or ``order``.

        See :py:func:`~pyrocko.util.diff_fd` for implementation details.
        '''

        ddata = util.diff_fd(n, order, self.deltat, self.ydata)

        if inplace:
            self.ydata = ddata
        else:
            output = self.copy(data=False)
            output.set_ydata(ddata)
            return output

    def drop_chain_cache(self):
        if self._pchain:
            self._pchain.clear()

    def init_chain(self):
        self._pchain = pchain.Chain(
            do_downsample,
            do_extend,
            do_pre_taper,
            do_fft,
            do_filter,
            do_ifft)

    def run_chain(self, tmin, tmax, deltat, setup, nocache):
        if setup.domain == 'frequency_domain':
            _, _, data = self._pchain(
                (self, deltat),
                (tmin, tmax),
                (setup.taper,),
                (setup.filter,),
                (setup.filter,),
                nocache=nocache)

            return num.abs(data), num.abs(data)

        else:
            processed = self._pchain(
                (self, deltat),
                (tmin, tmax),
                (setup.taper,),
                (setup.filter,),
                (setup.filter,),
                (),
                nocache=nocache)

            if setup.domain == 'time_domain':
                data = processed.get_ydata()

            elif setup.domain == 'envelope':
                processed = processed.envelope(inplace=False)

            elif setup.domain == 'absolute':
                processed.set_ydata(num.abs(processed.get_ydata()))

            return processed.get_ydata(), processed

    def misfit(self, candidate, setup, nocache=False, debug=False):
        '''
        Calculate misfit and normalization factor against candidate trace.

        :param candidate: :py:class:`Trace` object
        :param setup: :py:class:`MisfitSetup` object
        :returns: tuple ``(m, n)``, where m is the misfit value and n is the
            normalization divisor

        If the sampling rates of ``self`` and ``candidate`` differ, the trace
        with the higher sampling rate will be downsampled.
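
        Sketch of a typical use (assumes an observed trace ``obs``, a
        synthetic trace ``syn`` and a configured :py:class:`MisfitSetup`
        instance ``setup``)::

            m, n = obs.misfit(candidate=syn, setup=setup)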
        '''

        a = self
        b = candidate

        for tr in (a, b):
            if not tr._pchain:
                tr.init_chain()

        deltat = max(a.deltat, b.deltat)
        tmin = min(a.tmin, b.tmin) - deltat
        tmax = max(a.tmax, b.tmax) + deltat

        adata, aproc = a.run_chain(tmin, tmax, deltat, setup, nocache)
        bdata, bproc = b.run_chain(tmin, tmax, deltat, setup, nocache)

        if setup.domain != 'cc_max_norm':
            m, n = Lx_norm(bdata, adata, norm=setup.norm)
        else:
            ctr = correlate(aproc, bproc, mode='full', normalization='normal')
            ccmax = ctr.max()[1]
            m = 0.5 - 0.5 * ccmax
            n = 0.5

        if debug:
            return m, n, aproc, bproc
        else:
            return m, n

    def spectrum(self, pad_to_pow2=False, tfade=None):
        '''
        Get FFT spectrum of trace.

        :param pad_to_pow2: whether to zero-pad the data to next larger
            power-of-two length
        :param tfade: ``None`` or a time length in seconds, to apply cosine
            shaped tapers to both ends of the trace

        :returns: a tuple with (frequencies, values)
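
        Sketch of a typical use (plots the amplitude spectrum with
        matplotlib)::

            import numpy as num
            import matplotlib.pyplot as plt

            freqs, spec = tr.spectrum(pad_to_pow2=True)
            plt.loglog(freqs[1:], num.abs(spec[1:]))
            plt.show()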
        '''

        ndata = self.ydata.size

        if pad_to_pow2:
            ntrans = nextpow2(ndata)
        else:
            ntrans = ndata

        if tfade is None:
            ydata = self.ydata
        else:
            ydata = self.ydata * costaper(
                0., tfade, self.deltat*(ndata-1)-tfade, self.deltat*ndata,
                ndata, self.deltat)

        fydata = num.fft.rfft(ydata, ntrans)
        df = 1./(ntrans*self.deltat)
        fxdata = num.arange(len(fydata))*df
        return fxdata, fydata

    def multi_filter(self, filter_freqs, bandwidth):

        class Gauss(FrequencyResponse):
            f0 = Float.T()
            a = Float.T(default=1.0)

            def __init__(self, f0, a=1.0, **kwargs):
                FrequencyResponse.__init__(self, f0=f0, a=a, **kwargs)

            def evaluate(self, freqs):
                omega0 = 2.*math.pi*self.f0
                omega = 2.*math.pi*freqs
                return num.exp(-((omega-omega0)
                                 / (self.a*omega0))**2)

        freqs, coefs = self.spectrum()
        n = self.data_len()
        nfilt = len(filter_freqs)
        signal_tf = num.zeros((nfilt, n))
        centroid_freqs = num.zeros(nfilt)
        for ifilt, f0 in enumerate(filter_freqs):
            taper = Gauss(f0, a=bandwidth)
            weights = taper.evaluate(freqs)
            nhalf = freqs.size
            analytic_spec = num.zeros(n, dtype=complex)
            analytic_spec[:nhalf] = coefs*weights

            if n % 2 == 0:
                analytic_spec[1:nhalf-1] *= 2.
            else:
                analytic_spec[1:nhalf] *= 2.

            analytic = num.fft.ifft(analytic_spec)
            signal_tf[ifilt, :] = num.abs(analytic)

            enorm = num.abs(analytic_spec[:nhalf])**2
            enorm /= num.sum(enorm)
            centroid_freqs[ifilt] = num.sum(freqs*enorm)

        return centroid_freqs, signal_tf

    def _get_tapered_coefs(
            self, ntrans, freqlimits, transfer_function, invert=False):

        deltaf = 1./(self.deltat*ntrans)
        nfreqs = ntrans//2 + 1
        transfer = num.ones(nfreqs, dtype=complex)
        hi = snapper(nfreqs, deltaf)
        if freqlimits is not None:
            a, b, c, d = freqlimits
            freqs = num.arange(hi(d)-hi(a), dtype=float)*deltaf \
                + hi(a)*deltaf

            if invert:
                coeffs = transfer_function.evaluate(freqs)
                if num.any(coeffs == 0.0):
                    raise InfiniteResponse('%s.%s.%s.%s' % self.nslc_id)

                transfer[hi(a):hi(d)] = 1.0 / transfer_function.evaluate(
                    freqs)
            else:
                transfer[hi(a):hi(d)] = transfer_function.evaluate(freqs)

            tapered_transfer = costaper(a, b, c, d, nfreqs, deltaf)*transfer
        else:
            if invert:
                raise Exception(
                    'transfer: `freqlimits` must be given when `invert` is '
                    'set to `True`')

            freqs = num.arange(nfreqs) * deltaf
            tapered_transfer = transfer_function.evaluate(freqs)

        tapered_transfer[0] = 0.0  # don't introduce static offsets
        return tapered_transfer

    def fill_template(self, template, **additional):
        '''
        Fill string template with trace metadata.

        Uses normal Python '%(placeholder)s' string templates. The following
        placeholders are considered: ``network``, ``station``, ``location``,
        ``channel``, ``tmin`` (time of first sample), ``tmax`` (time of last
        sample), ``tmin_ms``, ``tmax_ms``, ``tmin_us``, ``tmax_us``,
        ``tmin_year``, ``tmax_year``, ``tmin_month``, ``tmax_month``,
        ``tmin_day``, ``tmax_day``, ``julianday``. The variants ending with
        ``'_ms'`` include milliseconds, those with ``'_us'`` include
        microseconds, those with ``'_year'`` contain only the year.
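
        Sketch of a typical use (the directory scheme is hypothetical)::

            fn = tr.fill_template(
                'data/%(network)s/%(station)s/%(channel)s_%(tmin)s.mseed')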
1902 '''
1904 template = template.replace('%n', '%(network)s')\
1905 .replace('%s', '%(station)s')\
1906 .replace('%l', '%(location)s')\
1907 .replace('%c', '%(channel)s')\
1908 .replace('%b', '%(tmin)s')\
1909 .replace('%e', '%(tmax)s')\
1910 .replace('%j', '%(julianday)s')
1912 params = dict(
1913 zip(('network', 'station', 'location', 'channel'), self.nslc_id))
1914 params['tmin'] = util.time_to_str(
1915 self.tmin, format='%Y-%m-%d_%H-%M-%S')
1916 params['tmax'] = util.time_to_str(
1917 self.tmax, format='%Y-%m-%d_%H-%M-%S')
1918 params['tmin_ms'] = util.time_to_str(
1919 self.tmin, format='%Y-%m-%d_%H-%M-%S.3FRAC')
1920 params['tmax_ms'] = util.time_to_str(
1921 self.tmax, format='%Y-%m-%d_%H-%M-%S.3FRAC')
1922 params['tmin_us'] = util.time_to_str(
1923 self.tmin, format='%Y-%m-%d_%H-%M-%S.6FRAC')
1924 params['tmax_us'] = util.time_to_str(
1925 self.tmax, format='%Y-%m-%d_%H-%M-%S.6FRAC')
1926 params['tmin_year'], params['tmin_month'], params['tmin_day'] \
1927 = util.time_to_str(self.tmin, format='%Y-%m-%d').split('-')
1928 params['tmax_year'], params['tmax_month'], params['tmax_day'] \
1929 = util.time_to_str(self.tmax, format='%Y-%m-%d').split('-')
1930 params['julianday'] = util.julian_day_of_year(self.tmin)
1931 params.update(additional)
1932 return template % params
1934 def plot(self):
1935 '''
1936 Show trace with matplotlib.
1938 See also: :py:meth:`Trace.snuffle`.
1939 '''
1941 from matplotlib import pyplot as plt
1942 plt.plot(self.get_xdata(), self.get_ydata())
1943 name = '%s %s %s - %s' % (
1944 self.channel,
1945 self.station,
1946 time.strftime('%d-%m-%y %H:%M:%S', time.gmtime(self.tmin)),
1947 time.strftime('%d-%m-%y %H:%M:%S', time.gmtime(self.tmax)))
1949 plt.title(name)
1950 plt.show()
1952 def snuffle(self, **kwargs):
1953 '''
1954 Show trace in a snuffler window.
1956 :param stations: list of :py:class:`pyrocko.model.Station` objects or
1957 ``None``
1958 :param events: list of :py:class:`pyrocko.model.Event` objects or
1959 ``None``
1960 :param markers: list of :py:class:`pyrocko.gui.snuffler.marker.Marker`
1961 objects or ``None``
1962 :param ntracks: int, number of tracks to be shown initially (default:
1963 12)
1964 :param follow: time interval (in seconds) for real time follow mode or
1965 ``None``
1966 :param controls: bool, whether to show the main controls (default:
1967 ``True``)
1968 :param opengl: bool, whether to use opengl (default: ``False``)
1969 '''
1971 return snuffle([self], **kwargs)
1974def snuffle(traces, **kwargs):
1975 '''
1976 Show traces in a snuffler window.
1978 :param stations: list of :py:class:`pyrocko.model.Station` objects or
1979 ``None``
1980 :param events: list of :py:class:`pyrocko.model.Event` objects or ``None``
1981 :param markers: list of :py:class:`pyrocko.gui.snuffler.marker.Marker`
1982 objects or ``None``
1983 :param ntracks: int, number of tracks to be shown initially (default: 12)
1984 :param follow: time interval (in seconds) for real time follow mode or
1985 ``None``
1986 :param controls: bool, whether to show the main controls (default:
1987 ``True``)
1988 :param opengl: bool, whether to use opengl (default: ``False``)
1989 '''
1991 from pyrocko import pile
1992 from pyrocko.gui.snuffler import snuffler
1993 p = pile.Pile()
1994 if traces:
1995 trf = pile.MemTracesFile(None, traces)
1996 p.add_file(trf)
1997 return snuffler.snuffle(p, **kwargs)
2000class InfiniteResponse(Exception):
2001 '''
2002 This exception is raised by :py:class:`Trace` operations when deconvolution
2003 of a frequency response (instrument response transfer function) would
2004 result in a division by zero.
2005 '''
2008class MisalignedTraces(Exception):
2009 '''
2010 This exception is raised by some :py:class:`Trace` operations when tmin,
2011 tmax or number of samples do not match.
2012 '''
2014 pass
2017class NoData(Exception):
2018 '''
2019 This exception is raised by some :py:class:`Trace` operations when no or
2020 not enough data is available.
2021 '''
2023 pass
2026class AboveNyquist(Exception):
2027 '''
2028 This exception is raised by some :py:class:`Trace` operations when given
2029 frequencies are above the Nyquist frequency.
2030 '''
2032 pass
2035class TraceTooShort(Exception):
2036 '''
2037 This exception is raised by some :py:class:`Trace` operations when the
2038 trace is too short.
2039 '''
2041 pass
2044class ResamplingFailed(Exception):
2045 pass
2048def minmax(traces, key=None, mode='minmax', outer_mode='minmax'):
2050 '''
2051 Get data range given traces grouped by selected pattern.
2053 :param key: a callable which takes as single argument a trace and returns a
2054 key for the grouping of the results. If this is ``None``, the default,
2055 ``lambda tr: (tr.network, tr.station, tr.location, tr.channel)`` is
2056 used.
2057 :param mode: ``'minmax'`` or floating point number. If this is
2058 ``'minmax'``, minimum and maximum of the traces are used; if it is a
2059 number, mean +- standard deviation times ``mode`` is used.
2061 :param outer_mode: ``'minmax'`` to use minimum and maximum of the
2062 single-trace ranges, or ``'robust'`` to use the interval that discards
2063 the 10% most extreme values on either end.
2065 :returns: a dict with the combined data ranges.
2067 Examples::
2069 ranges = minmax(traces, lambda tr: tr.channel)
2070 print(ranges['N'])  # min & max of all traces with channel == 'N'
2071 print(ranges['E'])  # min & max of all traces with channel == 'E'
2073 ranges = minmax(traces, lambda tr: (tr.network, tr.station))
2074 print(ranges['GR', 'HAM3'])  # min & max of all traces with
2075 # network == 'GR' and station == 'HAM3'
2077 ranges = minmax(traces, lambda tr: None)
2078 print(ranges[None])  # min & max of all traces
2079 '''
2081 if key is None:
2082 key = _default_key
2084 ranges = defaultdict(list)
2085 for trace in traces:
2086 if isinstance(mode, str) and mode == 'minmax':
2087 mi, ma = num.nanmin(trace.ydata), num.nanmax(trace.ydata)
2088 else:
2089 mean = trace.ydata.mean()
2090 std = trace.ydata.std()
2091 mi, ma = mean-std*mode, mean+std*mode
2093 k = key(trace)
2094 ranges[k].append((mi, ma))
2096 for k in ranges:
2097 mins, maxs = num.array(ranges[k]).T
2098 if outer_mode == 'minmax':
2099 ranges[k] = num.nanmin(mins), num.nanmax(maxs)
2100 elif outer_mode == 'robust':
2101 ranges[k] = num.percentile(mins, 10.), num.percentile(maxs, 90.)
2103 return ranges
2106def minmaxtime(traces, key=None):
2108 '''
2109 Get time range given traces grouped by selected pattern.
2111 :param key: a callable which takes as single argument a trace and returns a
2112 key for the grouping of the results. If this is ``None``, the default,
2113 ``lambda tr: (tr.network, tr.station, tr.location, tr.channel)`` is
2114 used.
2116 :returns: a dict with the combined time ranges.
2117 '''
2119 if key is None:
2120 key = _default_key
2122 ranges = {}
2123 for trace in traces:
2124 mi, ma = trace.tmin, trace.tmax
2125 k = key(trace)
2126 if k not in ranges:
2127 ranges[k] = mi, ma
2128 else:
2129 tmi, tma = ranges[k]
2130 ranges[k] = min(tmi, mi), max(tma, ma)
2132 return ranges
2135def degapper(
2136 traces,
2137 maxgap=5,
2138 fillmethod='interpolate',
2139 deoverlap='use_second',
2140 maxlap=None):
2142 '''
2143 Try to connect traces and remove gaps.
2145 This method will combine adjacent traces, which match in their network,
2146 station, location and channel attributes. Overlapping parts are handled
2147 according to the ``deoverlap`` argument.
2149 :param traces: input traces, must be sorted by their full_id attribute.
2150 :param maxgap: maximum number of samples to interpolate.
2151 :param fillmethod: what to put into the gaps: 'interpolate' or 'zeros'.
2152 :param deoverlap: how to handle overlaps: 'use_second' to use data from
2153 second trace (default), 'use_first' to use data from first trace,
2154 'crossfade_cos' to crossfade with cosine taper, 'add' to add amplitude
2155 values.
2156 :param maxlap: maximum number of samples of overlap to be tolerated
(and removed); traces with larger overlaps are not joined.
2158 :returns: list of traces
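
Example (a sketch; ``traces`` is assumed to be a sorted list of gappy
traces with matching codes)::

    continuous = degapper(traces, maxgap=10, fillmethod='interpolate')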
2159 '''
2161 in_traces = traces
2162 out_traces = []
2163 if not in_traces:
2164 return out_traces
2165 out_traces.append(in_traces.pop(0))
2166 while in_traces:
2168 a = out_traces[-1]
2169 b = in_traces.pop(0)
2171 avirt, bvirt = a.ydata is None, b.ydata is None
2172 assert avirt == bvirt, \
2173 'traces given to degapper() must either all have data or have ' \
2174 'no data.'
2176 virtual = avirt and bvirt
2178 if (a.nslc_id == b.nslc_id and a.deltat == b.deltat
2179 and a.data_len() >= 1 and b.data_len() >= 1
2180 and (virtual or a.ydata.dtype == b.ydata.dtype)):
2182 dist = (b.tmin-(a.tmin+(a.data_len()-1)*a.deltat))/a.deltat
2183 idist = int(round(dist))
2184 if abs(dist - idist) > 0.05 and idist <= maxgap:
2185 # logger.warning('Cannot degap traces with displaced sampling '
2186 # '(%s, %s, %s, %s)' % a.nslc_id)
2187 pass
2188 else:
2189 if 1 < idist <= maxgap:
2190 if not virtual:
2191 if fillmethod == 'interpolate':
2192 filler = a.ydata[-1] + (
2193 ((1.0 + num.arange(idist-1, dtype=float))
2194 / idist) * (b.ydata[0]-a.ydata[-1])
2195 ).astype(a.ydata.dtype)
2196 elif fillmethod == 'zeros':
2197 filler = num.zeros(idist-1, dtype=a.ydata.dtype)
2198 a.ydata = num.concatenate((a.ydata, filler, b.ydata))
2199 a.tmax = b.tmax
2200 if a.mtime and b.mtime:
2201 a.mtime = max(a.mtime, b.mtime)
2202 continue
2204 elif idist == 1:
2205 if not virtual:
2206 a.ydata = num.concatenate((a.ydata, b.ydata))
2207 a.tmax = b.tmax
2208 if a.mtime and b.mtime:
2209 a.mtime = max(a.mtime, b.mtime)
2210 continue
2212 elif idist <= 0 and (maxlap is None or -maxlap < idist):
2213 if b.tmax > a.tmax:
2214 if not virtual:
2215 na = a.ydata.size
2216 n = -idist+1
2217 if deoverlap == 'use_second':
2218 a.ydata = num.concatenate(
2219 (a.ydata[:-n], b.ydata))
2220 elif deoverlap in ('use_first', 'crossfade_cos'):
2221 a.ydata = num.concatenate(
2222 (a.ydata, b.ydata[n:]))
2223 elif deoverlap == 'add':
2224 a.ydata[-n:] += b.ydata[:n]
2225 a.ydata = num.concatenate(
2226 (a.ydata, b.ydata[n:]))
2227 else:
2228 assert False, 'unknown deoverlap method'
2230 if deoverlap == 'crossfade_cos':
2231 n = -idist+1
2232 taper = 0.5-0.5*num.cos(
2233 (1.+num.arange(n))/(1.+n)*num.pi)
2234 a.ydata[na-n:na] *= 1.-taper
2235 a.ydata[na-n:na] += b.ydata[:n] * taper
2237 a.tmax = b.tmax
2238 if a.mtime and b.mtime:
2239 a.mtime = max(a.mtime, b.mtime)
2240 continue
2241 else:
2242 # make short second trace vanish
2243 continue
2245 if b.data_len() >= 1:
2246 out_traces.append(b)
2248 for tr in out_traces:
2249 tr._update_ids()
2251 return out_traces
2254def rotate(traces, azimuth, in_channels, out_channels):
2255 '''
2256 2D rotation of traces.
2258 :param traces: list of input traces
2259 :param azimuth: difference of the azimuths of the component directions
2260 (azimuth of out_channels[0]) - (azimuth of in_channels[0])
2261 :param in_channels: names of the input channels (e.g. 'N', 'E')
2262 :param out_channels: names of the output channels (e.g. 'R', 'T')
2263 :returns: list of rotated traces
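
Example (a sketch, assuming matching north and east component traces
``tr_n`` and ``tr_e``)::

    tr_r, tr_t = rotate(
        [tr_n, tr_e], azimuth=30.,
        in_channels=['N', 'E'], out_channels=['R', 'T'])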
2264 '''
2266 phi = azimuth/180.*math.pi
2267 cphi = math.cos(phi)
2268 sphi = math.sin(phi)
2269 rotated = []
2270 in_channels = tuple(_channels_to_names(in_channels))
2271 out_channels = tuple(_channels_to_names(out_channels))
2272 for a in traces:
2273 for b in traces:
2274 if ((a.channel, b.channel) == in_channels and
2275 a.nslc_id[:3] == b.nslc_id[:3] and
2276 abs(a.deltat-b.deltat) < a.deltat*0.001):
2277 tmin = max(a.tmin, b.tmin)
2278 tmax = min(a.tmax, b.tmax)
2280 if tmin < tmax:
2281 ac = a.chop(tmin, tmax, inplace=False, include_last=True)
2282 bc = b.chop(tmin, tmax, inplace=False, include_last=True)
2283 if abs(ac.tmin - bc.tmin) > ac.deltat*0.01:
2284 logger.warning(
2285 'Cannot rotate traces with displaced sampling '
2286 '(%s, %s, %s, %s)' % a.nslc_id)
2287 continue
2289 acydata = ac.get_ydata()*cphi+bc.get_ydata()*sphi
2290 bcydata = -ac.get_ydata()*sphi+bc.get_ydata()*cphi
2291 ac.set_ydata(acydata)
2292 bc.set_ydata(bcydata)
2294 ac.set_codes(channel=out_channels[0])
2295 bc.set_codes(channel=out_channels[1])
2296 rotated.append(ac)
2297 rotated.append(bc)
2299 return rotated
2302def rotate_to_rt(n, e, source, receiver, out_channels=('R', 'T')):
2303 '''
2304 Rotate traces from NE to RT system.
2306 :param n:
2307 North trace.
2308 :type n:
2309 :py:class:`~pyrocko.trace.Trace`
2311 :param e:
2312 East trace.
2313 :type e:
2314 :py:class:`~pyrocko.trace.Trace`
2316 :param source:
2317 Source of the recorded signal.
2318 :type source:
2319 :py:class:`pyrocko.gf.seismosizer.Source`
2321 :param receiver:
2322 Receiver of the recorded signal.
2323 :type receiver:
2324 :py:class:`pyrocko.model.location.Location`
2326 :param out_channels:
2327 Channel codes of the output channels (radial, transversal).
2328 Default is ``('R', 'T')``.
2330 :type out_channels:
2331 optional, tuple[str, str]
2333 :returns:
2334 Rotated traces (radial, transversal).
2335 :rtype:
2336 tuple[
2337 :py:class:`~pyrocko.trace.Trace`,
2338 :py:class:`~pyrocko.trace.Trace`]
2339 '''
2340 azimuth = orthodrome.azimuth(receiver, source) + 180.
2341 in_channels = n.channel, e.channel
2342 out = rotate(
2343 [n, e], azimuth,
2344 in_channels=in_channels,
2345 out_channels=out_channels)
2347 assert len(out) == 2
2348 for tr in out:
2349 if tr.channel == out_channels[0]:
2350 r = tr
2351 elif tr.channel == out_channels[1]:
2352 t = tr
2353 else:
2354 assert False
2356 return r, t
2359def rotate_to_lqt(traces, backazimuth, incidence, in_channels,
2360 out_channels=('L', 'Q', 'T')):
2361 '''
2362 Rotate traces from ZNE to LQT system.
2364 :param traces: list of traces in arbitrary order
2365 :param backazimuth: backazimuth in degrees clockwise from north
2366 :param incidence: incidence angle in degrees from vertical
2367 :param in_channels: input channel names
2368 :param out_channels: output channel names (default: ('L', 'Q', 'T'))
2369 :returns: list of transformed traces
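
The rotation matrix applied to the (Z, N, E) components is::

    | cos(i)  -cos(b)*sin(i)  -sin(b)*sin(i) |
    | sin(i)   cos(b)*cos(i)   sin(b)*cos(i) |
    |   0          sin(b)         -cos(b)    |

with incidence angle ``i`` and backazimuth ``b`` (in radians).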
2370 '''
2371 i = incidence/180.*num.pi
2372 b = backazimuth/180.*num.pi
2374 ci = num.cos(i)
2375 cb = num.cos(b)
2376 si = num.sin(i)
2377 sb = num.sin(b)
2379 rotmat = num.array(
2380 [[ci, -cb*si, -sb*si], [si, cb*ci, sb*ci], [0., sb, -cb]])
2381 return project(traces, rotmat, in_channels, out_channels)
2384def _decompose(a):
2385 '''
2386 Decompose matrix into independent submatrices.
2387 '''
2389 def depends(iout, a):
2390 row = a[iout, :]
2391 return set(num.arange(row.size).compress(row != 0.0))
2393 def provides(iin, a):
2394 col = a[:, iin]
2395 return set(num.arange(col.size).compress(col != 0.0))
2397 a = num.asarray(a)
2398 outs = set(range(a.shape[0]))
2399 systems = []
2400 while outs:
2401 iout = outs.pop()
2403 gout = set()
2404 for iin in depends(iout, a):
2405 gout.update(provides(iin, a))
2407 if not gout:
2408 continue
2410 gin = set()
2411 for iout2 in gout:
2412 gin.update(depends(iout2, a))
2414 if not gin:
2415 continue
2417 for iout2 in gout:
2418 if iout2 in outs:
2419 outs.remove(iout2)
2421 gin = list(gin)
2422 gin.sort()
2423 gout = list(gout)
2424 gout.sort()
2426 systems.append((gin, gout, a[gout, :][:, gin]))
2428 return systems
2431def _channels_to_names(channels):
2432 from pyrocko import squirrel
2433 names = []
2434 for ch in channels:
2435 if isinstance(ch, model.Channel):
2436 names.append(ch.name)
2437 elif isinstance(ch, squirrel.Channel):
2438 names.append(ch.codes.channel)
2439 else:
2440 names.append(ch)
2442 return names
2445def project(traces, matrix, in_channels, out_channels):
2446 '''
2447 Affine transform of three-component traces.
2449 Compute matrix-vector product of three-component traces, to e.g. rotate
2450 traces into a different basis. The traces are distinguished and ordered by
2451 their channel attribute. The transform is applied to overlapping parts
2452 of any appropriate combinations of the input traces, which makes the
2453 function robust in the presence of data gaps. It also tries to apply the
2454 transformation to subsets of the channels, if possible, so that, if for
2455 example a vertical component is missing, horizontal components can
2456 still be rotated.
2458 :param traces: list of traces in arbitrary order
2459 :param matrix: transformation matrix
2460 :param in_channels: input channel names
2461 :param out_channels: output channel names
2462 :returns: list of transformed traces
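
Example (a sketch, assuming ``traces`` contains Z, N and E components;
this flips the polarity of the vertical component)::

    matrix = num.array([
        [-1., 0., 0.],
        [0., 1., 0.],
        [0., 0., 1.]])

    flipped = project(traces, matrix, ['Z', 'N', 'E'], ['Z', 'N', 'E'])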
2463 '''
2465 in_channels = tuple(_channels_to_names(in_channels))
2466 out_channels = tuple(_channels_to_names(out_channels))
2467 systems = _decompose(matrix)
2469 # fallback to full matrix if some are not quadratic
2470 for iins, iouts, submatrix in systems:
2471 if submatrix.shape[0] != submatrix.shape[1]:
2472 if len(in_channels) != 3 or len(out_channels) != 3:
2473 return []
2474 else:
2475 return _project3(traces, matrix, in_channels, out_channels)
2477 projected = []
2478 for iins, iouts, submatrix in systems:
2479 in_cha = tuple([in_channels[iin] for iin in iins])
2480 out_cha = tuple([out_channels[iout] for iout in iouts])
2481 if submatrix.shape[0] == 1:
2482 projected.extend(_project1(traces, submatrix, in_cha, out_cha))
2483 elif submatrix.shape[1] == 2:
2484 projected.extend(_project2(traces, submatrix, in_cha, out_cha))
2485 else:
2486 projected.extend(_project3(traces, submatrix, in_cha, out_cha))
2488 return projected
2491def project_dependencies(matrix, in_channels, out_channels):
2492 '''
2493 Figure out what dependencies project() would produce.
2494 '''
2496 in_channels = tuple(_channels_to_names(in_channels))
2497 out_channels = tuple(_channels_to_names(out_channels))
2498 systems = _decompose(matrix)
2500 subpro = []
2501 for iins, iouts, submatrix in systems:
2502 if submatrix.shape[0] != submatrix.shape[1]:
2503 subpro.append((matrix, in_channels, out_channels))
2505 if not subpro:
2506 for iins, iouts, submatrix in systems:
2507 in_cha = tuple([in_channels[iin] for iin in iins])
2508 out_cha = tuple([out_channels[iout] for iout in iouts])
2509 subpro.append((submatrix, in_cha, out_cha))
2511 deps = {}
2512 for mat, in_cha, out_cha in subpro:
2513 for oc in out_cha:
2514 if oc not in deps:
2515 deps[oc] = []
2517 for ic in in_cha:
2518 deps[oc].append(ic)
2520 return deps
2523def _project1(traces, matrix, in_channels, out_channels):
2524 assert len(in_channels) == 1
2525 assert len(out_channels) == 1
2526 assert matrix.shape == (1, 1)
2528 projected = []
2529 for a in traces:
2530 if not (a.channel,) == in_channels:
2531 continue
2533 ac = a.copy()
2534 ac.set_ydata(matrix[0, 0]*a.get_ydata())
2535 ac.set_codes(channel=out_channels[0])
2536 projected.append(ac)
2538 return projected
2541def _project2(traces, matrix, in_channels, out_channels):
2542 assert len(in_channels) == 2
2543 assert len(out_channels) == 2
2544 assert matrix.shape == (2, 2)
2545 projected = []
2546 for a in traces:
2547 for b in traces:
2548 if not ((a.channel, b.channel) == in_channels and
2549 a.nslc_id[:3] == b.nslc_id[:3] and
2550 abs(a.deltat-b.deltat) < a.deltat*0.001):
2551 continue
2553 tmin = max(a.tmin, b.tmin)
2554 tmax = min(a.tmax, b.tmax)
2556 if tmin > tmax:
2557 continue
2559 ac = a.chop(tmin, tmax, inplace=False, include_last=True)
2560 bc = b.chop(tmin, tmax, inplace=False, include_last=True)
2561 if abs(ac.tmin - bc.tmin) > ac.deltat*0.01:
2562 logger.warning(
2563 'Cannot project traces with displaced sampling '
2564 '(%s, %s, %s, %s)' % a.nslc_id)
2565 continue
2567 acydata = num.dot(matrix[0], (ac.get_ydata(), bc.get_ydata()))
2568 bcydata = num.dot(matrix[1], (ac.get_ydata(), bc.get_ydata()))
2570 ac.set_ydata(acydata)
2571 bc.set_ydata(bcydata)
2573 ac.set_codes(channel=out_channels[0])
2574 bc.set_codes(channel=out_channels[1])
2576 projected.append(ac)
2577 projected.append(bc)
2579 return projected
2582def _project3(traces, matrix, in_channels, out_channels):
2583 assert len(in_channels) == 3
2584 assert len(out_channels) == 3
2585 assert matrix.shape == (3, 3)
2586 projected = []
2587 for a in traces:
2588 for b in traces:
2589 for c in traces:
2590 if not ((a.channel, b.channel, c.channel) == in_channels
2591 and a.nslc_id[:3] == b.nslc_id[:3]
2592 and b.nslc_id[:3] == c.nslc_id[:3]
2593 and abs(a.deltat-b.deltat) < a.deltat*0.001
2594 and abs(b.deltat-c.deltat) < b.deltat*0.001):
2596 continue
2598 tmin = max(a.tmin, b.tmin, c.tmin)
2599 tmax = min(a.tmax, b.tmax, c.tmax)
2601 if tmin >= tmax:
2602 continue
2604 ac = a.chop(tmin, tmax, inplace=False, include_last=True)
2605 bc = b.chop(tmin, tmax, inplace=False, include_last=True)
2606 cc = c.chop(tmin, tmax, inplace=False, include_last=True)
2607 if (abs(ac.tmin - bc.tmin) > ac.deltat*0.01
2608 or abs(bc.tmin - cc.tmin) > bc.deltat*0.01):
2610 logger.warning(
2611 'Cannot project traces with displaced sampling '
2612 '(%s, %s, %s, %s)' % a.nslc_id)
2613 continue
2615 acydata = num.dot(
2616 matrix[0],
2617 (ac.get_ydata(), bc.get_ydata(), cc.get_ydata()))
2618 bcydata = num.dot(
2619 matrix[1],
2620 (ac.get_ydata(), bc.get_ydata(), cc.get_ydata()))
2621 ccydata = num.dot(
2622 matrix[2],
2623 (ac.get_ydata(), bc.get_ydata(), cc.get_ydata()))
2625 ac.set_ydata(acydata)
2626 bc.set_ydata(bcydata)
2627 cc.set_ydata(ccydata)
2629 ac.set_codes(channel=out_channels[0])
2630 bc.set_codes(channel=out_channels[1])
2631 cc.set_codes(channel=out_channels[2])
2633 projected.append(ac)
2634 projected.append(bc)
2635 projected.append(cc)
2637 return projected
2640def correlate(a, b, mode='valid', normalization=None, use_fft=False):
2641 '''
2642 Cross correlation of two traces.
2644 :param a,b: input traces
2645 :param mode: ``'valid'``, ``'full'``, or ``'same'``
2646 :param normalization: ``'normal'``, ``'gliding'``, or ``None``
2647 :param use_fft: bool, whether to do cross correlation in spectral domain
2649 :returns: trace containing cross correlation coefficients
2651 This function computes the cross correlation between two traces. It
2652 evaluates the discrete equivalent of
2654 .. math::
2656 c(t) = \\int_{-\\infty}^{\\infty} a^{\\ast}(\\tau) b(t+\\tau) d\\tau
2658 where the star denotes the complex conjugate. Note that the arguments
2659 here are swapped compared with the :py:func:`numpy.correlate` function,
2660 which is internally called. This function should be safe even with older
2661 versions of NumPy, where the correlate function has some problems.
2663 A trace containing the cross correlation coefficients is returned. The time
2664 information of the output trace is set so that the returned cross
2665 correlation can be viewed directly as a function of time lag.
2667 Example::
2669 # align two traces a and b containing a time shifted similar signal:
2670 c = pyrocko.trace.correlate(a, b)
2671 t, coef = c.max() # get time and value of maximum
2672 b.shift(-t) # align b with a
2674 '''
2676 assert_same_sampling_rate(a, b)
2678 ya, yb = a.ydata, b.ydata
2680 # need reversed order here:
2681 yc = numpy_correlate_fixed(yb, ya, mode=mode, use_fft=use_fft)
2682 kmin, kmax = numpy_correlate_lag_range(yb, ya, mode=mode, use_fft=use_fft)
2684 if normalization == 'normal':
2685 normfac = num.sqrt(num.sum(ya**2))*num.sqrt(num.sum(yb**2))
2686 yc = yc/normfac
2688 elif normalization == 'gliding':
2689 if mode != 'valid':
2690 assert False, 'gliding normalization currently only available ' \
2691 'with "valid" mode.'
2693 if ya.size < yb.size:
2694 yshort, ylong = ya, yb
2695 else:
2696 yshort, ylong = yb, ya
2698 epsilon = 0.00001
2699 normfac_short = num.sqrt(num.sum(yshort**2))
2700 normfac = normfac_short * num.sqrt(
2701 moving_sum(ylong**2, yshort.size, mode='valid')) \
2702 + normfac_short*epsilon
2704 if yb.size <= ya.size:
2705 normfac = normfac[::-1]
2707 yc /= normfac
2709 c = a.copy()
2710 c.set_ydata(yc)
2711 c.set_codes(*merge_codes(a, b, '~'))
2712 c.shift(-c.tmin + b.tmin-a.tmin + kmin * c.deltat)
2714 return c
2717def deconvolve(
2718 a, b, waterlevel,
2719 tshift=0.,
2720 pad=0.5,
2721 fd_taper=None,
2722 pad_to_pow2=True):
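'''
Waterlevel deconvolution of two traces.

Divides the spectrum of ``a`` by the spectrum of ``b``, stabilizing the
division by clipping the denominator at ``waterlevel`` times its
maximum value.

:param a,b: input traces
:param waterlevel: stabilization factor for the spectral division
:param tshift: time shift applied to the result in [s]
:param pad: zero-padding, as fraction of the data length of ``a``
:param fd_taper: optional taper applied to the result in the frequency
domain
:param pad_to_pow2: bool, whether to pad to the next power of two for
the FFT
:returns: trace containing the deconvolution result
'''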
2724 assert_same_sampling_rate(a, b)
2725 assert abs(a.tmin - b.tmin) < a.deltat * 0.001
2726 deltat = a.deltat
2727 npad = int(round(a.data_len()*pad + tshift / deltat))
2729 ndata = max(a.data_len(), b.data_len())
2730 ndata_pad = ndata + npad
2732 if pad_to_pow2:
2733 ntrans = nextpow2(ndata_pad)
2734 else:
2735 ntrans = ndata_pad
2737 aspec = num.fft.rfft(a.ydata, ntrans)
2738 bspec = num.fft.rfft(b.ydata, ntrans)
2740 out = aspec * num.conj(bspec)
2742 bautocorr = bspec*num.conj(bspec)
2743 denom = num.maximum(bautocorr, waterlevel * bautocorr.max())
2745 out /= denom
2746 df = 1/(ntrans*deltat)
2748 if fd_taper is not None:
2749 fd_taper(out, 0.0, df)
2751 ydata = num.roll(num.fft.irfft(out), int(round(tshift/deltat)))
2752 c = a.copy(data=False)
2753 c.set_ydata(ydata[:ndata])
2754 c.set_codes(*merge_codes(a, b, '/'))
2755 return c
2758def assert_same_sampling_rate(a, b, eps=1.0e-6):
2759 assert same_sampling_rate(a, b, eps), \
2760 'Sampling rates differ: %g != %g' % (a.deltat, b.deltat)
2763def same_sampling_rate(a, b, eps=1.0e-6):
2764 '''
2765 Check if two traces have the same sampling rate.
2767 :param a,b: input traces
2768 :param eps: relative tolerance
2769 '''
2771 return abs(a.deltat - b.deltat) < (a.deltat + b.deltat)*eps
2774def fix_deltat_rounding_errors(deltat):
2775 '''
2776 Try to undo sampling rate rounding errors.
2778 Fix rounding errors of sampling intervals when these are read from single
2779 precision floating point values.
2781 Assumes that the true sampling rate or sampling interval was an integer
2782 value. No correction will be applied if this would change the sampling
2783 rate by more than 0.001%.
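
Example::

    deltat = fix_deltat_rounding_errors(0.009999999776482582)
    # deltat == 0.01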
2784 '''
2786 if deltat <= 1.0:
2787 deltat_new = 1.0 / round(1.0 / deltat)
2788 else:
2789 deltat_new = round(deltat)
2791 if abs(deltat_new - deltat) / deltat > 1e-5:
2792 deltat_new = deltat
2794 return deltat_new
2797def merge_codes(a, b, sep='-'):
2798 '''
2799 Merge network-station-location-channel codes of a pair of traces.
2800 '''
2802 o = []
2803 for xa, xb in zip(a.nslc_id, b.nslc_id):
2804 if xa == xb:
2805 o.append(xa)
2806 else:
2807 o.append(sep.join((xa, xb)))
2808 return o
2811class Taper(Object):
2812 '''
2813 Base class for tapers.
2815 Does nothing by default.
2816 '''
2818 def __call__(self, y, x0, dx):
2819 pass
2822class CosTaper(Taper):
2823 '''
2824 Cosine Taper.
2826 :param a: start of fading in
2827 :param b: end of fading in
2828 :param c: start of fading out
2829 :param d: end of fading out
2830 '''
2832 a = Float.T()
2833 b = Float.T()
2834 c = Float.T()
2835 d = Float.T()
2837 def __init__(self, a, b, c, d):
2838 Taper.__init__(self, a=a, b=b, c=c, d=d)
2840 def __call__(self, y, x0, dx):
2841 apply_costaper(self.a, self.b, self.c, self.d, y, x0, dx)
2843 def span(self, y, x0, dx):
2844 return span_costaper(self.a, self.b, self.c, self.d, y, x0, dx)
2846 def time_span(self):
2847 return self.a, self.d
2850class CosFader(Taper):
2851 '''
2852 Cosine Fader.
2854 :param xfade: fade in and fade out time in seconds (optional)
2855 :param xfrac: fade in and fade out as fraction between 0. and 1. (optional)
2857 Only one of the two arguments may be set; the other must be ``None``.
2858 '''
2860 xfade = Float.T(optional=True)
2861 xfrac = Float.T(optional=True)
2863 def __init__(self, xfade=None, xfrac=None):
2864 Taper.__init__(self, xfade=xfade, xfrac=xfrac)
2865 assert (xfade is None) != (xfrac is None)
2866 self._xfade = xfade
2867 self._xfrac = xfrac
2869 def __call__(self, y, x0, dx):
2871 xfade = self._xfade
2873 xlen = (y.size - 1)*dx
2874 if xfade is None:
2875 xfade = xlen * self._xfrac
2877 a = x0
2878 b = x0 + xfade
2879 c = x0 + xlen - xfade
2880 d = x0 + xlen
2882 apply_costaper(a, b, c, d, y, x0, dx)
2884 def span(self, y, x0, dx):
2885 return 0, y.size
2887 def time_span(self):
2888 return None, None
2891def none_min(li):
2892 if None in li:
2893 return None
2894 else:
2895 return min(x for x in li if x is not None)
2898def none_max(li):
2899 if None in li:
2900 return None
2901 else:
2902 return max(x for x in li if x is not None)
2905class MultiplyTaper(Taper):
2906 '''
2907 Multiplication of several tapers.
2908 '''
2910 tapers = List.T(Taper.T())
2912 def __init__(self, tapers=None):
2913 if tapers is None:
2914 tapers = []
2916 Taper.__init__(self, tapers=tapers)
2918 def __call__(self, y, x0, dx):
2919 for taper in self.tapers:
2920 taper(y, x0, dx)
2922 def span(self, y, x0, dx):
2923 spans = []
2924 for taper in self.tapers:
2925 spans.append(taper.span(y, x0, dx))
2927 mins, maxs = list(zip(*spans))
2928 return min(mins), max(maxs)
2930 def time_span(self):
2931 spans = []
2932 for taper in self.tapers:
2933 spans.append(taper.time_span())
2935 mins, maxs = list(zip(*spans))
2936 return none_min(mins), none_max(maxs)
2939class GaussTaper(Taper):
2940 '''
2941 Frequency domain Gaussian filter.
2942 '''
2944 alpha = Float.T()
2946 def __init__(self, alpha):
2947 Taper.__init__(self, alpha=alpha)
2948 self._alpha = alpha
2950 def __call__(self, y, x0, dx):
2951 f = x0 + num.arange(y.size)*dx
2952 y *= num.exp(-num.pi**2 / (self._alpha**2) * f**2)
2955cached_coefficients = {}
2958def _get_cached_filter_coefs(order, corners, btype):
2959 ck = (order, tuple(corners), btype)
2960 if ck not in cached_coefficients:
2961 if len(corners) == 1:
2962 cached_coefficients[ck] = signal.butter(
2963 order, corners[0], btype=btype)
2964 else:
2965 cached_coefficients[ck] = signal.butter(
2966 order, corners, btype=btype)
2968 return cached_coefficients[ck]
2971class _globals(object):
2972 _numpy_has_correlate_flip_bug = None
2975def _default_key(tr):
2976 return (tr.network, tr.station, tr.location, tr.channel)
2979def numpy_has_correlate_flip_bug():
2980 '''
2981 Check if NumPy's correlate function exhibits the old (argument-flipping) behaviour.
2982 '''
2984 if _globals._numpy_has_correlate_flip_bug is None:
2985 a = num.array([0, 0, 1, 0, 0, 0, 0])
2986 b = num.array([0, 0, 0, 0, 1, 0, 0, 0])
2987 ab = num.correlate(a, b, mode='same')
2988 ba = num.correlate(b, a, mode='same')
2989 _globals._numpy_has_correlate_flip_bug = num.all(ab == ba)
2991 return _globals._numpy_has_correlate_flip_bug
2994def numpy_correlate_fixed(a, b, mode='valid', use_fft=False):
2995 '''
2996 Call :py:func:`numpy.correlate` with fixes.
2998 c[k] = sum_i a[i+k] * conj(b[i])
3000 Note that the result produced by newer numpy.correlate is always flipped
3001 with respect to the formula given in its documentation (if ascending k
3002 assumed for the output).
3003 '''
3005 if use_fft:
3006 if a.size < b.size:
3007 c = signal.fftconvolve(b[::-1], a, mode=mode)
3008 else:
3009 c = signal.fftconvolve(a, b[::-1], mode=mode)
3010 return c
3012 else:
3013 buggy = numpy_has_correlate_flip_bug()
3015 a = num.asarray(a)
3016 b = num.asarray(b)
3018 if buggy:
3019 b = num.conj(b)
3021 c = num.correlate(a, b, mode=mode)
3023 if buggy and a.size < b.size:
3024 return c[::-1]
3025 else:
3026 return c
3029def numpy_correlate_emulate(a, b, mode='valid'):
3030 '''
3031 Slow version of :py:func:`numpy.correlate` for comparison.
3032 '''
3034 a = num.asarray(a)
3035 b = num.asarray(b)
3038 kmin, kmax = numpy_correlate_lag_range(a, b, mode=mode)
3039 kmin = int(kmin)
3040 kmax = int(kmax)
3041 klen = kmax - kmin + 1
3042 c = num.zeros(klen, dtype=num.promote_types(a.dtype, b.dtype))
3043 for k in range(kmin, kmin+klen):
3044 imin = max(0, -k)
3045 ilen = min(b.size, a.size-k) - imin
3046 c[k-kmin] = num.sum(
3047 a[imin+k:imin+ilen+k] * num.conj(b[imin:imin+ilen]))
3049 return c
3052def numpy_correlate_lag_range(a, b, mode='valid', use_fft=False):
3053 '''
3054 Get range of lags for which :py:func:`numpy.correlate` produces values.
3055 '''
3057 a = num.asarray(a)
3058 b = num.asarray(b)
3060 kmin = -(b.size-1)
3061 if mode == 'full':
3062 klen = a.size-kmin
3063 elif mode == 'same':
3064 klen = max(a.size, b.size)
3065 kmin += (a.size+b.size-1 - max(a.size, b.size)) // 2 + \
3066 int(not use_fft and a.size % 2 == 0 and b.size > a.size)
3067 elif mode == 'valid':
3068 klen = abs(a.size - b.size) + 1
3069 kmin += min(a.size, b.size) - 1
3071 return kmin, kmin + klen - 1
3074def autocorr(x, nshifts):
3075 '''
3076 Compute biased estimate of the first autocorrelation coefficients.
3078 :param x: input array
3079 :param nshifts: number of coefficients to calculate
3080 '''
3082 mean = num.mean(x)
3083 std = num.std(x)
3084 n = x.size
3085 xdm = x - mean
3086 r = num.zeros(nshifts)
3087 for k in range(nshifts):
3088 r[k] = 1./((n-num.abs(k))*std) * num.sum(xdm[:n-k] * xdm[k:])
3090 return r
3093def yulewalker(x, order):
3094 '''
3095 Compute autoregression coefficients using Yule-Walker method.
3097 :param x: input array
3098 :param order: number of coefficients to produce
3100 A biased estimate of the autocorrelation is used. The Yule-Walker equations
3101 are solved by :py:func:`numpy.linalg.inv` instead of Levinson-Durbin
3102 recursion which is normally used.
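
Example (a sketch; estimate 2 AR coefficients from a random signal)::

    x = num.random.normal(size=1024)
    coefs = yulewalker(x, order=2)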
3103 '''
3105 gamma = autocorr(x, order+1)
3106 d = gamma[1:1+order]
3107 a = num.zeros((order, order))
3108 gamma2 = num.concatenate((gamma[::-1], gamma[1:order]))
3109 for i in range(order):
3110 ioff = order-i
3111 a[i, :] = gamma2[ioff:ioff+order]
3113 return num.dot(num.linalg.inv(a), -d)
3116def moving_avg(x, n):
3117 n = int(n)
3118 cx = x.cumsum()
3119 nn = len(x)
3120 y = num.zeros(nn, dtype=cx.dtype)
3121 y[n//2:n//2+(nn-n)] = (cx[n:]-cx[:-n])/n
3122 y[:n//2] = y[n//2]
3123 y[n//2+(nn-n):] = y[n//2+(nn-n)-1]
3124 return y
3127def moving_sum(x, n, mode='valid'):
3128 n = int(n)
3129 cx = x.cumsum()
3130 nn = len(x)
3132 if mode == 'valid':
3133 if nn-n+1 <= 0:
3134 return num.zeros(0, dtype=cx.dtype)
3135 y = num.zeros(nn-n+1, dtype=cx.dtype)
3136 y[0] = cx[n-1]
3137 y[1:nn-n+1] = cx[n:nn]-cx[0:nn-n]
3139 if mode == 'full':
3140 y = num.zeros(nn+n-1, dtype=cx.dtype)
3141 if n <= nn:
3142 y[0:n] = cx[0:n]
3143 y[n:nn] = cx[n:nn]-cx[0:nn-n]
3144 y[nn:nn+n-1] = cx[-1]-cx[nn-n:nn-1]
3145 else:
3146 y[0:nn] = cx[0:nn]
3147 y[nn:n] = cx[nn-1]
3148 y[n:nn+n-1] = cx[nn-1] - cx[0:nn-1]
3150 if mode == 'same':
3151 n1 = (n-1)//2
3152 y = num.zeros(nn, dtype=cx.dtype)
3153 if n <= nn:
3154 y[0:n-n1] = cx[n1:n]
3155 y[n-n1:nn-n1] = cx[n:nn]-cx[0:nn-n]
3156 y[nn-n1:nn] = cx[nn-1] - cx[nn-n:nn-n+n1]
3157 else:
3158 y[0:max(0, nn-n1)] = cx[min(n1, nn):nn]
3159 y[max(nn-n1, 0):min(n-n1, nn)] = cx[nn-1]
3160 y[min(n-n1, nn):nn] = cx[nn-1] - cx[0:max(0, nn-(n-n1))]
3162 return y
3165def nextpow2(i):
3166 return 2**int(math.ceil(math.log(i)/math.log(2.)))
3169def snapper_w_offset(nmax, offset, delta, snapfun=math.ceil):
3170 def snap(x):
3171 return max(0, min(int(snapfun((x-offset)/delta)), nmax))
3172 return snap
3175def snapper(nmax, delta, snapfun=math.ceil):
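'''
Return a function which maps a value ``x`` to the integer bin index
``snapfun(x/delta)``, clipped to the range [0, ``nmax``].
'''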
3176 def snap(x):
3177 return max(0, min(int(snapfun(x/delta)), nmax))
3178 return snap
3181def apply_costaper(a, b, c, d, y, x0, dx):
3182 hi = snapper_w_offset(y.size, x0, dx)
3183 y[:hi(a)] = 0.
3184 y[hi(a):hi(b)] *= 0.5 \
3185 - 0.5*num.cos((dx*num.arange(hi(a), hi(b))-(a-x0))/(b-a)*num.pi)
3186 y[hi(c):hi(d)] *= 0.5 \
3187 + 0.5*num.cos((dx*num.arange(hi(c), hi(d))-(c-x0))/(d-c)*num.pi)
3188 y[hi(d):] = 0.
3191def span_costaper(a, b, c, d, y, x0, dx):
3192 hi = snapper_w_offset(y.size, x0, dx)
3193 return hi(a), hi(d) - hi(a)
3196def costaper(a, b, c, d, nfreqs, deltaf):
3197 hi = snapper(nfreqs, deltaf)
3198 tap = num.zeros(nfreqs)
3199 tap[hi(a):hi(b)] = 0.5 \
3200 - 0.5*num.cos((deltaf*num.arange(hi(a), hi(b))-a)/(b-a)*num.pi)
3201 tap[hi(b):hi(c)] = 1.
3202 tap[hi(c):hi(d)] = 0.5 \
3203 + 0.5*num.cos((deltaf*num.arange(hi(c), hi(d))-c)/(d-c)*num.pi)
3205 return tap
3208def t2ind(t, tdelta, snap=round):
3209 return int(snap(t/tdelta))
3212def hilbert(x, N=None):
3213 '''
3214 Return the analytic signal of x of length N; its imaginary part is the
3215 Hilbert transform of x.
3216 (from scipy.signal.hilbert, but changed to use fft and ifft from numpy.fft)
3217 '''
3219 x = num.asarray(x)
3220 if N is None:
3221 N = len(x)
3222 if N <= 0:
3223 raise ValueError('N must be positive.')
3224 if num.iscomplexobj(x):
3225 logger.warning('imaginary part of x ignored.')
3226 x = num.real(x)
3228 Xf = num.fft.fft(x, N, axis=0)
3229 h = num.zeros(N)
3230 if N % 2 == 0:
3231 h[0] = h[N//2] = 1
3232 h[1:N//2] = 2
3233 else:
3234 h[0] = 1
3235 h[1:(N+1)//2] = 2
3237 if len(x.shape) > 1:
3238 h = h[:, num.newaxis]
3239 x = num.fft.ifft(Xf*h)
3240 return x
3243def near(a, b, eps):
3244 return abs(a-b) < eps
3247def coroutine(func):
3248 def wrapper(*args, **kwargs):
3249 gen = func(*args, **kwargs)
3250 next(gen)
3251 return gen
3253 wrapper.__name__ = func.__name__
3254 wrapper.__dict__ = func.__dict__
3255 wrapper.__doc__ = func.__doc__
3256 return wrapper
3259class States(object):
3260 '''
3261 Utility to store channel-specific state in coroutines.
3262 '''
3264 def __init__(self):
3265 self._states = {}
3267 def get(self, tr):
3268 k = tr.nslc_id
3269 if k in self._states:
3270 tmin, deltat, dtype, value = self._states[k]
3271 if (near(tmin, tr.tmin, deltat/100.)
3272 and near(deltat, tr.deltat, deltat/10000.)
3273 and dtype == tr.ydata.dtype):
3275 return value
3277 return None
3279 def set(self, tr, value):
3280 k = tr.nslc_id
3281 if k in self._states and self._states[k][-1] is not value:
3282 self.free(self._states[k][-1])
3284 self._states[k] = (tr.tmax+tr.deltat, tr.deltat, tr.ydata.dtype, value)
3286 def free(self, value):
3287 pass
3290@coroutine
3291def co_list_append(list):
3292 while True:
3293 list.append((yield))
3296class ScipyBug(Exception):
3297 pass
3300@coroutine
3301def co_lfilter(target, b, a):
3302 '''
3303 Successively filter broken continuous trace data (coroutine).
3305 Create coroutine which takes :py:class:`Trace` objects, filters their data
3306 through :py:func:`scipy.signal.lfilter` and sends new :py:class:`Trace`
3307 objects containing the filtered data to target. This is useful if one
3308 wants to filter a long continuous time series that is split into many
3309 successive traces, without producing filter artifacts at trace boundaries.
3311 Filter states are kept *per channel*, specifically, for each (network,
3312 station, location, channel) combination occurring in the input traces, a
3313 separate state is created and maintained. This makes it possible to filter
3314 multichannel or multistation data with only one :py:func:`co_lfilter`
3315 instance.
3317 Filter state is reset when gaps occur.
3319 Use it like this::
3321 from pyrocko.trace import co_lfilter, co_list_append
3323 filtered_traces = []
3324 pipe = co_lfilter(co_list_append(filtered_traces), b, a)
3325 for trace in traces:
3326 pipe.send(trace)
3328 pipe.close()
3330 '''
3332 try:
3333 states = States()
3334 output = None
3335 while True:
3336 input = (yield)
3338 zi = states.get(input)
3339 if zi is None:
3340 zi = num.zeros(max(len(a), len(b))-1, dtype=float)
3342 output = input.copy(data=False)
3343 try:
3344 ydata, zf = signal.lfilter(b, a, input.get_ydata(), zi=zi)
3345 except ValueError:
3346 raise ScipyBug(
3347 'signal.lfilter failed: could be related to a bug '
3348 'in some older scipy versions, e.g. on opensuse42.1')
3350 output.set_ydata(ydata)
3351 states.set(input, zf)
3352 target.send(output)
3354 except GeneratorExit:
3355 target.close()
3358def co_antialias(target, q, n=None, ftype='fir'):
3359 b, a, n = util.decimate_coeffs(q, n, ftype)
3360 anti = co_lfilter(target, b, a)
3361 return anti
3364@coroutine
3365def co_dropsamples(target, q, nfir):
3366 try:
3367 states = States()
3368 while True:
3369 tr = (yield)
3370 newdeltat = q * tr.deltat
3371 ioffset = states.get(tr)
3372 if ioffset is None:
3373 # for fir filter, the first nfir samples are polluted by
3374 # boundary effects; cut them off.
3375 # for iir this may be (much) more; we do not correct for that.
3376 # put sample instances to a time which is a multiple of the
3377 # new sampling interval.
3378 newtmin_want = math.ceil(
3379 (tr.tmin+(nfir+1)*tr.deltat) / newdeltat) * newdeltat \
3380 - (nfir/2*tr.deltat)
3381 ioffset = int(round((newtmin_want - tr.tmin)/tr.deltat))
3382 if ioffset < 0:
3383 ioffset = ioffset % q
3385 newtmin_have = tr.tmin + ioffset * tr.deltat
3386 newtr = tr.copy(data=False)
3387 newtr.deltat = newdeltat
3388 # because the fir kernel shifts data by nfir/2 samples:
3389 newtr.tmin = newtmin_have - (nfir/2*tr.deltat)
3390 newtr.set_ydata(tr.get_ydata()[ioffset::q].copy())
3391 states.set(tr, (ioffset % q - tr.data_len() % q) % q)
3392 target.send(newtr)
3394 except GeneratorExit:
3395 target.close()
3398def co_downsample(target, q, n=None, ftype='fir'):
3399 '''
3400 Successively downsample broken continuous trace data (coroutine).
3402 Create coroutine which takes :py:class:`Trace` objects, downsamples their
3403 data and sends new :py:class:`Trace` objects containing the downsampled
3404 data to target. This is useful if one wants to downsample a long
3405 continuous time series that is split into many successive traces,
3406 without producing filter artifacts and gaps at trace boundaries.
3408 Filter states are kept *per channel*, specifically, for each (network,
3409 station, location, channel) combination occurring in the input traces, a
3410 separate state is created and maintained. This makes it possible to filter
3411 multichannel or multistation data with only one :py:func:`co_lfilter`
3412 instance.
3414 Filter state is reset when gaps occur. The sampling instances are chosen
3415 so that they occur at (or as close as possible to) even multiples of the
3416 sampling interval of the downsampled trace (based on system time).
3417 '''
3419 b, a, n = util.decimate_coeffs(q, n, ftype)
3420 return co_antialias(co_dropsamples(target, q, n), q, n, ftype)
3423@coroutine
3424def co_downsample_to(target, deltat):
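'''
Successively downsample broken continuous trace data to a fixed
sampling interval (coroutine).

Like :py:func:`co_downsample`, but takes the desired sampling interval
``deltat`` in [s] and assembles the required decimation stages
internally, caching one pipeline per decimation sequence. Raises
:py:exc:`pyrocko.util.UnavailableDecimation` if the ratio of ``deltat``
to the input sampling interval is not (close to) a supported integer.
'''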
3426 decimators = {}
3427 try:
3428 while True:
3429 tr = (yield)
3430 ratio = deltat / tr.deltat
3431 rratio = round(ratio)
3432 if abs(rratio - ratio)/ratio > 0.0001:
3433 raise util.UnavailableDecimation('ratio = %g' % ratio)
3435 deci_seq = tuple(x for x in util.decitab(int(rratio)) if x != 1)
3436 if deci_seq not in decimators:
3437 pipe = target
3438 for q in deci_seq[::-1]:
3439 pipe = co_downsample(pipe, q)
3441 decimators[deci_seq] = pipe
3443 decimators[deci_seq].send(tr)
3445 except GeneratorExit:
3446 for g in decimators.values():
3447 g.close()
3450class DomainChoice(StringChoice):
3451 choices = [
3452 'time_domain',
3453 'frequency_domain',
3454 'envelope',
3455 'absolute',
3456 'cc_max_norm']
3459class MisfitSetup(Object):
3460 '''
3461 Contains the misfit setup to be used in :py:func:`trace.misfit`.
3463 :param description: Description of the setup
3464 :param norm: L-norm classifier
3465 :param taper: Object of :py:class:`Taper`
3466 :param filter: Object of :py:class:`FrequencyResponse`
3467 :param domain: ['time_domain', 'frequency_domain', 'envelope', 'absolute',
3468 'cc_max_norm']
3470 Can be dumped to a yaml file.
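
Example (a minimal sketch with arbitrary values)::

    setup = MisfitSetup(
        norm=2,
        taper=CosTaper(0., 1., 10., 11.),
        domain='time_domain')

    setup.dump(filename='misfit_setup.yaml')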
3471 '''
3473 xmltagname = 'misfitsetup'
3474 description = String.T(optional=True)
3475 norm = Int.T(optional=False)
3476 taper = Taper.T(optional=False)
3477 filter = FrequencyResponse.T(optional=True)
3478 domain = DomainChoice.T(default='time_domain')
3481def equalize_sampling_rates(trace_1, trace_2):
3482 '''
3483 Equalize sampling rates of two traces (reduce higher sampling rate to
3484 lower).
3486 :param trace_1: :py:class:`Trace` object
3487 :param trace_2: :py:class:`Trace` object
3489 Returns both traces; the one with the higher sampling rate is replaced by a downsampled copy if resampling is needed.
3490 '''
3492 if same_sampling_rate(trace_1, trace_2):
3493 return trace_1, trace_2
3495 if trace_1.deltat < trace_2.deltat:
3496 t1_out = trace_1.copy()
3497 t1_out.downsample_to(deltat=trace_2.deltat, snap=True)
3498 logger.debug('Trace downsampled (return copy of trace): %s'
3499 % '.'.join(t1_out.nslc_id))
3500 return t1_out, trace_2
3502 elif trace_1.deltat > trace_2.deltat:
3503 t2_out = trace_2.copy()
3504 t2_out.downsample_to(deltat=trace_1.deltat, snap=True)
3505 logger.debug('Trace downsampled (return copy of trace): %s'
3506 % '.'.join(t2_out.nslc_id))
3507 return trace_1, t2_out
3510def Lx_norm(u, v, norm=2):
3511 '''
3512 Calculate the misfit value *m* and the normalization divisor *n*
3513 according to the given norm.
3515 The normalization divisor *n* is calculated from ``v``.
3517 :param u: :py:class:`numpy.array`
3518 :param v: :py:class:`numpy.array`
3519 :param norm: norm exponent (default: 2)
3521 ``u`` and ``v`` must be of the same size.
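
For the default ``norm=2`` this evaluates to

.. math::

    m = \\sqrt{\\sum_i (v_i - u_i)^2}, \\qquad
    n = \\sqrt{\\sum_i v_i^2}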
3522 '''
3524 if norm == 1:
3525 return (
3526 num.sum(num.abs(v-u)),
3527 num.sum(num.abs(v)))
3529 elif norm == 2:
3530 return (
3531 num.sqrt(num.sum((v-u)**2)),
3532 num.sqrt(num.sum(v**2)))
3534 else:
3535 return (
3536 num.power(num.sum(num.abs(num.power(v - u, norm))), 1./norm),
3537 num.power(num.sum(num.abs(num.power(v, norm))), 1./norm))
3540def do_downsample(tr, deltat):
3541 if abs(tr.deltat - deltat) / tr.deltat > 1e-6:
3542 tr = tr.copy()
3543 tr.downsample_to(deltat, snap=True, demean=False)
3544 else:
3545 if tr.tmin/tr.deltat > 1e-6 or tr.tmax/tr.deltat > 1e-6:
3546 tr = tr.copy()
3547 tr.snap()
3548 return tr
3551def do_extend(tr, tmin, tmax):
3552 if tmin < tr.tmin or tmax > tr.tmax:
3553 tr = tr.copy()
3554 tr.extend(tmin=tmin, tmax=tmax, fillmethod='repeat')
3556 return tr
3559def do_pre_taper(tr, taper):
3560 return tr.taper(taper, inplace=False, chop=True)
3563def do_fft(tr, filter):
3564 if filter is None:
3565 return tr
3566 else:
3567 ndata = tr.ydata.size
3568 nfft = nextpow2(ndata)
3569 padded = num.zeros(nfft, dtype=float)
3570 padded[:ndata] = tr.ydata
3571 spectrum = num.fft.rfft(padded)
3572 df = 1.0 / (tr.deltat * nfft)
3573 frequencies = num.arange(spectrum.size)*df
3574 return [tr, frequencies, spectrum]
3577def do_filter(inp, filter):
3578 if filter is None:
3579 return inp
3580 else:
3581 tr, frequencies, spectrum = inp
3582 spectrum *= filter.evaluate(frequencies)
3583 return [tr, frequencies, spectrum]
3586def do_ifft(inp):
3587 if isinstance(inp, Trace):
3588 return inp
3589 else:
3590 tr, _, spectrum = inp
3591 ndata = tr.ydata.size
3592 tr = tr.copy(data=False)
3593 tr.set_ydata(num.fft.irfft(spectrum)[:ndata])
3594 return tr
3597def check_alignment(t1, t2):
3598 if abs(t1.tmin-t2.tmin) > t1.deltat * 1e-4 or \
3599 abs(t1.tmax - t2.tmax) > t1.deltat * 1e-4 or \
3600 t1.ydata.shape != t2.ydata.shape:
3601 raise MisalignedTraces(
3602 'Cannot calculate misfit of %s and %s due to misaligned '
3603 'traces.' % ('.'.join(t1.nslc_id), '.'.join(t2.nslc_id)))