# https://pyrocko.org - GPLv3
#
# The Pyrocko Developers, 21st Century
# ---|P------/S----------~Lg----------

'''
Basic signal processing for seismic waveforms.
'''

import time
import math
import copy
import logging
import hashlib
from collections import defaultdict

import numpy as num
from scipy import signal

from pyrocko import util, orthodrome, pchain, model, signal_ext
from pyrocko.util import reuse
from pyrocko.guts import Object, Float, Int, String, List, \
    StringChoice, Timestamp
from pyrocko.guts_array import Array
from pyrocko.model import squirrel_content

# backward compatibility
from pyrocko.util import UnavailableDecimation  # noqa
from pyrocko.response import (  # noqa
    FrequencyResponse, Evalresp, InverseEvalresp, PoleZeroResponse,
    ButterworthResponse, SampledResponse, IntegrationResponse,
    DifferentiationResponse, MultiplyResponse)


guts_prefix = 'pf'

logger = logging.getLogger('pyrocko.trace')


g_tapered_coeffs_cache = {}
g_one_response = FrequencyResponse()


@squirrel_content
class Trace(Object):

    '''
    Create new trace object.

    A ``Trace`` object represents a single continuous strip of evenly sampled
    time series data. It is built from a 1D NumPy array containing the data
    samples and some attributes describing its beginning and ending time, its
    sampling rate and four string identifiers (its network, station, location
    and channel code).

    :param network: network code
    :param station: station code
    :param location: location code
    :param channel: channel code
    :param extra: extra code
    :param tmin: system time of first sample in [s]
    :param tmax: system time of last sample in [s] (if set to ``None`` it is
        computed from length of ``ydata``)
    :param deltat: sampling interval in [s]
    :param ydata: 1D numpy array with data samples (can be ``None`` when
        ``tmax`` is not ``None``)
    :param mtime: optional modification time

    :param meta: additional meta information (not used, but maintained by the
        library)

    The length of the network, station, location and channel codes is not
    restricted by this software, but data formats like SAC, Mini-SEED or GSE
    have different limits on the lengths of these codes. The codes set here
    are silently truncated when the trace is stored.
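
    Example (minimal sketch): create a trace from synthetic samples::

        import numpy as num
        from pyrocko import trace

        tr = trace.Trace(
            station='TEST', channel='Z', deltat=0.01, tmin=0.0,
            ydata=num.sin(num.linspace(0., 10., 1001)))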
    '''

    network = String.T(default='', help='Deployment/network code (1-8)')
    station = String.T(default='STA', help='Station code (1-5)')
    location = String.T(default='', help='Location code (0-2)')
    channel = String.T(default='', help='Channel code (3)')
    extra = String.T(default='', help='Extra/custom code')

    tmin = Timestamp.T(default=Timestamp.D('1970-01-01 00:00:00'))
    tmax = Timestamp.T()

    deltat = Float.T(default=1.0)
    ydata = Array.T(optional=True, shape=(None,), serialize_as='base64+meta')

    mtime = Timestamp.T(optional=True)

    cached_frequencies = {}

    def __init__(self, network='', station='STA', location='', channel='',
                 tmin=0., tmax=None, deltat=1., ydata=None, mtime=None,
                 meta=None, extra=''):

        Object.__init__(self, init_props=False)

        time_float = util.get_time_float()

        if not isinstance(tmin, time_float):
            tmin = Trace.tmin.regularize_extra(tmin)

        if tmax is not None and not isinstance(tmax, time_float):
            tmax = Trace.tmax.regularize_extra(tmax)

        if mtime is not None and not isinstance(mtime, time_float):
            mtime = Trace.mtime.regularize_extra(mtime)

        if ydata is not None and not isinstance(ydata, num.ndarray):
            ydata = Trace.ydata.regularize_extra(ydata)

        self._growbuffer = None

        tmin = time_float(tmin)
        if tmax is not None:
            tmax = time_float(tmax)

        if mtime is None:
            mtime = time_float(time.time())

        self.network, self.station, self.location, self.channel, \
            self.extra = [
                reuse(x) for x in (
                    network, station, location, channel, extra)]

        self.tmin = tmin
        self.deltat = deltat

        if tmax is None:
            if ydata is not None:
                self.tmax = self.tmin + (ydata.size-1)*self.deltat
            else:
                raise Exception(
                    'fixme: trace must be created with tmax or ydata')
        else:
            n = int(round((tmax - self.tmin) / self.deltat)) + 1
            self.tmax = self.tmin + (n - 1) * self.deltat

        self.meta = meta
        self.ydata = ydata
        self.mtime = mtime
        self._update_ids()
        self.file = None
        self._pchain = None

    def __str__(self):
        fmt = min(9, max(0, -int(math.floor(math.log10(self.deltat)))))
        s = 'Trace (%s, %s, %s, %s)\n' % self.nslc_id
        s += '  timerange: %s - %s\n' % (
            util.time_to_str(self.tmin, format=fmt),
            util.time_to_str(self.tmax, format=fmt))

        s += '  delta t: %g\n' % self.deltat
        if self.meta:
            for k in sorted(self.meta.keys()):
                s += '  %s: %s\n' % (k, self.meta[k])
        return s

    @property
    def codes(self):
        from pyrocko.squirrel import model
        return model.CodesNSLCE(
            self.network, self.station, self.location, self.channel,
            self.extra)

    @property
    def time_span(self):
        return self.tmin, self.tmax

    @property
    def summary_entries(self):
        return (
            self.__class__.__name__,
            str(self.codes),
            self.str_time_span,
            '%g' % (1.0/self.deltat))

    @property
    def summary_stats_entries(self):
        return tuple('%13.7g' % v for v in (
            self.ydata.min(),
            self.ydata.max(),
            self.ydata.mean(),
            self.ydata.std()))

    @property
    def summary(self):
        return util.fmt_summary(self.summary_entries, (10, 20, 55, 0))

    @property
    def summary_stats(self):
        return self.summary + ' | ' + util.fmt_summary(
            self.summary_stats_entries, (12, 12, 12, 12))

    def __getstate__(self):
        return (self.network, self.station, self.location, self.channel,
                self.tmin, self.tmax, self.deltat, self.mtime,
                self.ydata, self.meta, self.extra)

    def __setstate__(self, state):
        if len(state) == 11:
            self.network, self.station, self.location, self.channel, \
                self.tmin, self.tmax, self.deltat, self.mtime, \
                self.ydata, self.meta, self.extra = state

        elif len(state) == 12:
            # backward compatibility 0
            self.network, self.station, self.location, self.channel, \
                self.tmin, self.tmax, self.deltat, self.mtime, \
                self.ydata, self.meta, _, self.extra = state

        elif len(state) == 10:
            # backward compatibility 1
            self.network, self.station, self.location, self.channel, \
                self.tmin, self.tmax, self.deltat, self.mtime, \
                self.ydata, self.meta = state

            self.extra = ''

        else:
            # backward compatibility 2
            self.network, self.station, self.location, self.channel, \
                self.tmin, self.tmax, self.deltat, self.mtime = state
            self.ydata = None
            self.meta = None
            self.extra = ''

        self._growbuffer = None
        self._update_ids()

    def hash(self, unsafe=False):
        sha1 = hashlib.sha1()
        for code in self.nslc_id:
            sha1.update(code.encode())
        sha1.update(self.tmin.hex().encode())
        sha1.update(self.tmax.hex().encode())
        sha1.update(self.deltat.hex().encode())

        if self.ydata is not None:
            if not unsafe:
                sha1.update(memoryview(self.ydata))
            else:
                sha1.update(memoryview(self.ydata[:32]))
                sha1.update(memoryview(self.ydata[-32:]))

        return sha1.hexdigest()

    def name(self):
        '''
        Get a short string description.
        '''

        s = '%s.%s.%s.%s, %s, %s' % (self.nslc_id + (
            util.time_to_str(self.tmin),
            util.time_to_str(self.tmax)))

        return s

    def __eq__(self, other):
        return (
            isinstance(other, Trace)
            and self.network == other.network
            and self.station == other.station
            and self.location == other.location
            and self.channel == other.channel
            and (abs(self.deltat - other.deltat)
                 < (self.deltat + other.deltat)*1e-6)
            and abs(self.tmin-other.tmin) < self.deltat*0.01
            and abs(self.tmax-other.tmax) < self.deltat*0.01
            and num.all(self.ydata == other.ydata))

    def almost_equal(self, other, rtol=1e-5, atol=0.0):
        return (
            self.network == other.network
            and self.station == other.station
            and self.location == other.location
            and self.channel == other.channel
            and (abs(self.deltat - other.deltat)
                 < (self.deltat + other.deltat)*1e-6)
            and abs(self.tmin-other.tmin) < self.deltat*0.01
            and abs(self.tmax-other.tmax) < self.deltat*0.01
            and num.allclose(self.ydata, other.ydata, rtol=rtol, atol=atol))

    def assert_almost_equal(self, other, rtol=1e-5, atol=0.0):

        assert self.network == other.network, \
            'network codes differ: %s, %s' % (self.network, other.network)
        assert self.station == other.station, \
            'station codes differ: %s, %s' % (self.station, other.station)
        assert self.location == other.location, \
            'location codes differ: %s, %s' % (self.location, other.location)
        assert self.channel == other.channel, 'channel codes differ'
        assert (abs(self.deltat - other.deltat)
                < (self.deltat + other.deltat)*1e-6), \
            'sampling intervals differ %g, %g' % (self.deltat, other.deltat)
        assert abs(self.tmin-other.tmin) < self.deltat*0.01, \
            'start times differ: %s, %s' % (
                util.time_to_str(self.tmin), util.time_to_str(other.tmin))
        assert abs(self.tmax-other.tmax) < self.deltat*0.01, \
            'end times differ: %s, %s' % (
                util.time_to_str(self.tmax), util.time_to_str(other.tmax))

        assert num.allclose(self.ydata, other.ydata, rtol=rtol, atol=atol), \
            'trace values differ'

    def __hash__(self):
        return id(self)

    def __call__(self, t, clip=False, snap=round):
        it = int(snap((t-self.tmin)/self.deltat))
        if clip:
            it = max(0, min(it, self.ydata.size-1))
        else:
            if it < 0 or self.ydata.size <= it:
                raise IndexError()

        return self.tmin+it*self.deltat, self.ydata[it]

    def interpolate(self, t, clip=False):
        '''
        Value of trace between supporting points through linear interpolation.

        :param t: time instant
        :param clip: whether to clip indices to trace ends
        '''

        t0, y0 = self(t, clip=clip, snap=math.floor)
        t1, y1 = self(t, clip=clip, snap=math.ceil)
        if t0 == t1:
            return y0
        else:
            return y0+(t-t0)/(t1-t0)*(y1-y0)

    def index_clip(self, i):
        '''
        Clip index to valid range.
        '''

        return min(max(0, i), self.ydata.size)

    def add(self, other, interpolate=True, left=0., right=0.):
        '''
        Add values of other trace (self += other).

        Add values of ``other`` trace to the values of ``self``, where it
        intersects with ``other``. This method does not change the extent of
        ``self``. If ``interpolate`` is ``True`` (the default), the values of
        ``other`` to be added are interpolated at sampling instants of
        ``self``. Linear interpolation is performed. In this case the sampling
        rate of ``other`` must be equal to or lower than that of ``self``. If
        ``interpolate`` is ``False``, the sampling rates of the two traces must
        match.
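
        Example (minimal sketch, assuming ``tr`` and ``noise`` are
        overlapping :py:class:`Trace` objects)::

            tr.add(noise)  # noise values are linearly interpolated onto
                           # the sampling instants of tr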
        '''

        if interpolate:
            assert self.deltat <= other.deltat \
                or same_sampling_rate(self, other)

            other_xdata = other.get_xdata()
            xdata = self.get_xdata()
            self.ydata += num.interp(
                xdata, other_xdata, other.ydata, left=left, right=right)
        else:
            assert self.deltat == other.deltat
            ioff = int(round((other.tmin-self.tmin)/self.deltat))
            ibeg = max(0, ioff)
            iend = min(self.data_len(), ioff+other.data_len())
            self.ydata[ibeg:iend] += other.ydata[ibeg-ioff:iend-ioff]

    def mult(self, other, interpolate=True):
        '''
        Multiply with values of other trace ``(self *= other)``.

        Multiply the values of ``self`` by the values of ``other``, where it
        intersects with ``other``. This method does not change the extent of
        ``self``. If ``interpolate`` is ``True`` (the default), the values of
        ``other`` to be multiplied are interpolated at sampling instants of
        ``self``. Linear interpolation is performed. In this case the sampling
        rate of ``other`` must be equal to or lower than that of ``self``. If
        ``interpolate`` is ``False``, the sampling rates of the two traces must
        match.
        '''

        if interpolate:
            assert self.deltat <= other.deltat or \
                same_sampling_rate(self, other)

            other_xdata = other.get_xdata()
            xdata = self.get_xdata()
            self.ydata *= num.interp(
                xdata, other_xdata, other.ydata, left=0., right=0.)
        else:
            assert self.deltat == other.deltat
            ibeg1 = int(round((other.tmin-self.tmin)/self.deltat))
            ibeg2 = int(round((self.tmin-other.tmin)/self.deltat))
            iend1 = int(round((other.tmax-self.tmin)/self.deltat))+1
            iend2 = int(round((self.tmax-other.tmin)/self.deltat))+1

            ibeg1 = self.index_clip(ibeg1)
            iend1 = self.index_clip(iend1)
            ibeg2 = self.index_clip(ibeg2)
            iend2 = self.index_clip(iend2)

            self.ydata[ibeg1:iend1] *= other.ydata[ibeg2:iend2]

    def max(self):
        '''
        Get time and value of data maximum.
        '''

        i = num.argmax(self.ydata)
        return self.tmin + i*self.deltat, self.ydata[i]

    def min(self):
        '''
        Get time and value of data minimum.
        '''

        i = num.argmin(self.ydata)
        return self.tmin + i*self.deltat, self.ydata[i]

    def absmax(self):
        '''
        Get time and value of maximum of the absolute of data.
        '''

        tmi, mi = self.min()
        tma, ma = self.max()
        if abs(mi) > abs(ma):
            return tmi, abs(mi)
        else:
            return tma, abs(ma)

    def set_codes(
            self, network=None, station=None, location=None, channel=None,
            extra=None):

        '''
        Set network, station, location, and channel codes.
        '''

        if network is not None:
            self.network = network
        if station is not None:
            self.station = station
        if location is not None:
            self.location = location
        if channel is not None:
            self.channel = channel
        if extra is not None:
            self.extra = extra

        self._update_ids()

    def set_network(self, network):
        self.network = network
        self._update_ids()

    def set_station(self, station):
        self.station = station
        self._update_ids()

    def set_location(self, location):
        self.location = location
        self._update_ids()

    def set_channel(self, channel):
        self.channel = channel
        self._update_ids()

    def overlaps(self, tmin, tmax):
        '''
        Check if trace has overlap with a given time span.
        '''
        return not (tmax < self.tmin or self.tmax < tmin)

    def is_relevant(self, tmin, tmax, selector=None):
        '''
        Check if trace has overlap with a given time span and matches a
        condition callback. (internal use)
        '''

        return not (tmax <= self.tmin or self.tmax < tmin) \
            and (selector is None or selector(self))

    def _update_ids(self):
        '''
        Update dependent ids.
        '''

        self.full_id = (
            self.network, self.station, self.location, self.channel,
            self.tmin)
        self.nslc_id = reuse(
            (self.network, self.station, self.location, self.channel))

    def prune_from_reuse_cache(self):
        util.deuse(self.nslc_id)
        util.deuse(self.network)
        util.deuse(self.station)
        util.deuse(self.location)
        util.deuse(self.channel)

    def set_mtime(self, mtime):
        '''
        Set modification time of the trace.
        '''

        self.mtime = mtime

    def get_xdata(self):
        '''
        Create array for time axis.
        '''

        if self.ydata is None:
            raise NoData()

        return self.tmin \
            + num.arange(len(self.ydata), dtype=num.float64) * self.deltat

    def get_ydata(self):
        '''
        Get data array.
        '''

        if self.ydata is None:
            raise NoData()

        return self.ydata

    def set_ydata(self, new_ydata):
        '''
        Replace data array.
        '''

        self.drop_growbuffer()
        self.ydata = new_ydata
        self.tmax = self.tmin+(len(self.ydata)-1)*self.deltat

    def data_len(self):
        if self.ydata is not None:
            return self.ydata.size
        else:
            return int(round((self.tmax-self.tmin)/self.deltat)) + 1

    def drop_data(self):
        '''
        Forget data, make dataless trace.
        '''

        self.drop_growbuffer()
        self.ydata = None

    def drop_growbuffer(self):
        '''
        Detach the trace's grow buffer.
        '''

        self._growbuffer = None
        self._pchain = None

    def copy(self, data=True):
        '''
        Make a deep copy of the trace.
        '''

        tracecopy = copy.copy(self)
        tracecopy.drop_growbuffer()
        if data:
            tracecopy.ydata = self.ydata.copy()
        tracecopy.meta = copy.deepcopy(self.meta)
        return tracecopy

    def crop_zeros(self):
        '''
        Remove any zeros at beginning and end.
        '''

        indices = num.where(self.ydata != 0.0)[0]
        if indices.size == 0:
            raise NoData()

        ibeg = indices[0]
        iend = indices[-1]+1
        if ibeg == 0 and iend == self.ydata.size:
            return

        self.drop_growbuffer()
        self.ydata = self.ydata[ibeg:iend].copy()
        self.tmin = self.tmin+ibeg*self.deltat
        self.tmax = self.tmin+(len(self.ydata)-1)*self.deltat
        self._update_ids()

    def append(self, data):
        '''
        Append data to the end of the trace.

        To make this method efficient when successively appending very few or
        even single samples, a larger grow buffer is allocated upon first
        invocation. The trace's data is then changed to be a view into the
        currently filled portion of the grow buffer array.
        '''

        assert self.ydata.dtype == data.dtype
        newlen = data.size + self.ydata.size
        if self._growbuffer is None or self._growbuffer.size < newlen:
            self._growbuffer = num.empty(newlen*2, dtype=self.ydata.dtype)
            self._growbuffer[:self.ydata.size] = self.ydata
        self._growbuffer[self.ydata.size:newlen] = data
        self.ydata = self._growbuffer[:newlen]
        self.tmax = self.tmin + (newlen-1)*self.deltat

    def chop(
            self, tmin, tmax, inplace=True, include_last=False,
            snap=(round, round), want_incomplete=True):

        '''
        Cut the trace to given time span.

        If the ``inplace`` argument is True (the default) the trace is cut in
        place, otherwise a new trace with the cut part is returned. By
        default, the indices where to start and end the trace data array are
        determined by rounding of ``tmin`` and ``tmax`` to sampling instances
        using Python's :py:func:`round` function. This behaviour can be changed
        with the ``snap`` argument, which takes a tuple of two functions (one
        for the lower and one for the upper end) to be used instead of
        :py:func:`round`. The last sample is by default not included unless
        ``include_last`` is set to True. If the given time span exceeds the
        available time span of the trace, the available part is returned,
        unless ``want_incomplete`` is set to False - in that case, a
        :py:exc:`NoData` exception is raised. This exception is always
        raised when the requested time span does not overlap with the
        trace's time span.
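
        Example (minimal sketch): cut out a copy of the first 60 s::

            tr_cut = tr.chop(tr.tmin, tr.tmin + 60., inplace=False)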
        '''

        if want_incomplete:
            if tmax <= self.tmin-self.deltat or self.tmax+self.deltat < tmin:
                raise NoData()
        else:
            if tmin < self.tmin or self.tmax < tmax:
                raise NoData()

        ibeg = max(0, t2ind(tmin-self.tmin, self.deltat, snap[0]))
        iplus = 0
        if include_last:
            iplus = 1

        iend = min(
            self.data_len(),
            t2ind(tmax-self.tmin, self.deltat, snap[1])+iplus)

        if ibeg >= iend:
            raise NoData()

        obj = self
        if not inplace:
            obj = self.copy(data=False)

        self.drop_growbuffer()
        if self.ydata is not None:
            obj.ydata = self.ydata[ibeg:iend].copy()
        else:
            obj.ydata = None

        obj.tmin = obj.tmin+ibeg*obj.deltat
        obj.tmax = obj.tmin+((iend-ibeg)-1)*obj.deltat

        obj._update_ids()

        return obj

    def downsample(
            self, ndecimate, snap=False, demean=False, ftype='fir-remez',
            cut=False):

        '''
        Downsample (decimate) trace by a given integer factor.

        Antialiasing filter details:

        * ``iir``: A Chebyshev type I digital filter of order 8 with maximum
          ripple of 0.05 dB in the passband.
        * ``fir``: A FIR filter using a Hamming window and 31 taps and a
          well-behaved phase response.
        * ``fir-remez``: A FIR filter based on the Remez exchange algorithm
          with ``45*ndecimate`` taps and a cutoff at 75% Nyquist frequency.

        Comparison of the digital filters:

        .. figure :: ../../static/downsampling-filter-comparison.png
            :width: 60%
            :alt: Comparison of the downsampling filters.

        See also :py:meth:`Trace.downsample_to`.

        :param ndecimate:
            Decimation factor, avoid values larger than 8.
        :type ndecimate:
            int

        :param snap:
            Whether to put the new sampling instants closest to multiples of
            the sampling rate (according to absolute time).
        :type snap:
            bool

        :param demean:
            Whether to demean the signal before filtering.
        :type demean:
            bool

        :param ftype:
            Which filter to use, choose from ``'iir'``, ``'fir'``,
            ``'fir-remez'``. Default is ``'fir-remez'``.

        :param cut:
            Whether to cut off samples in the beginning of the trace which
            are polluted by artifacts of the anti-aliasing filter.
        :type cut:
            bool
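
        Example (minimal sketch): decimate a 100 Hz trace by a factor of 4,
        to 25 Hz::

            tr.downsample(4, demean=True)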
        '''
        newdeltat = self.deltat*ndecimate
        b, a, n = util.decimate_coeffs(ndecimate, None, ftype)
        if snap:
            ilag = int(round((math.ceil(
                (self.tmin+(n//2 if cut else 0)*self.deltat) /
                newdeltat) * newdeltat - self.tmin) / self.deltat))
        else:
            ilag = (n//2 if cut else 0)

        data = self.ydata.astype(num.float64)
        if data.size != 0:
            if demean:
                data -= num.mean(data)
            y = signal.lfilter(b, a, data)
            self.ydata = y[n//2+ilag::ndecimate].copy()
        else:
            self.ydata = data

        self.tmin += ilag * self.deltat
        self.deltat = reuse(self.deltat*ndecimate)
        self.tmax = self.tmin+(len(self.ydata)-1)*self.deltat
        self._update_ids()

    def downsample_to(
            self, deltat, snap=False, allow_upsample_max=1, demean=False,
            ftype='fir-remez', cut=False):

        '''
        Downsample to given sampling rate.

        Tries to downsample the trace to a target sampling interval of
        ``deltat``. This runs :py:meth:`downsample` one or several times. If
        ``allow_upsample_max`` is set to a value larger than 1, intermediate
        upsampling steps are allowed, in order to increase the number of
        possible downsampling ratios.

        If the requested ratio is not supported, an exception of type
        :py:exc:`pyrocko.util.UnavailableDecimation` is raised.

        The downsampled trace will be shorter than the input trace because the
        anti-aliasing filter introduces edge effects. If `cut` is selected,
        additionally, polluted samples in the beginning of the trace are
        removed. The approximate amount of cutoff which will be produced by a
        given downsampling configuration can be estimated with
        :py:func:`downsample_tpad`.

        See also: :meth:`Trace.downsample`.

        :param deltat:
            Desired sampling interval in [s].
        :type deltat:
            float

        :param allow_upsample_max:
            Maximum allowed upsampling factor of the signal to achieve the
            desired sampling interval. Default is ``1``.
        :type allow_upsample_max:
            int

        :param snap:
            Whether to put the new sampling instants closest to multiples of
            the sampling rate (according to absolute time).
        :type snap:
            bool

        :param demean:
            Whether to demean the signal before filtering.
        :type demean:
            bool

        :param ftype:
            Which filter to use, choose from ``'iir'``, ``'fir'``,
            ``'fir-remez'``. Default is ``'fir-remez'``.

        :param cut:
            Whether to cut off samples in the beginning of the trace which
            are polluted by artifacts of the anti-aliasing filter.
        :type cut:
            bool
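
        Example (minimal sketch): downsample a 50 Hz trace to 1 Hz, cutting
        off filter artifacts::

            tr.downsample_to(1.0, demean=True, cut=True)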
        '''

        upsratio, deci_seq = _configure_downsampling(
            self.deltat, deltat, allow_upsample_max)

        if demean:
            self.drop_growbuffer()
            self.ydata = self.ydata.astype(num.float64)
            self.ydata -= num.mean(self.ydata)

        if upsratio > 1:
            self.drop_growbuffer()
            ydata = self.ydata
            self.ydata = num.zeros(
                ydata.size*upsratio-(upsratio-1), ydata.dtype)
            self.ydata[::upsratio] = ydata
            for i in range(1, upsratio):
                self.ydata[i::upsratio] = \
                    float(i)/upsratio * ydata[:-1] \
                    + float(upsratio-i)/upsratio * ydata[1:]
            self.deltat = self.deltat/upsratio

        for i, ndecimate in enumerate(deci_seq):
            self.downsample(
                ndecimate, snap=snap, demean=False, ftype=ftype, cut=cut)

    def resample(self, deltat):
        '''
        Resample to given sampling rate ``deltat``.

        Resampling is performed in the frequency domain.
        '''

        ndata = self.ydata.size
        ntrans = nextpow2(ndata)
        fntrans2 = ntrans * self.deltat/deltat
        ntrans2 = int(round(fntrans2))
        deltat2 = self.deltat * float(ntrans)/float(ntrans2)
        ndata2 = int(round(ndata*self.deltat/deltat2))
        if abs(fntrans2 - ntrans2) > 1e-7:
            logger.warning(
                'resample: requested deltat %g could not be matched exactly: '
                '%g' % (deltat, deltat2))

        data = self.ydata
        data_pad = num.zeros(ntrans, dtype=float)
        data_pad[:ndata] = data
        fdata = num.fft.rfft(data_pad)
        fdata2 = num.zeros((ntrans2+1)//2, dtype=fdata.dtype)
        n = min(fdata.size, fdata2.size)
        fdata2[:n] = fdata[:n]
        data2 = num.fft.irfft(fdata2)
        data2 = data2[:ndata2]
        data2 *= float(ntrans2) / float(ntrans)
        self.deltat = deltat2
        self.set_ydata(data2)

    def resample_simple(self, deltat):
        tyear = 3600*24*365.

        if deltat == self.deltat:
            return

        if abs(self.deltat - deltat) * tyear/deltat < deltat:
            logger.warning(
                'resample_simple: less than one sample would have to be '
                'inserted/deleted per year. Doing nothing.')
            return

        ninterval = int(round(deltat / (self.deltat - deltat)))
        if abs(ninterval) < 20:
            logger.error(
                'resample_simple: sample insertion/deletion interval less '
                'than 20. results would be erroneous.')
            raise ResamplingFailed()

        delete = False
        if ninterval < 0:
            ninterval = - ninterval
            delete = True

        tyearbegin = util.year_start(self.tmin)

        nmin = int(round((self.tmin - tyearbegin)/deltat))

        ibegin = (((nmin-1)//ninterval)+1) * ninterval - nmin
        nindices = (len(self.ydata) - ibegin - 1) // ninterval + 1
        if nindices > 0:
            indices = ibegin + num.arange(nindices) * ninterval
            data_split = num.split(self.ydata, indices)
            data = []
            for ln, h in zip(data_split[:-1], data_split[1:]):
                if delete:
                    ln = ln[:-1]

                data.append(ln)
                if not delete:
                    if ln.size == 0:
                        v = h[0]
                    else:
                        v = 0.5*(ln[-1] + h[0])
                    data.append(num.array([v], dtype=ln.dtype))

            data.append(data_split[-1])

            ydata_new = num.concatenate(data)

            self.tmin = tyearbegin + nmin * deltat
            self.deltat = deltat
            self.set_ydata(ydata_new)

    def stretch(self, tmin_new, tmax_new):
        '''
        Stretch signal while preserving sample rate using sinc interpolation.

        :param tmin_new: new time of first sample
        :param tmax_new: new time of last sample

        This method can be used to correct for a small linear time drift or to
        introduce sub-sample time shifts. The amount of stretching is limited
        to 10% by the implementation and is expected to be much smaller than
        that by the approximations used.
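
        Example (minimal sketch): apply a sub-sample time shift of a quarter
        sample::

            tr.stretch(tr.tmin + 0.25*tr.deltat, tr.tmax + 0.25*tr.deltat)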
        '''

        i_control = num.array([0, self.ydata.size-1], dtype=num.int64)
        t_control = num.array([tmin_new, tmax_new], dtype=float)

        r = (tmax_new - tmin_new) / self.deltat + 1.0
        n_new = int(round(r))
        if abs(n_new - r) > 0.001:
            n_new = int(math.floor(r))

        assert n_new >= 2

        tmax_new = tmin_new + (n_new-1) * self.deltat

        ydata_new = num.empty(n_new, dtype=float)
        signal_ext.antidrift(i_control, t_control,
                             self.ydata.astype(float),
                             tmin_new, self.deltat, ydata_new)

        self.tmin = tmin_new
        self.set_ydata(ydata_new)
        self._update_ids()

    def nyquist_check(self, frequency, intro='Corner frequency', warn=True,
                      raise_exception=False):

        '''
        Check if a given frequency is above the Nyquist frequency of the
        trace.

        :param frequency: frequency to check in [Hz]
        :param intro: string used to introduce the warning/error message
        :param warn: whether to emit a warning
        :param raise_exception: whether to raise an :py:exc:`AboveNyquist`
            exception.
        '''

        if frequency >= 0.5/self.deltat:
            message = '%s (%g Hz) is equal to or higher than the Nyquist ' \
                'frequency (%g Hz). (Trace %s)' \
                % (intro, frequency, 0.5/self.deltat, self.name())
            if warn:
                logger.warning(message)
            if raise_exception:
                raise AboveNyquist(message)

    def lowpass(self, order, corner, nyquist_warn=True,
                nyquist_exception=False, demean=True):

        '''
        Apply Butterworth lowpass to the trace.

        :param order: order of the filter
        :param corner: corner frequency of the filter

        Mean is removed before filtering.
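
        Example (minimal sketch): 4th order Butterworth lowpass with a
        corner at 2 Hz::

            tr.lowpass(4, 2.0)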
        '''

        self.nyquist_check(
            corner, 'Corner frequency of lowpass', nyquist_warn,
            nyquist_exception)

        (b, a) = _get_cached_filter_coeffs(
            order, [corner*2.0*self.deltat], btype='low')

        if len(a) != order+1 or len(b) != order+1:
            logger.warning(
                'Erroneous filter coefficients returned by '
                'scipy.signal.butter(). You may need to downsample the '
                'signal before filtering.')

        data = self.ydata.astype(num.float64)
        if demean:
            data -= num.mean(data)
        self.drop_growbuffer()
        self.ydata = signal.lfilter(b, a, data)

    def highpass(self, order, corner, nyquist_warn=True,
                 nyquist_exception=False, demean=True):

        '''
        Apply Butterworth highpass to the trace.

        :param order: order of the filter
        :param corner: corner frequency of the filter

        Mean is removed before filtering.
        '''

        self.nyquist_check(
            corner, 'Corner frequency of highpass', nyquist_warn,
            nyquist_exception)

        (b, a) = _get_cached_filter_coeffs(
            order, [corner*2.0*self.deltat], btype='high')

        data = self.ydata.astype(num.float64)
        if len(a) != order+1 or len(b) != order+1:
            logger.warning(
                'Erroneous filter coefficients returned by '
                'scipy.signal.butter(). You may need to downsample the '
                'signal before filtering.')
        if demean:
            data -= num.mean(data)
        self.drop_growbuffer()
        self.ydata = signal.lfilter(b, a, data)

    def bandpass(self, order, corner_hp, corner_lp, demean=True):
        '''
        Apply Butterworth bandpass to the trace.

        :param order: order of the filter
        :param corner_hp: lower corner frequency of the filter
        :param corner_lp: upper corner frequency of the filter

        Mean is removed before filtering.
        '''

        self.nyquist_check(corner_hp, 'Lower corner frequency of bandpass')
        self.nyquist_check(corner_lp, 'Higher corner frequency of bandpass')
        (b, a) = _get_cached_filter_coeffs(
            order,
            [corner*2.0*self.deltat for corner in (corner_hp, corner_lp)],
            btype='band')
        data = self.ydata.astype(num.float64)
        if demean:
            data -= num.mean(data)
        self.drop_growbuffer()
        self.ydata = signal.lfilter(b, a, data)

    def bandstop(self, order, corner_hp, corner_lp, demean=True):
        '''
        Apply bandstop (attenuates frequencies in band) to the trace.

        :param order: order of the filter
        :param corner_hp: lower corner frequency of the filter
        :param corner_lp: upper corner frequency of the filter

        Mean is removed before filtering.
        '''

        self.nyquist_check(corner_hp, 'Lower corner frequency of bandstop')
        self.nyquist_check(corner_lp, 'Higher corner frequency of bandstop')
        (b, a) = _get_cached_filter_coeffs(
            order,
            [corner*2.0*self.deltat for corner in (corner_hp, corner_lp)],
            btype='bandstop')
        data = self.ydata.astype(num.float64)
        if demean:
            data -= num.mean(data)
        self.drop_growbuffer()
        self.ydata = signal.lfilter(b, a, data)

    def envelope(self, inplace=True):
        '''
        Calculate the envelope of the trace.

        :param inplace: calculate envelope in place

        The calculation follows:

        .. math::

            Y' = \\sqrt{Y^2+H(Y)^2}

        where H is the Hilbert-Transform of the signal Y.
        '''

        y = self.ydata.astype(float)
        env = num.abs(hilbert(y))
        if inplace:
            self.drop_growbuffer()
            self.ydata = env
        else:
            tr = self.copy(data=False)
            tr.ydata = env
            return tr

    def taper(self, taperer, inplace=True, chop=False):
        '''
        Apply a :py:class:`Taper` to the trace.

        :param taperer: instance of :py:class:`Taper` subclass
        :param inplace: apply taper inplace
        :param chop: if ``True``: exclude tapered parts from the resulting
            trace
        '''

        if not inplace:
            tr = self.copy()
        else:
            tr = self

        if chop:
            i, n = taperer.span(tr.ydata, tr.tmin, tr.deltat)
            tr.shift(i*tr.deltat)
            tr.set_ydata(tr.ydata[i:i+n])

        taperer(tr.ydata, tr.tmin, tr.deltat)

        if not inplace:
            return tr

    def whiten(self, order=6):
        '''
        Whiten signal in time domain using autoregression and recursive
        filter.

        :param order: order of the autoregression process
        '''

        b, a = self.whitening_coefficients(order)
        self.drop_growbuffer()
        self.ydata = signal.lfilter(b, a, self.ydata)

    def whitening_coefficients(self, order=6):
        ar = yulewalker(self.ydata, order)
        b, a = [1.] + ar.tolist(), [1.]
        return b, a

    def ampspec_whiten(
            self,
            width,
            td_taper='auto',
            fd_taper='auto',
            pad_to_pow2=True,
            demean=True):

        '''
        Whiten signal via frequency domain using moving average on amplitude
        spectra.

        :param width: width of smoothing kernel [Hz]
        :param td_taper: time domain taper, object of type :py:class:`Taper`
            or ``None`` or ``'auto'``.
        :param fd_taper: frequency domain taper, object of type
            :py:class:`Taper` or ``None`` or ``'auto'``.
        :param pad_to_pow2: whether to pad the signal with zeros up to a
            length of 2^n
        :param demean: whether to demean the signal before tapering

        The signal is first demeaned and then tapered using ``td_taper``.
        Then, the spectrum is calculated and inversely weighted with a
        smoothed version of its amplitude spectrum. A moving average is used
        for the smoothing. The smoothed spectrum is then tapered using
        ``fd_taper``. Finally, the smoothed and tapered spectrum is
        back-transformed into the time domain.

        If ``td_taper`` is set to ``'auto'``, ``CosFader(1.0/width)`` is
        used. If ``fd_taper`` is set to ``'auto'``, ``CosFader(width)`` is
        used.
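
        Example (minimal sketch): whiten using a 0.5 Hz wide smoothing
        window::

            tr.ampspec_whiten(0.5)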
        '''

        ndata = self.data_len()

        if pad_to_pow2:
            ntrans = nextpow2(ndata)
        else:
            ntrans = ndata

        df = 1./(ntrans*self.deltat)
        nw = int(round(width/df))
        if ndata//2+1 <= nw:
            raise TraceTooShort(
                'Samples in trace: %s, samples needed: %s' % (ndata, nw))

        if td_taper == 'auto':
            td_taper = CosFader(1./width)

        if fd_taper == 'auto':
            fd_taper = CosFader(width)

        if td_taper:
            self.taper(td_taper)

        ydata = self.get_ydata().astype(float)
        if demean:
            ydata -= ydata.mean()

        spec = num.fft.rfft(ydata, ntrans)

        amp = num.abs(spec)
        nspec = amp.size
        csamp = num.cumsum(amp)
        amp_smoothed = num.empty(nspec, dtype=csamp.dtype)
        n1, n2 = nw//2, nw//2 + nspec - nw
        amp_smoothed[n1:n2] = (csamp[nw:] - csamp[:-nw]) / nw
        amp_smoothed[:n1] = amp_smoothed[n1]
        amp_smoothed[n2:] = amp_smoothed[n2-1]

        denom = amp_smoothed * amp
        numer = amp
        eps = num.mean(denom) * 1e-9
        if eps == 0.0:
            eps = 1e-9

        numer += eps
        denom += eps
        spec *= numer/denom

        if fd_taper:
            fd_taper(spec, 0., df)

        ydata = num.fft.irfft(spec)
        self.set_ydata(ydata[:ndata])

    def _get_cached_freqs(self, nf, deltaf):
        ck = (nf, deltaf)
        if ck not in Trace.cached_frequencies:
            Trace.cached_frequencies[ck] = deltaf * num.arange(
                nf, dtype=float)

        return Trace.cached_frequencies[ck]

    def bandpass_fft(self, corner_hp, corner_lp):
        '''
        Apply boxcar bandpass to trace (in spectral domain).
        '''

        n = len(self.ydata)
        n2 = nextpow2(n)
        data = num.zeros(n2, dtype=num.float64)
        data[:n] = self.ydata
        fdata = num.fft.rfft(data)
        freqs = self._get_cached_freqs(len(fdata), 1./(self.deltat*n2))
        fdata[0] = 0.0
        fdata *= num.logical_and(corner_hp < freqs, freqs < corner_lp)
        data = num.fft.irfft(fdata)
        self.drop_growbuffer()
        self.ydata = data[:n]

    def shift(self, tshift):
        '''
        Time shift the trace.
        '''

        self.tmin += tshift
        self.tmax += tshift
        self._update_ids()

    def snap(self, inplace=True, interpolate=False):
        '''
        Shift trace samples to nearest even multiples of the sampling rate.

        :param inplace: (boolean) snap traces inplace

        If ``inplace`` is ``False`` and the snapped trace's ``tmin`` and
        ``tmax`` each differ from the original's by less than 0.01 x
        ``deltat``, :py:func:`snap` returns the unsnapped original trace.
        '''
1270 '''
1272 tmin = round(self.tmin/self.deltat)*self.deltat
1273 tmax = tmin + (self.ydata.size-1)*self.deltat
1275 if inplace:
1276 xself = self
1277 else:
1278 if abs(self.tmin - tmin) < 1e-2*self.deltat and \
1279 abs(self.tmax - tmax) < 1e-2*self.deltat:
1280 return self
1282 xself = self.copy()
1284 if interpolate:
1285 n = xself.data_len()
1286 ydata_new = num.empty(n, dtype=float)
1287 i_control = num.array([0, n-1], dtype=num.int64)
1288 tref = tmin
1289 t_control = num.array(
1290 [float(xself.tmin-tref), float(xself.tmax-tref)],
1291 dtype=float)
1293 signal_ext.antidrift(i_control, t_control,
1294 xself.ydata.astype(float),
1295 float(tmin-tref), xself.deltat, ydata_new)
1297 xself.ydata = ydata_new
1299 xself.tmin = tmin
1300 xself.tmax = tmax
1301 xself._update_ids()
1303 return xself
1305 def fix_deltat_rounding_errors(self):
1306 '''
1307 Try to undo sampling rate rounding errors.
1309 See :py:func:`fix_deltat_rounding_errors`.
1310 '''
1312 self.deltat = fix_deltat_rounding_errors(self.deltat)
1313 self.tmax = self.tmin + (self.data_len() - 1) * self.deltat
1315 def sta_lta_centered(self, tshort, tlong, quad=True, scalingmethod=1):
1316 '''
1317 Run special STA/LTA filter where the short time window is centered on
1318 the long time window.
1320 :param tshort: length of short time window in [s]
1321 :param tlong: length of long time window in [s]
1322 :param quad: whether to square the data prior to applying the STA/LTA
1323 filter
1324 :param scalingmethod: integer key to select how output values are
1325 scaled / normalized (``1``, ``2``, or ``3``)
1327 ============= ====================================== ===========
1328 Scalingmethod Implementation Range
1329 ============= ====================================== ===========
1330 ``1`` As/Al* Ts/Tl [0,1]
1331 ``2`` (As/Al - 1) / (Tl/Ts - 1) [-Ts/Tl,1]
1332 ``3`` Like ``2`` but clipping range at zero [0,1]
1333 ============= ====================================== ===========
1335 '''
1337 nshort = int(round(tshort/self.deltat))
1338 nlong = int(round(tlong/self.deltat))
1340 assert nshort < nlong
1341 if nlong > len(self.ydata):
1342 raise TraceTooShort(
1343 'Samples in trace: %s, samples needed: %s'
1344 % (len(self.ydata), nlong))
1346 if quad:
1347 sqrdata = self.ydata**2
1348 else:
1349 sqrdata = self.ydata
1351 mavg_short = moving_avg(sqrdata, nshort)
1352 mavg_long = moving_avg(sqrdata, nlong)
1354 self.drop_growbuffer()
1356 if scalingmethod not in (1, 2, 3):
1357 raise Exception('Invalid argument to scalingrange argument.')
1359 if scalingmethod == 1:
1360 self.ydata = mavg_short/mavg_long * float(nshort)/float(nlong)
1361 elif scalingmethod in (2, 3):
1362 self.ydata = (mavg_short/mavg_long - 1.) \
1363 / ((float(nlong)/float(nshort)) - 1)
1365 if scalingmethod == 3:
1366 self.ydata = num.maximum(self.ydata, 0.)
1368 def sta_lta_right(self, tshort, tlong, quad=True, scalingmethod=1):
1369 '''
1370 Run special STA/LTA filter where the short time window is overlapping
1371 with the last part of the long time window.
1373 :param tshort: length of short time window in [s]
1374 :param tlong: length of long time window in [s]
1375 :param quad: whether to square the data prior to applying the STA/LTA
1376 filter
1377 :param scalingmethod: integer key to select how output values are
1378 scaled / normalized (``1``, ``2``, or ``3``)
1380 ============= ====================================== ===========
1381 Scalingmethod Implementation Range
1382 ============= ====================================== ===========
1383 ``1`` As/Al* Ts/Tl [0,1]
1384 ``2`` (As/Al - 1) / (Tl/Ts - 1) [-Ts/Tl,1]
1385 ``3`` Like ``2`` but clipping range at zero [0,1]
1386 ============= ====================================== ===========
1388 With ``scalingmethod=1``, the values produced by this variant of the
1389 STA/LTA are equivalent to
1391 .. math::
1392 s_i = \\frac{s}{l} \\frac{\\frac{1}{s}\\sum_{j=i}^{i+s-1} f_j}
1393 {\\frac{1}{l}\\sum_{j=i+s-l}^{i+s-1} f_j}
1395 where :math:`f_j` are the input samples, :math:`s` are the number of
1396 samples in the short time window and :math:`l` are the number of
1397 samples in the long time window.
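
        Example (minimal sketch): classic trigger characteristic with a 1 s
        short and a 10 s long window::

            tr.sta_lta_right(1., 10., scalingmethod=2)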
        '''

        n = self.data_len()
        tmin = self.tmin

        nshort = max(1, int(round(tshort/self.deltat)))
        nlong = max(1, int(round(tlong/self.deltat)))

        assert nshort < nlong

        if nlong > len(self.ydata):
            raise TraceTooShort(
                'Samples in trace: %s, samples needed: %s'
                % (len(self.ydata), nlong))

        if quad:
            sqrdata = self.ydata**2
        else:
            sqrdata = self.ydata

        nshift = int(math.floor(0.5 * (nlong - nshort)))
        if nlong % 2 != 0 and nshort % 2 == 0:
            nshift += 1

        mavg_short = moving_avg(sqrdata, nshort)[nshift:]
        mavg_long = moving_avg(sqrdata, nlong)[:sqrdata.size-nshift]

        self.drop_growbuffer()

        if scalingmethod not in (1, 2, 3):
            raise Exception('Invalid argument to scalingrange argument.')

        if scalingmethod == 1:
            ydata = mavg_short/mavg_long * nshort/nlong
        elif scalingmethod in (2, 3):
            ydata = (mavg_short/mavg_long - 1.) \
                / ((float(nlong)/float(nshort)) - 1)

            if scalingmethod == 3:
                ydata = num.maximum(ydata, 0.)

        self.set_ydata(ydata)

        self.shift((math.ceil(0.5*nlong) - nshort + 1) * self.deltat)

        self.chop(
            tmin + (nlong - nshort) * self.deltat,
            tmin + (n - nshort) * self.deltat)

    def peaks(self, threshold, tsearch,
              deadtime=False,
              nblock_duration_detection=100):

        '''
        Detect peaks above a given threshold (method 1).

        From every instant where the signal rises above ``threshold``, a time
        length of ``tsearch`` seconds is searched for a maximum. A list with
        tuples (time, value) for each detected peak is returned. The
        ``deadtime`` argument turns on a special deadtime duration detection
        algorithm useful in combination with recursive STA/LTA filters.
        '''

        y = self.ydata
        above = num.where(y > threshold, 1, 0)
        deriv = num.zeros(y.size, dtype=num.int8)
        deriv[1:] = above[1:]-above[:-1]
        itrig_positions = num.nonzero(deriv > 0)[0]
        tpeaks = []
        apeaks = []
        tzeros = []
        tzero = self.tmin

        for itrig_pos in itrig_positions:
            ibeg = itrig_pos
            iend = min(
                len(self.ydata),
                itrig_pos + int(math.ceil(tsearch/self.deltat)))
            ipeak = num.argmax(y[ibeg:iend])
            tpeak = self.tmin + (ipeak+ibeg)*self.deltat
            apeak = y[ibeg+ipeak]

            if tpeak < tzero:
                continue

            if deadtime:
                ibeg = itrig_pos
                iblock = 0
                nblock = nblock_duration_detection
                totalsum = 0.
                while True:
                    if ibeg+iblock*nblock >= len(y):
                        tzero = self.tmin + (len(y)-1) * self.deltat
                        break

                    logy = num.log(
                        y[ibeg+iblock*nblock:ibeg+(iblock+1)*nblock])
                    logy[0] += totalsum
                    ysum = num.cumsum(logy)
                    totalsum = ysum[-1]
                    below = num.where(ysum <= 0., 1, 0)
                    deriv = num.zeros(ysum.size, dtype=num.int8)
                    deriv[1:] = below[1:]-below[:-1]
                    izero_positions = num.nonzero(deriv > 0)[0] \
                        + iblock*nblock
                    if len(izero_positions) > 0:
                        tzero = self.tmin + self.deltat * (
                            ibeg + izero_positions[0])
                        break
                    iblock += 1
            else:
                tzero = ibeg*self.deltat + self.tmin + tsearch

            tpeaks.append(tpeak)
            apeaks.append(apeak)
            tzeros.append(tzero)

        if deadtime:
            return tpeaks, apeaks, tzeros
        else:
            return tpeaks, apeaks

    def peaks2(self, threshold, tsearch):

        '''
        Detect peaks above a given threshold (method 2).

        This variant of peak detection is a bit more robust (and slower) than
        the one implemented in :py:meth:`Trace.peaks`. First all samples with
        ``a[i-1] < a[i] > a[i+1]`` are masked as potential peaks. From these,
        iteratively the one with the maximum amplitude ``a[j]`` and time
        ``t[j]`` is chosen and potential peaks within
        ``t[j] - tsearch, t[j] + tsearch``
        are discarded. The algorithm stops when ``a[j] < threshold`` or when
        no more potential peaks are left.
        '''

        a = num.copy(self.ydata)

        amin = num.min(a)

        a[0] = amin
        a[1: -1][num.diff(a, 2) <= 0.] = amin
        a[-1] = amin

        data = []
        while True:
            imax = num.argmax(a)
            amax = a[imax]

            if amax < threshold or amax == amin:
                break

            data.append((self.tmin + imax * self.deltat, amax))

            ntsearch = int(round(tsearch / self.deltat))
            a[max(imax-ntsearch//2, 0):min(imax+ntsearch//2, a.size)] = amin

        if data:
            data.sort()
            tpeaks, apeaks = list(zip(*data))
        else:
            tpeaks, apeaks = [], []

        return tpeaks, apeaks

    def extend(self, tmin=None, tmax=None, fillmethod='zeros'):
        '''
        Extend trace to given span.

        :param tmin: begin time of new span
        :param tmax: end time of new span
        :param fillmethod: ``'zeros'``, ``'repeat'``, ``'mean'``, or
            ``'median'``
        '''

        nold = self.ydata.size

        if tmin is not None:
            nl = min(0, int(round((tmin-self.tmin)/self.deltat)))
        else:
            nl = 0

        if tmax is not None:
            nh = max(nold - 1, int(round((tmax-self.tmin)/self.deltat)))
        else:
            nh = nold - 1

        n = nh - nl + 1
        data = num.zeros(n, dtype=self.ydata.dtype)
        data[-nl:-nl + nold] = self.ydata
        if self.ydata.size >= 1:
            if fillmethod == 'repeat':
                data[:-nl] = self.ydata[0]
                data[-nl + nold:] = self.ydata[-1]
            elif fillmethod == 'median':
                v = num.median(self.ydata)
                data[:-nl] = v
                data[-nl + nold:] = v
            elif fillmethod == 'mean':
                v = num.mean(self.ydata)
                data[:-nl] = v
                data[-nl + nold:] = v

        self.drop_growbuffer()
        self.ydata = data

        self.tmin += nl * self.deltat
        self.tmax = self.tmin + (self.ydata.size - 1) * self.deltat

        self._update_ids()

    def transfer(self,
                 tfade=0.,
                 freqlimits=None,
                 transfer_function=None,
                 cut_off_fading=True,
                 demean=True,
                 invert=False):

        '''
        Return new trace with transfer function applied (convolution).

        :param tfade: rise/fall time in seconds of taper applied in timedomain
            at both ends of trace.
        :param freqlimits: 4-tuple with corner frequencies in Hz.
        :param transfer_function: FrequencyResponse object; must provide a
            method 'evaluate(freqs)', which returns the transfer function
            coefficients at the frequencies 'freqs'.
        :param cut_off_fading: whether to cut off rise/fall interval in output
            trace.
        :param demean: remove mean before applying transfer function
        :param invert: set to True to do a deconvolution
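
        Example (minimal sketch, assuming ``resp`` is a
        :py:class:`~pyrocko.response.FrequencyResponse` describing the
        instrument response)::

            restituted = tr.transfer(
                tfade=10.,
                freqlimits=(0.01, 0.02, 10., 20.),
                transfer_function=resp,
                invert=True)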
        '''

        if transfer_function is None:
            transfer_function = g_one_response

        if self.tmax - self.tmin <= tfade*2.:
            raise TraceTooShort(
                'Trace %s.%s.%s.%s too short for fading length setting. '
                'trace length = %g, fading length = %g'
                % (self.nslc_id + (self.tmax-self.tmin, tfade)))

        if freqlimits is None and (
                transfer_function is None or transfer_function.is_scalar()):

            # special case for flat responses

            output = self.copy()
            data = self.ydata
            ndata = data.size

            if transfer_function is not None:
                c = num.abs(transfer_function.evaluate(num.ones(1))[0])

                if invert:
                    c = 1.0/c

                data *= c

            if tfade != 0.0:
                data *= costaper(
                    0., tfade, self.deltat*(ndata-1)-tfade, self.deltat*ndata,
                    ndata, self.deltat)

            output.ydata = data

        else:
            ndata = self.ydata.size
            ntrans = nextpow2(ndata*1.2)
            coeffs = self._get_tapered_coeffs(
                ntrans, freqlimits, transfer_function, invert=invert,
                demean=demean)

            data = self.ydata

            data_pad = num.zeros(ntrans, dtype=float)
            data_pad[:ndata] = data
            if demean:
                data_pad[:ndata] -= data.mean()

            if tfade != 0.0:
                data_pad[:ndata] *= costaper(
                    0., tfade, self.deltat*(ndata-1)-tfade, self.deltat*ndata,
                    ndata, self.deltat)

            fdata = num.fft.rfft(data_pad)
            fdata *= coeffs
            ddata = num.fft.irfft(fdata)
            output = self.copy()
            output.ydata = ddata[:ndata]

        if cut_off_fading and tfade != 0.0:
            try:
                output.chop(output.tmin+tfade, output.tmax-tfade, inplace=True)
            except NoData:
                raise TraceTooShort(
                    'Trace %s.%s.%s.%s too short for fading length setting. '
                    'trace length = %g, fading length = %g'
                    % (self.nslc_id + (self.tmax-self.tmin, tfade)))
        else:
            output.ydata = output.ydata.copy()

        return output

    def differentiate(self, n=1, order=4, inplace=True):
        '''
        Approximate first or second derivative of the trace.

        :param n: 1 for first derivative, 2 for second
        :param order: order of the approximation (2 and 4 are supported)
        :param inplace: if ``True`` the trace is differentiated in place,
            otherwise a new trace object with the derivative is returned.

        Raises :py:exc:`ValueError` for unsupported `n` or `order`.

        See :py:func:`~pyrocko.util.diff_fd` for implementation details.
        '''

        ddata = util.diff_fd(n, order, self.deltat, self.ydata)

        if inplace:
            self.ydata = ddata
        else:
            output = self.copy(data=False)
            output.set_ydata(ddata)
            return output

    def drop_chain_cache(self):
        if self._pchain:
            self._pchain.clear()

    def init_chain(self):
        self._pchain = pchain.Chain(
            do_downsample,
            do_extend,
            do_pre_taper,
            do_fft,
            do_filter,
            do_ifft)

    def run_chain(self, tmin, tmax, deltat, setup, nocache):
        if setup.domain == 'frequency_domain':
            _, _, data = self._pchain(
                (self, deltat),
                (tmin, tmax),
                (setup.taper,),
                (setup.filter,),
                (setup.filter,),
                nocache=nocache)

            return num.abs(data), num.abs(data)

        else:
            processed = self._pchain(
                (self, deltat),
                (tmin, tmax),
                (setup.taper,),
                (setup.filter,),
                (setup.filter,),
                (),
                nocache=nocache)

            if setup.domain == 'time_domain':
                data = processed.get_ydata()

            elif setup.domain == 'envelope':
                processed = processed.envelope(inplace=False)

            elif setup.domain == 'absolute':
                processed.set_ydata(num.abs(processed.get_ydata()))

            return processed.get_ydata(), processed

    def misfit(self, candidate, setup, nocache=False, debug=False):
        '''
        Calculate misfit and normalization factor against candidate trace.

        :param candidate: :py:class:`Trace` object
        :param setup: :py:class:`MisfitSetup` object
        :returns: tuple ``(m, n)``, where m is the misfit value and n is the
            normalization divisor

        If the sampling rates of ``self`` and ``candidate`` differ, the trace
        with the higher sampling rate will be downsampled.
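
        Example (minimal sketch, assuming ``setup`` is a
        :py:class:`MisfitSetup` configured with e.g. a taper, a filter,
        ``domain='time_domain'`` and ``norm=2``)::

            m, n = tr_reference.misfit(tr_candidate, setup)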
        '''

        a = self
        b = candidate

        for tr in (a, b):
            if not tr._pchain:
                tr.init_chain()

        deltat = max(a.deltat, b.deltat)
        tmin = min(a.tmin, b.tmin) - deltat
        tmax = max(a.tmax, b.tmax) + deltat

        adata, aproc = a.run_chain(tmin, tmax, deltat, setup, nocache)
        bdata, bproc = b.run_chain(tmin, tmax, deltat, setup, nocache)

        if setup.domain != 'cc_max_norm':
            m, n = Lx_norm(bdata, adata, norm=setup.norm)
        else:
            ctr = correlate(aproc, bproc, mode='full', normalization='normal')
            ccmax = ctr.max()[1]
            m = 0.5 - 0.5 * ccmax
            n = 0.5

        if debug:
            return m, n, aproc, bproc
        else:
            return m, n

    def spectrum(self, pad_to_pow2=False, tfade=None, ntrans_min=None):
        '''
        Get FFT spectrum of trace.

        :param pad_to_pow2: whether to zero-pad the data to next larger
            power-of-two length
        :param tfade: ``None`` or a time length in seconds, to apply cosine
            shaped tapers to both ends of the trace

        :returns: a tuple with (frequencies, values)
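
        Example (minimal sketch): get the amplitude spectrum::

            freqs, values = tr.spectrum()
            amps = num.abs(values)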
        '''

        if ntrans_min is None:
            ndata = self.ydata.size
        else:
            ndata = ntrans_min

        if pad_to_pow2:
            ntrans = nextpow2(ndata)
        else:
            ntrans = ndata

        if tfade is None:
            ydata = self.ydata
        else:
            ydata = self.ydata * costaper(
                0., tfade, self.deltat*(ndata-1)-tfade, self.deltat*ndata,
                ndata, self.deltat)

        fydata = num.fft.rfft(ydata, ntrans)
        df = 1./(ntrans*self.deltat)
        fxdata = num.arange(len(fydata))*df
        return fxdata, fydata

    def multi_filter(self, filter_freqs, bandwidth):

        class Gauss(FrequencyResponse):
            f0 = Float.T()
            a = Float.T(default=1.0)

            def __init__(self, f0, a=1.0, **kwargs):
                FrequencyResponse.__init__(self, f0=f0, a=a, **kwargs)

            def evaluate(self, freqs):
                omega0 = 2.*math.pi*self.f0
                omega = 2.*math.pi*freqs
                return num.exp(-((omega-omega0)
                                 / (self.a*omega0))**2)

        freqs, coeffs = self.spectrum()
        n = self.data_len()
        nfilt = len(filter_freqs)
        signal_tf = num.zeros((nfilt, n))
        centroid_freqs = num.zeros(nfilt)
        for ifilt, f0 in enumerate(filter_freqs):
            taper = Gauss(f0, a=bandwidth)
            weights = taper.evaluate(freqs)
            nhalf = freqs.size
            analytic_spec = num.zeros(n, dtype=complex)
            analytic_spec[:nhalf] = coeffs*weights

            enorm = num.abs(analytic_spec[:nhalf])**2
            enorm /= num.sum(enorm)

            if n % 2 == 0:
                analytic_spec[1:nhalf-1] *= 2.
            else:
                analytic_spec[1:nhalf] *= 2.

            analytic = num.fft.ifft(analytic_spec)
            signal_tf[ifilt, :] = num.abs(analytic)

            enorm = num.abs(analytic_spec[:nhalf])**2
            enorm /= num.sum(enorm)
            centroid_freqs[ifilt] = num.sum(freqs*enorm)

        return centroid_freqs, signal_tf

    def _get_tapered_coeffs(
            self, ntrans, freqlimits, transfer_function, invert=False,
            demean=True):

        cache_key = (
            ntrans, self.deltat, freqlimits, transfer_function.uuid, invert,
            demean)

        if cache_key in g_tapered_coeffs_cache:
            return g_tapered_coeffs_cache[cache_key]

        deltaf = 1./(self.deltat*ntrans)
        nfreqs = ntrans//2 + 1
        transfer = num.ones(nfreqs, dtype=complex)
        hi = snapper(nfreqs, deltaf)
        if freqlimits is not None:
            kmin, kmax = hi(freqlimits[0]), hi(freqlimits[3])
            freqs = num.arange(kmin, kmax)*deltaf
            coeffs = transfer_function.evaluate(freqs)
            if invert:
                if num.any(coeffs == 0.0):
                    raise InfiniteResponse('%s.%s.%s.%s' % self.nslc_id)

                transfer[kmin:kmax] = 1.0 / coeffs
            else:
                transfer[kmin:kmax] = coeffs

            tapered_transfer = costaper(*freqlimits, nfreqs, deltaf) * transfer
        else:
            if invert:
                raise Exception(
                    'transfer: `freqlimits` must be given when `invert` is '
                    'set to `True`')

            freqs = num.arange(nfreqs) * deltaf
            tapered_transfer = transfer_function.evaluate(freqs)

        g_tapered_coeffs_cache[cache_key] = tapered_transfer

        if demean:
            tapered_transfer[0] = 0.0  # don't introduce static offsets

        return tapered_transfer

    def fill_template(self, template, **additional):
        '''
        Fill string template with trace metadata.

        Uses normal python '%(placeholder)s' string templates. The following
        placeholders are considered: ``network``, ``station``, ``location``,
        ``channel``, ``tmin`` (time of first sample), ``tmax`` (time of last
        sample), ``tmin_ms``, ``tmax_ms``, ``tmin_us``, ``tmax_us``,
        ``tmin_year``, ``tmax_year``, ``tmin_month``, ``tmax_month``,
        ``tmin_day``, ``tmax_day``, ``julianday``. The variants ending with
        ``'_ms'`` include milliseconds, those with ``'_us'`` include
        microseconds, those with ``'_year'`` contain only the year.
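
        Example (minimal sketch): build a file name from trace metadata::

            fn = tr.fill_template(
                '%(network)s.%(station)s.%(channel)s_%(tmin)s.mseed')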
        '''

        template = template.replace('%n', '%(network)s')\
            .replace('%s', '%(station)s')\
            .replace('%l', '%(location)s')\
            .replace('%c', '%(channel)s')\
            .replace('%b', '%(tmin)s')\
            .replace('%e', '%(tmax)s')\
            .replace('%j', '%(julianday)s')

        params = dict(
            zip(('network', 'station', 'location', 'channel'), self.nslc_id))
        params['tmin'] = util.time_to_str(
            self.tmin, format='%Y-%m-%d_%H-%M-%S')
        params['tmax'] = util.time_to_str(
            self.tmax, format='%Y-%m-%d_%H-%M-%S')
        params['tmin_ms'] = util.time_to_str(
            self.tmin, format='%Y-%m-%d_%H-%M-%S.3FRAC')
        params['tmax_ms'] = util.time_to_str(
            self.tmax, format='%Y-%m-%d_%H-%M-%S.3FRAC')
        params['tmin_us'] = util.time_to_str(
            self.tmin, format='%Y-%m-%d_%H-%M-%S.6FRAC')
        params['tmax_us'] = util.time_to_str(
            self.tmax, format='%Y-%m-%d_%H-%M-%S.6FRAC')
        params['tmin_year'], params['tmin_month'], params['tmin_day'] \
            = util.time_to_str(self.tmin, format='%Y-%m-%d').split('-')
        params['tmax_year'], params['tmax_month'], params['tmax_day'] \
            = util.time_to_str(self.tmax, format='%Y-%m-%d').split('-')
        params['julianday'] = util.julian_day_of_year(self.tmin)
        params.update(additional)
        return template % params

    def plot(self):
        '''
        Show trace with matplotlib.

        See also: :py:meth:`Trace.snuffle`.
        '''

        import pylab
        pylab.plot(self.get_xdata(), self.get_ydata())
        name = '%s %s %s - %s' % (
            self.channel,
            self.station,
            time.strftime('%d-%m-%y %H:%M:%S', time.gmtime(self.tmin)),
            time.strftime('%d-%m-%y %H:%M:%S', time.gmtime(self.tmax)))

        pylab.title(name)
        pylab.show()

    def snuffle(self, **kwargs):
        '''
        Show trace in a snuffler window.

        :param stations: list of :py:class:`pyrocko.model.station.Station`
            objects or ``None``
        :param events: list of :py:class:`pyrocko.model.event.Event` objects or
            ``None``
        :param markers: list of :py:class:`pyrocko.gui.snuffler.marker.Marker`
            objects or ``None``
2006 :param ntracks: int, number of tracks to be shown initially (default:
2007 12)
2008 :param follow: time interval (in seconds) for real time follow mode or
2009 ``None``
2010 :param controls: bool, whether to show the main controls (default:
2011 ``True``)
2012 :param opengl: bool, whether to use opengl (default: ``False``)
2013 '''
2015 return snuffle([self], **kwargs)
2018def snuffle(traces, **kwargs):
2019 '''
2020 Show traces in a snuffler window.
2022 :param stations: list of :py:class:`pyrocko.model.station.Station` objects
2023 or ``None``
2024 :param events: list of :py:class:`pyrocko.model.event.Event` objects or
2025 ``None``
2026 :param markers: list of :py:class:`pyrocko.gui.snuffler.marker.Marker`
2027 objects or ``None``
2028 :param ntracks: int, number of tracks to be shown initially (default: 12)
2029 :param follow: time interval (in seconds) for real time follow mode or
2030 ``None``
2031 :param controls: bool, whether to show the main controls (default:
2032 ``True``)
2033 :param opengl: bool, whether to use opengl (default: ``False``)
2034 '''
2036 from pyrocko import pile
2037 from pyrocko.gui.snuffler import snuffler
2038 p = pile.Pile()
2039 if traces:
2040 trf = pile.MemTracesFile(None, traces)
2041 p.add_file(trf)
2042 return snuffler.snuffle(p, **kwargs)
2045def downsample_tpad(
2046 deltat_in, deltat_out, allow_upsample_max=1, ftype='fir-remez'):
2047 '''
2048 Get approximate amount of cutoff which will be produced by downsampling.
2050 The :py:meth:`Trace.downsample_to` method removes some samples at the
2051 beginning and end of the trace which is downsampled. This function
2052 estimates the approximate length [s] which will be cut off for a given set
2053 of parameters supplied to :py:meth:`Trace.downsample_to`.
2055 :param deltat_in:
2056 Input sampling interval [s].
2057 :type deltat_in:
2058 float
2060 :param deltat_out:
2061 Output sampling interval [s].
2062 :type deltat_out:
2063 float
2065 :returns:
2066 Approximate length [s] which will be cut off.
2068 See :py:meth:`Trace.downsample_to` for details.
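
Example, as a sketch with illustrative rates (100 Hz downsampled to 10 Hz)::

    tpad = downsample_tpad(0.01, 0.1)
    # request tpad extra seconds on both sides of the window of interest
    # before calling tr.downsample_to(0.1, cut=True)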
2069 '''
2071 upsratio, deci_seq = _configure_downsampling(
2072 deltat_in, deltat_out, allow_upsample_max)
2074 tpad = 0.0
2075 deltat = deltat_in / upsratio
2076 for deci in deci_seq:
2077 b, a, n = util.decimate_coeffs(deci, None, ftype)
2078 # n//2 for the antialiasing
2079 # +deci for possible snap to multiples
2080 # +1 for rounding errors
2081 tpad += (n//2 + deci + 1) * deltat
2082 deltat = deltat * deci
2084 return tpad
2087def _configure_downsampling(deltat_in, deltat_out, allow_upsample_max):
2088 for upsratio in range(1, allow_upsample_max+1):
2089 dratio = (upsratio/deltat_in) / (1./deltat_out)
2090 deci_seq = util.decitab(int(round(dratio)))
2091 if abs(dratio - round(dratio)) / dratio < 0.0001 and deci_seq:
2092 return upsratio, [deci for deci in deci_seq if deci != 1]
2094 raise util.UnavailableDecimation('ratio = %g' % (deltat_out / deltat_in))
2097def _all_same(xs):
2098 return all(x == xs[0] for x in xs)
2101def _incompatibilities(traces):
2102 if not traces:
2103 return None
2105 params = [
2106 (tr.ydata.size, tr.ydata.dtype, tr.deltat, tr.tmin)
2107 for tr in traces]
2109 if not _all_same(params):
2110 return params
2111 else:
2112 return None
2115def _raise_incompatible_traces(params):
2116 raise IncompatibleTraces(
2117 'Given traces are incompatible. Sampling rate, start time, '
2118 'number of samples and data type must match.\n%s\n%s' % (
2119 ' %10s %-10s %12s %22s' % (
2120 'nsamples', 'dtype', 'deltat', 'tmin'),
2121 '\n'.join(
2122 ' %10i %-10s %12.5e %22s' % (
2123 nsamples, dtype, deltat, util.time_to_str(tmin))
2124 for (nsamples, dtype, deltat, tmin) in params)))
2127def _ensure_compatible(traces):
2128 params = _incompatibilities(traces)
2129 if params:
2130 _raise_incompatible_traces(params)
2133def _almost_equal(a, b, atol):
2134 return abs(a-b) < atol
2137def get_traces_data_as_array(traces):
2138 '''
2139 Merge data samples from multiple traces into a 2D array.
2141 :param traces:
2142 Input waveforms.
2143 :type traces:
2144 list of :py:class:`pyrocko.Trace <pyrocko.trace.Trace>` objects
2146 :raises:
2147 :py:class:`IncompatibleTraces` if traces have different time
2148 span, sample rate or data type, or if traces is an empty list.
2150 :returns:
2151 2D array as ``data[itrace, isample]``.
2152 :rtype:
2153 :py:class:`numpy.ndarray`
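
Example, as a sketch (``traces`` is assumed to be a list of compatible
traces)::

    data = get_traces_data_as_array(traces)
    stack = data.mean(axis=0)  # linear stack over all traces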
2154 '''
2156 if not traces:
2157 raise IncompatibleTraces('Need at least one trace.')
2159 _ensure_compatible(traces)
2161 return num.vstack([tr.ydata for tr in traces])
2164def make_traces_compatible(
2165 traces,
2166 dtype=None,
2167 deltat=None,
2168 enforce_global_snap=True,
2169 warn_snap=False):
2171 '''
2172 Homogenize sampling rate, time span, sampling instants, and data type.
2174 This function takes a group of traces and tries to make them compatible in
2175 terms of data type, sampling rate, time span and sampling instants.
2178 If necessary, traces are (in order):
2180 - cast to the same data type.
2181 - downsampled to a common sampling rate, using decimation cascades.
2182 - resampled to common sampling instants in time, using sinc interpolation.
2183 - cut to the same time span. The longest time span covered by all traces is
2184 used.
2186 :param traces:
2187 Input waveforms.
2188 :type traces:
2189 :py:class:`list` of :py:class:`Trace`
2191 :param dtype:
2192 Force traces to be cast to the given data type. If not given and the
2193 input traces have inconsistent data types, they are cast to :py:class:`float`.
2194 :type dtype:
2195 :py:class:`numpy.dtype`
2197 :param deltat:
2198 Sampling interval [s]. If not specified, the longest sampling interval
2199 among the input traces is chosen.
2200 :type deltat:
2201 float
2203 :param enforce_global_snap:
2204 If ``True``, choose sampling instants to be even multiples of the
2205 sampling interval in system time. When set to ``False``, traces are
2206 still resampled to common sampling instants (if necessary), but these
2207 may be offset from the even multiples of the sampling interval.
2208 :type enforce_global_snap:
2209 bool
2211 :param warn_snap:
2212 If set to ``True``, emit a warning when resampling has to be performed.
2213 :type warn_snap:
2214 bool
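
A typical pattern, as a sketch, is to homogenize a group of traces before
extracting their samples as a single array::

    traces2 = make_traces_compatible(traces, dtype=float)
    data = get_traces_data_as_array(traces2)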
2215 '''
2217 eps_snap = 1e-3
2219 if not traces:
2220 return []
2222 traces = list(traces)
2224 dtypes = [tr.ydata.dtype for tr in traces]
2225 if not _all_same(dtypes) or dtype is not None:
2227 if dtype is None:
2228 dtype = float
2229 logger.warning(
2230 'make_traces_compatible: Inconsistent data types - converting '
2231 'sample datatype to %s.' % str(dtype))
2233 for itr, tr in enumerate(traces):
2234 tr_copy = tr.copy(data=False)
2235 tr_copy.set_ydata(tr.ydata.astype(dtype))
2236 traces[itr] = tr_copy
2238 deltats = [tr.deltat for tr in traces]
2239 if not _all_same(deltats) or deltat is not None:
2240 if deltat is None:
2241 deltat = max(deltats)
2242 logger.warning(
2243 'make_traces_compatible: Inconsistent sampling rates - '
2244 'downsampling to lowest rate among input traces: %g Hz.'
2245 % (1.0 / deltat))
2247 for itr, tr in enumerate(traces):
2248 if tr.deltat != deltat:
2249 tr_copy = tr.copy()
2250 tr_copy.downsample_to(deltat, snap=True, cut=True)
2251 traces[itr] = tr_copy
2253 tmins = num.array([tr.tmin for tr in traces])
2254 is_misaligned = num.abs(num.round(tmins / deltat) * deltat - tmins) \
2255 > deltat * eps_snap
2257 if enforce_global_snap or any(is_misaligned):
2258 tref = util.to_time_float(0.0)
2259 else:
2260 # to keep a common subsample shift
2261 tref = num.max(tmins)
2263 tmins_snap = num.round((tmins - tref) / deltat) * deltat + tref
2264 need_snap = num.abs(tmins_snap - tmins) > deltat * eps_snap
2265 if num.any(need_snap):
2266 if warn_snap:
2267 logger.warning(
2268 'make_traces_compatible: Misaligned sampling - introducing '
2269 'subsample shifts for proper alignment.')
2271 for itr, tr in enumerate(traces):
2272 if need_snap[itr]:
2273 tr_copy = tr.copy()
2274 if tref != 0.0:
2275 tr_copy.shift(-tref)
2277 tr_copy.snap(interpolate=True)
2278 if tref != 0.0:
2279 tr_copy.shift(tref)
2281 traces[itr] = tr_copy
2283 tmins = num.array([tr.tmin for tr in traces])
2284 nsamples = num.array([tr.ydata.size for tr in traces])
2285 tmaxs = tmins + (nsamples - 1) * deltat
2287 tmin = num.max(tmins)
2288 tmax = num.min(tmaxs)
2290 if tmin > tmax:
2291 raise IncompatibleTraces('Traces do not overlap.')
2293 nsamples_must = int(round((tmax - tmin) / deltat)) + 1
2294 for itr, tr in enumerate(traces):
2295 if not (_almost_equal(tr.tmin, tmin, deltat*eps_snap)
2296 and _almost_equal(tr.tmax, tmax, deltat*eps_snap)):
2298 traces[itr] = tr.chop(
2299 tmin, tmax,
2300 inplace=False,
2301 want_incomplete=False,
2302 include_last=True)
2304 xtr = traces[itr]
2305 assert _almost_equal(xtr.tmin, tmin, deltat*eps_snap)
2306 assert int(round((xtr.tmax - xtr.tmin) / deltat)) + 1 == nsamples_must
2307 xtr.tmin = tmin
2308 xtr.tmax = tmax
2309 xtr.deltat = deltat
2310 xtr._update_ids()
2312 return traces
2315class IncompatibleTraces(Exception):
2316 '''
2317 Raised when traces have incompatible sampling rate, time span or data type.
2318 '''
2321class InfiniteResponse(Exception):
2322 '''
2323 This exception is raised by :py:class:`Trace` operations when deconvolution
2324 of a frequency response (instrument response transfer function) would
2325 result in a division by zero.
2326 '''
2329class MisalignedTraces(Exception):
2330 '''
2331 This exception is raised by some :py:class:`Trace` operations when tmin,
2332 tmax or number of samples do not match.
2333 '''
2335 pass
2338class NoData(Exception):
2339 '''
2340 This exception is raised by some :py:class:`Trace` operations when no or
2341 not enough data is available.
2342 '''
2344 pass
2347class AboveNyquist(Exception):
2348 '''
2349 This exception is raised by some :py:class:`Trace` operations when given
2350 frequencies are above the Nyquist frequency.
2351 '''
2353 pass
2356class TraceTooShort(Exception):
2357 '''
2358 This exception is raised by some :py:class:`Trace` operations when the
2359 trace is too short.
2360 '''
2362 pass
2365class ResamplingFailed(Exception):
2366 pass
2369def minmax(traces, key=None, mode='minmax', outer_mode='minmax'):
2371 '''
2372 Get data range given traces grouped by selected pattern.
2374 :param key: a callable which takes a trace as its single argument and returns a
2375 key for the grouping of the results. If this is ``None``, the default,
2376 ``lambda tr: (tr.network, tr.station, tr.location, tr.channel)`` is
2377 used.
2378 :param mode: ``'minmax'`` or floating point number. If this is
2379 ``'minmax'``, minimum and maximum of the traces are used; if it is a
2380 number, mean +- standard deviation times ``mode`` is used.
2382 :param outer_mode: ``'minmax'`` to use minimum and maximum of the
2383 single-trace ranges, or ``'robust'`` to use the interval which discards
2384 the 10% most extreme values on either end.
2386 :returns: a dict with the combined data ranges.
2388 Examples::
2390 ranges = minmax(traces, lambda tr: tr.channel)
2391 print(ranges['N'])  # print min & max of all traces with channel == 'N'
2392 print(ranges['E'])  # print min & max of all traces with channel == 'E'
2394 ranges = minmax(traces, lambda tr: (tr.network, tr.station))
2395 print(ranges['GR', 'HAM3'])  # print min & max of all traces with
2396 # network == 'GR' and station == 'HAM3'
2398 ranges = minmax(traces, lambda tr: None)
2399 print(ranges[None])  # prints min & max of all traces
2400 '''
2402 if key is None:
2403 key = _default_key
2405 ranges = defaultdict(list)
2406 for trace in traces:
2407 if isinstance(mode, str) and mode == 'minmax':
2408 mi, ma = num.nanmin(trace.ydata), num.nanmax(trace.ydata)
2409 else:
2410 mean = trace.ydata.mean()
2411 std = trace.ydata.std()
2412 mi, ma = mean-std*mode, mean+std*mode
2414 k = key(trace)
2415 ranges[k].append((mi, ma))
2417 for k in ranges:
2418 mins, maxs = num.array(ranges[k]).T
2419 if outer_mode == 'minmax':
2420 ranges[k] = num.nanmin(mins), num.nanmax(maxs)
2421 elif outer_mode == 'robust':
2422 ranges[k] = num.percentile(mins, 10.), num.percentile(maxs, 90.)
2424 return ranges
2427def minmaxtime(traces, key=None):
2429 '''
2430 Get time range given traces grouped by selected pattern.
2432 :param key: a callable which takes a trace as its single argument and returns a
2433 key for the grouping of the results. If this is ``None``, the default,
2434 ``lambda tr: (tr.network, tr.station, tr.location, tr.channel)`` is
2435 used.
2437 :returns: a dict with the combined time ranges.
2438 '''
2440 if key is None:
2441 key = _default_key
2443 ranges = {}
2444 for trace in traces:
2445 mi, ma = trace.tmin, trace.tmax
2446 k = key(trace)
2447 if k not in ranges:
2448 ranges[k] = mi, ma
2449 else:
2450 tmi, tma = ranges[k]
2451 ranges[k] = min(tmi, mi), max(tma, ma)
2453 return ranges
2456def degapper(
2457 traces,
2458 maxgap=5,
2459 fillmethod='interpolate',
2460 deoverlap='use_second',
2461 maxlap=None):
2463 '''
2464 Try to connect traces and remove gaps.
2466 This method will combine adjacent traces which match in their network,
2467 station, location and channel attributes. Overlapping parts are handled
2468 according to the ``deoverlap`` argument.
2470 :param traces: input traces, must be sorted by their full_id attribute.
2471 :param maxgap: maximum number of samples to interpolate.
2472 :param fillmethod: what to put into the gaps: 'interpolate' or 'zeros'.
2473 :param deoverlap: how to handle overlaps: 'use_second' to use data from
2474 second trace (default), 'use_first' to use data from first trace,
2475 'crossfade_cos' to crossfade with cosine taper, 'add' to add amplitude
2476 values.
2477 :param maxlap: maximum number of samples of overlap which are removed.
2479 :returns: list of traces
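
Example, as a sketch (input must be sorted as described above)::

    traces.sort(key=lambda tr: tr.full_id)
    continuous = degapper(traces, maxgap=5, fillmethod='interpolate')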
2480 '''
2482 in_traces = traces
2483 out_traces = []
2484 if not in_traces:
2485 return out_traces
2486 out_traces.append(in_traces.pop(0))
2487 while in_traces:
2489 a = out_traces[-1]
2490 b = in_traces.pop(0)
2492 avirt, bvirt = a.ydata is None, b.ydata is None
2493 assert avirt == bvirt, \
2494 'traces given to degapper() must either all have data or have ' \
2495 'no data.'
2497 virtual = avirt and bvirt
2499 if (a.nslc_id == b.nslc_id and a.deltat == b.deltat
2500 and a.data_len() >= 1 and b.data_len() >= 1
2501 and (virtual or a.ydata.dtype == b.ydata.dtype)):
2503 dist = (b.tmin-(a.tmin+(a.data_len()-1)*a.deltat))/a.deltat
2504 idist = int(round(dist))
2505 if abs(dist - idist) > 0.05 and idist <= maxgap:
2506 # logger.warning('Cannot degap traces with displaced sampling '
2507 # '(%s, %s, %s, %s)' % a.nslc_id)
2508 pass
2509 else:
2510 if 1 < idist <= maxgap:
2511 if not virtual:
2512 if fillmethod == 'interpolate':
2513 filler = a.ydata[-1] + (
2514 ((1.0 + num.arange(idist-1, dtype=float))
2515 / idist) * (b.ydata[0]-a.ydata[-1])
2516 ).astype(a.ydata.dtype)
2517 elif fillmethod == 'zeros':
2518 filler = num.zeros(idist-1, dtype=a.ydata.dtype)
2519 a.ydata = num.concatenate((a.ydata, filler, b.ydata))
2520 a.tmax = b.tmax
2521 if a.mtime and b.mtime:
2522 a.mtime = max(a.mtime, b.mtime)
2523 continue
2525 elif idist == 1:
2526 if not virtual:
2527 a.ydata = num.concatenate((a.ydata, b.ydata))
2528 a.tmax = b.tmax
2529 if a.mtime and b.mtime:
2530 a.mtime = max(a.mtime, b.mtime)
2531 continue
2533 elif idist <= 0 and (maxlap is None or -maxlap < idist):
2534 if b.tmax > a.tmax:
2535 if not virtual:
2536 na = a.ydata.size
2537 n = -idist+1
2538 if deoverlap == 'use_second':
2539 a.ydata = num.concatenate(
2540 (a.ydata[:-n], b.ydata))
2541 elif deoverlap in ('use_first', 'crossfade_cos'):
2542 a.ydata = num.concatenate(
2543 (a.ydata, b.ydata[n:]))
2544 elif deoverlap == 'add':
2545 a.ydata[-n:] += b.ydata[:n]
2546 a.ydata = num.concatenate(
2547 (a.ydata, b.ydata[n:]))
2548 else:
2549 assert False, 'unknown deoverlap method'
2551 if deoverlap == 'crossfade_cos':
2552 n = -idist+1
2553 taper = 0.5-0.5*num.cos(
2554 (1.+num.arange(n))/(1.+n)*num.pi)
2555 a.ydata[na-n:na] *= 1.-taper
2556 a.ydata[na-n:na] += b.ydata[:n] * taper
2558 a.tmax = b.tmax
2559 if a.mtime and b.mtime:
2560 a.mtime = max(a.mtime, b.mtime)
2561 continue
2562 else:
2563 # make short second trace vanish
2564 continue
2566 if b.data_len() >= 1:
2567 out_traces.append(b)
2569 for tr in out_traces:
2570 tr._update_ids()
2572 return out_traces
2575def rotate(traces, azimuth, in_channels, out_channels):
2576 '''
2577 2D rotation of traces.
2579 :param traces: list of input traces
2580 :param azimuth: difference of the azimuths of the component directions
2581 (azimuth of out_channels[0]) - (azimuth of in_channels[0])
2582 :param in_channels: names of the input channels (e.g. 'N', 'E')
2583 :param out_channels: names of the output channels (e.g. 'R', 'T')
2584 :returns: list of rotated traces
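
Example, as a sketch with an illustrative rotation angle::

    rt = rotate(
        traces, azimuth=30.,
        in_channels=('N', 'E'), out_channels=('R', 'T'))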
2585 '''
2587 phi = azimuth/180.*math.pi
2588 cphi = math.cos(phi)
2589 sphi = math.sin(phi)
2590 rotated = []
2591 in_channels = tuple(_channels_to_names(in_channels))
2592 out_channels = tuple(_channels_to_names(out_channels))
2593 for a in traces:
2594 for b in traces:
2595 if ((a.channel, b.channel) == in_channels and
2596 a.nslc_id[:3] == b.nslc_id[:3] and
2597 abs(a.deltat-b.deltat) < a.deltat*0.001):
2598 tmin = max(a.tmin, b.tmin)
2599 tmax = min(a.tmax, b.tmax)
2601 if tmin < tmax:
2602 ac = a.chop(tmin, tmax, inplace=False, include_last=True)
2603 bc = b.chop(tmin, tmax, inplace=False, include_last=True)
2604 if abs(ac.tmin - bc.tmin) > ac.deltat*0.01:
2605 logger.warning(
2606 'Cannot rotate traces with displaced sampling '
2607 '(%s, %s, %s, %s)' % a.nslc_id)
2608 continue
2610 acydata = ac.get_ydata()*cphi+bc.get_ydata()*sphi
2611 bcydata = -ac.get_ydata()*sphi+bc.get_ydata()*cphi
2612 ac.set_ydata(acydata)
2613 bc.set_ydata(bcydata)
2615 ac.set_codes(channel=out_channels[0])
2616 bc.set_codes(channel=out_channels[1])
2617 rotated.append(ac)
2618 rotated.append(bc)
2620 return rotated
2623def rotate_to_rt(n, e, source, receiver, out_channels=('R', 'T')):
2624 '''
2625 Rotate traces from NE to RT system.
2627 :param n:
2628 North trace.
2629 :type n:
2630 :py:class:`~pyrocko.trace.Trace`
2632 :param e:
2633 East trace.
2634 :type e:
2635 :py:class:`~pyrocko.trace.Trace`
2637 :param source:
2638 Source of the recorded signal.
2639 :type source:
2640 :py:class:`pyrocko.gf.seismosizer.Source`
2642 :param receiver:
2643 Receiver of the recorded signal.
2644 :type receiver:
2645 :py:class:`pyrocko.model.location.Location`
2647 :param out_channels:
2648 Channel codes of the output channels (radial, transversal).
2649 Default is ('R', 'T').
2651 :type out_channels:
2652 optional, tuple[str, str]
2654 :returns:
2655 Rotated traces (radial, transversal).
2656 :rtype:
2657 tuple[
2658 :py:class:`~pyrocko.trace.Trace`,
2659 :py:class:`~pyrocko.trace.Trace`]
2660 '''
2661 azimuth = orthodrome.azimuth(receiver, source) + 180.
2662 in_channels = n.channel, e.channel
2663 out = rotate(
2664 [n, e], azimuth,
2665 in_channels=in_channels,
2666 out_channels=out_channels)
2668 assert len(out) == 2
2669 for tr in out:
2670 if tr.channel == out_channels[0]:
2671 r = tr
2672 elif tr.channel == out_channels[1]:
2673 t = tr
2674 else:
2675 assert False
2677 return r, t
2680def rotate_to_lqt(traces, backazimuth, incidence, in_channels,
2681 out_channels=('L', 'Q', 'T')):
2682 '''
2683 Rotate traces from ZNE to LQT system.
2685 :param traces: list of traces in arbitrary order
2686 :param backazimuth: backazimuth in degrees clockwise from north
2687 :param incidence: incidence angle in degrees from vertical
2688 :param in_channels: input channel names
2689 :param out_channels: output channel names (default: ('L', 'Q', 'T'))
2690 :returns: list of transformed traces
2691 '''
2692 i = incidence/180.*num.pi
2693 b = backazimuth/180.*num.pi
2695 ci = num.cos(i)
2696 cb = num.cos(b)
2697 si = num.sin(i)
2698 sb = num.sin(b)
2700 rotmat = num.array(
2701 [[ci, -cb*si, -sb*si], [si, cb*ci, sb*ci], [0., sb, -cb]])
2702 return project(traces, rotmat, in_channels, out_channels)
2705def _decompose(a):
2706 '''
2707 Decompose matrix into independent submatrices.
2708 '''
2710 def depends(iout, a):
2711 row = a[iout, :]
2712 return set(num.arange(row.size).compress(row != 0.0))
2714 def provides(iin, a):
2715 col = a[:, iin]
2716 return set(num.arange(col.size).compress(col != 0.0))
2718 a = num.asarray(a)
2719 outs = set(range(a.shape[0]))
2720 systems = []
2721 while outs:
2722 iout = outs.pop()
2724 gout = set()
2725 for iin in depends(iout, a):
2726 gout.update(provides(iin, a))
2728 if not gout:
2729 continue
2731 gin = set()
2732 for iout2 in gout:
2733 gin.update(depends(iout2, a))
2735 if not gin:
2736 continue
2738 for iout2 in gout:
2739 if iout2 in outs:
2740 outs.remove(iout2)
2742 gin = list(gin)
2743 gin.sort()
2744 gout = list(gout)
2745 gout.sort()
2747 systems.append((gin, gout, a[gout, :][:, gin]))
2749 return systems
2752def _channels_to_names(channels):
2753 from pyrocko import squirrel
2754 names = []
2755 for ch in channels:
2756 if isinstance(ch, model.Channel):
2757 names.append(ch.name)
2758 elif isinstance(ch, squirrel.Channel):
2759 names.append(ch.codes.channel)
2760 else:
2761 names.append(ch)
2763 return names
2766def project(traces, matrix, in_channels, out_channels):
2767 '''
2768 Affine transform of three-component traces.
2770 Compute matrix-vector product of three-component traces, to e.g. rotate
2771 traces into a different basis. The traces are distinguished and ordered by
2772 their channel attribute. The transform is applied to overlapping parts of
2773 any appropriate combinations of the input traces. This should make this
2774 function robust in the presence of data gaps. It also tries to apply the
2775 transformation to subsets of the channels, if this is possible, so that, if
2776 for example a vertical component is missing, horizontal components can
2777 still be rotated.
2779 :param traces: list of traces in arbitrary order
2780 :param matrix: transformation matrix
2781 :param in_channels: input channel names
2782 :param out_channels: output channel names
2783 :returns: list of transformed traces
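
Example, as a sketch: rotate the horizontal components by an illustrative
angle while passing the vertical component through unchanged::

    import numpy as num

    phi = num.deg2rad(30.)
    matrix = num.array([
        [1., 0., 0.],
        [0., num.cos(phi), num.sin(phi)],
        [0., -num.sin(phi), num.cos(phi)]])

    out = project(traces, matrix, ('Z', 'N', 'E'), ('Z', 'R', 'T'))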
2784 '''
2786 in_channels = tuple(_channels_to_names(in_channels))
2787 out_channels = tuple(_channels_to_names(out_channels))
2788 systems = _decompose(matrix)
2790 # fall back to full matrix if any submatrix is not square
2791 for iins, iouts, submatrix in systems:
2792 if submatrix.shape[0] != submatrix.shape[1]:
2793 if len(in_channels) != 3 or len(out_channels) != 3:
2794 return []
2795 else:
2796 return _project3(traces, matrix, in_channels, out_channels)
2798 projected = []
2799 for iins, iouts, submatrix in systems:
2800 in_cha = tuple([in_channels[iin] for iin in iins])
2801 out_cha = tuple([out_channels[iout] for iout in iouts])
2802 if submatrix.shape[0] == 1:
2803 projected.extend(_project1(traces, submatrix, in_cha, out_cha))
2804 elif submatrix.shape[1] == 2:
2805 projected.extend(_project2(traces, submatrix, in_cha, out_cha))
2806 else:
2807 projected.extend(_project3(traces, submatrix, in_cha, out_cha))
2809 return projected
2812def project_dependencies(matrix, in_channels, out_channels):
2813 '''
2814 Figure out what dependencies project() would produce.
2815 '''
2817 in_channels = tuple(_channels_to_names(in_channels))
2818 out_channels = tuple(_channels_to_names(out_channels))
2819 systems = _decompose(matrix)
2821 subpro = []
2822 for iins, iouts, submatrix in systems:
2823 if submatrix.shape[0] != submatrix.shape[1]:
2824 subpro.append((matrix, in_channels, out_channels))
2826 if not subpro:
2827 for iins, iouts, submatrix in systems:
2828 in_cha = tuple([in_channels[iin] for iin in iins])
2829 out_cha = tuple([out_channels[iout] for iout in iouts])
2830 subpro.append((submatrix, in_cha, out_cha))
2832 deps = {}
2833 for mat, in_cha, out_cha in subpro:
2834 for oc in out_cha:
2835 if oc not in deps:
2836 deps[oc] = []
2838 for ic in in_cha:
2839 deps[oc].append(ic)
2841 return deps
2844def _project1(traces, matrix, in_channels, out_channels):
2845 assert len(in_channels) == 1
2846 assert len(out_channels) == 1
2847 assert matrix.shape == (1, 1)
2849 projected = []
2850 for a in traces:
2851 if not (a.channel,) == in_channels:
2852 continue
2854 ac = a.copy()
2855 ac.set_ydata(matrix[0, 0]*a.get_ydata())
2856 ac.set_codes(channel=out_channels[0])
2857 projected.append(ac)
2859 return projected
2862def _project2(traces, matrix, in_channels, out_channels):
2863 assert len(in_channels) == 2
2864 assert len(out_channels) == 2
2865 assert matrix.shape == (2, 2)
2866 projected = []
2867 for a in traces:
2868 for b in traces:
2869 if not ((a.channel, b.channel) == in_channels and
2870 a.nslc_id[:3] == b.nslc_id[:3] and
2871 abs(a.deltat-b.deltat) < a.deltat*0.001):
2872 continue
2874 tmin = max(a.tmin, b.tmin)
2875 tmax = min(a.tmax, b.tmax)
2877 if tmin > tmax:
2878 continue
2880 ac = a.chop(tmin, tmax, inplace=False, include_last=True)
2881 bc = b.chop(tmin, tmax, inplace=False, include_last=True)
2882 if abs(ac.tmin - bc.tmin) > ac.deltat*0.01:
2883 logger.warning(
2884 'Cannot project traces with displaced sampling '
2885 '(%s, %s, %s, %s)' % a.nslc_id)
2886 continue
2888 acydata = num.dot(matrix[0], (ac.get_ydata(), bc.get_ydata()))
2889 bcydata = num.dot(matrix[1], (ac.get_ydata(), bc.get_ydata()))
2891 ac.set_ydata(acydata)
2892 bc.set_ydata(bcydata)
2894 ac.set_codes(channel=out_channels[0])
2895 bc.set_codes(channel=out_channels[1])
2897 projected.append(ac)
2898 projected.append(bc)
2900 return projected
2903def _project3(traces, matrix, in_channels, out_channels):
2904 assert len(in_channels) == 3
2905 assert len(out_channels) == 3
2906 assert matrix.shape == (3, 3)
2907 projected = []
2908 for a in traces:
2909 for b in traces:
2910 for c in traces:
2911 if not ((a.channel, b.channel, c.channel) == in_channels
2912 and a.nslc_id[:3] == b.nslc_id[:3]
2913 and b.nslc_id[:3] == c.nslc_id[:3]
2914 and abs(a.deltat-b.deltat) < a.deltat*0.001
2915 and abs(b.deltat-c.deltat) < b.deltat*0.001):
2917 continue
2919 tmin = max(a.tmin, b.tmin, c.tmin)
2920 tmax = min(a.tmax, b.tmax, c.tmax)
2922 if tmin >= tmax:
2923 continue
2925 ac = a.chop(tmin, tmax, inplace=False, include_last=True)
2926 bc = b.chop(tmin, tmax, inplace=False, include_last=True)
2927 cc = c.chop(tmin, tmax, inplace=False, include_last=True)
2928 if (abs(ac.tmin - bc.tmin) > ac.deltat*0.01
2929 or abs(bc.tmin - cc.tmin) > bc.deltat*0.01):
2931 logger.warning(
2932 'Cannot project traces with displaced sampling '
2933 '(%s, %s, %s, %s)' % a.nslc_id)
2934 continue
2936 acydata = num.dot(
2937 matrix[0],
2938 (ac.get_ydata(), bc.get_ydata(), cc.get_ydata()))
2939 bcydata = num.dot(
2940 matrix[1],
2941 (ac.get_ydata(), bc.get_ydata(), cc.get_ydata()))
2942 ccydata = num.dot(
2943 matrix[2],
2944 (ac.get_ydata(), bc.get_ydata(), cc.get_ydata()))
2946 ac.set_ydata(acydata)
2947 bc.set_ydata(bcydata)
2948 cc.set_ydata(ccydata)
2950 ac.set_codes(channel=out_channels[0])
2951 bc.set_codes(channel=out_channels[1])
2952 cc.set_codes(channel=out_channels[2])
2954 projected.append(ac)
2955 projected.append(bc)
2956 projected.append(cc)
2958 return projected
2961def correlate(a, b, mode='valid', normalization=None, use_fft=False):
2962 '''
2963 Cross correlation of two traces.
2965 :param a,b: input traces
2966 :param mode: ``'valid'``, ``'full'``, or ``'same'``
2967 :param normalization: ``'normal'``, ``'gliding'``, or ``None``
2968 :param use_fft: bool, whether to do cross correlation in spectral domain
2970 :returns: trace containing cross correlation coefficients
2972 This function computes the cross correlation between two traces. It
2973 evaluates the discrete equivalent of
2975 .. math::
2977 c(t) = \\int_{-\\infty}^{\\infty} a^{\\ast}(\\tau) b(t+\\tau) d\\tau
2979 where the star denotes the complex conjugate. Note that the arguments here are
2980 swapped when compared with the :py:func:`numpy.correlate` function,
2981 which is internally called. This function should be safe even with older
2982 versions of NumPy, where the correlate function has some problems.
2984 A trace containing the cross correlation coefficients is returned. The time
2985 information of the output trace is set so that the returned cross
2986 correlation can be viewed directly as a function of time lag.
2988 Example::
2990 # align two traces a and b containing a time-shifted similar signal:
2991 c = pyrocko.trace.correlate(a, b)
2992 t, coef = c.max() # get time and value of maximum
2993 b.shift(-t) # align b with a
2995 '''
2997 assert_same_sampling_rate(a, b)
2999 ya, yb = a.ydata, b.ydata
3001 # need reversed order here:
3002 yc = numpy_correlate_fixed(yb, ya, mode=mode, use_fft=use_fft)
3003 kmin, kmax = numpy_correlate_lag_range(yb, ya, mode=mode, use_fft=use_fft)
3005 if normalization == 'normal':
3006 normfac = num.sqrt(num.sum(ya**2))*num.sqrt(num.sum(yb**2))
3007 yc = yc/normfac
3009 elif normalization == 'gliding':
3010 if mode != 'valid':
3011 assert False, 'gliding normalization currently only available ' \
3012 'with "valid" mode.'
3014 if ya.size < yb.size:
3015 yshort, ylong = ya, yb
3016 else:
3017 yshort, ylong = yb, ya
3019 epsilon = 0.00001
3020 normfac_short = num.sqrt(num.sum(yshort**2))
3021 normfac = normfac_short * num.sqrt(
3022 moving_sum(ylong**2, yshort.size, mode='valid')) \
3023 + normfac_short*epsilon
3025 if yb.size <= ya.size:
3026 normfac = normfac[::-1]
3028 yc /= normfac
3030 c = a.copy()
3031 c.set_ydata(yc)
3032 c.set_codes(*merge_codes(a, b, '~'))
3033 c.shift(-c.tmin + b.tmin-a.tmin + kmin * c.deltat)
3035 return c
3038def deconvolve(
3039 a, b, waterlevel,
3040 tshift=0.,
3041 pad=0.5,
3042 fd_taper=None,
3043 pad_to_pow2=True):
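'''
Waterlevel deconvolution of two traces in the frequency domain.

The spectrum of ``a`` is divided by the spectrum of ``b``. The power
spectrum of ``b`` in the denominator is limited from below to
``waterlevel`` times its maximum, so that spectral holes in ``b`` do not
blow up the quotient. The result is shifted by ``tshift`` [s], ``pad``
gives the amount of zero-padding as a fraction of the data length and
``fd_taper``, if given, is applied to the output spectrum (e.g. a
:py:class:`CosTaper` with corner frequencies).
'''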
3045 assert_same_sampling_rate(a, b)
3046 assert abs(a.tmin - b.tmin) < a.deltat * 0.001
3047 deltat = a.deltat
3048 npad = int(round(a.data_len()*pad + tshift / deltat))
3050 ndata = max(a.data_len(), b.data_len())
3051 ndata_pad = ndata + npad
3053 if pad_to_pow2:
3054 ntrans = nextpow2(ndata_pad)
3055 else:
3056 ntrans = ndata_pad  # use padded length so the tshift roll cannot wrap data
3058 aspec = num.fft.rfft(a.ydata, ntrans)
3059 bspec = num.fft.rfft(b.ydata, ntrans)
3061 out = aspec * num.conj(bspec)
3063 bautocorr = bspec*num.conj(bspec)
3064 denom = num.maximum(bautocorr, waterlevel * bautocorr.max())
3066 out /= denom
3067 df = 1/(ntrans*deltat)
3069 if fd_taper is not None:
3070 fd_taper(out, 0.0, df)
3072 ydata = num.roll(num.fft.irfft(out), int(round(tshift/deltat)))
3073 c = a.copy(data=False)
3074 c.set_ydata(ydata[:ndata])
3075 c.set_codes(*merge_codes(a, b, '/'))
3076 return c
3079def assert_same_sampling_rate(a, b, eps=1.0e-6):
3080 assert same_sampling_rate(a, b, eps), \
3081 'Sampling rates differ: %g != %g' % (a.deltat, b.deltat)
3084def same_sampling_rate(a, b, eps=1.0e-6):
3085 '''
3086 Check if two traces have the same sampling rate.
3088 :param a,b: input traces
3089 :param eps: relative tolerance
3090 '''
3092 return abs(a.deltat - b.deltat) < (a.deltat + b.deltat)*eps
3095def fix_deltat_rounding_errors(deltat):
3096 '''
3097 Try to undo sampling rate rounding errors.
3099 Fix rounding errors of sampling intervals when these are read from single
3100 precision floating point values.
3102 Assumes that the true sampling rate or sampling interval was an integer
3103 value. No correction will be applied if this would change the sampling
3104 rate by more than 0.001%.
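
Example, assuming the interval was stored as a single-precision float::

    deltat = fix_deltat_rounding_errors(0.009999999776482582)
    # -> 0.01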
3105 '''
3107 if deltat <= 1.0:
3108 deltat_new = 1.0 / round(1.0 / deltat)
3109 else:
3110 deltat_new = round(deltat)
3112 if abs(deltat_new - deltat) / deltat > 1e-5:
3113 deltat_new = deltat
3115 return deltat_new
3118def merge_codes(a, b, sep='-'):
3119 '''
3120 Merge network-station-location-channel codes of a pair of traces.
3121 '''
3123 o = []
3124 for xa, xb in zip(a.nslc_id, b.nslc_id):
3125 if xa == xb:
3126 o.append(xa)
3127 else:
3128 o.append(sep.join((xa, xb)))
3129 return o
3132class Taper(Object):
3133 '''
3134 Base class for tapers.
3136 Does nothing by default.
3137 '''
3139 def __call__(self, y, x0, dx):
3140 pass
3143class CosTaper(Taper):
3144 '''
3145 Cosine Taper.
3147 :param a: start of fading in
3148 :param b: end of fading in
3149 :param c: start of fading out
3150 :param d: end of fading out
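
Example, as a sketch with illustrative times (fade in over the first 5 s
and fade out over the last 5 s of a 60 s window)::

    taper = CosTaper(0., 5., 55., 60.)
    tr_tapered = tr.taper(taper, inplace=False)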
3151 '''
3153 a = Float.T()
3154 b = Float.T()
3155 c = Float.T()
3156 d = Float.T()
3158 def __init__(self, a, b, c, d):
3159 Taper.__init__(self, a=a, b=b, c=c, d=d)
3161 def __call__(self, y, x0, dx):
3163 if y.dtype == num.dtype(float):
3164 _apply_costaper = signal_ext.apply_costaper
3165 else:
3166 _apply_costaper = apply_costaper
3168 _apply_costaper(self.a, self.b, self.c, self.d, y, x0, dx)
3170 def span(self, y, x0, dx):
3171 return span_costaper(self.a, self.b, self.c, self.d, y, x0, dx)
3173 def time_span(self):
3174 return self.a, self.d
3177class CosFader(Taper):
3178 '''
3179 Cosine Fader.
3181 :param xfade: fade in and fade out time in seconds (optional)
3182 :param xfrac: fade in and fade out as fraction between 0. and 1. (optional)
3184 Only one of the two arguments may be set; the other must be ``None``.
3185 '''
3187 xfade = Float.T(optional=True)
3188 xfrac = Float.T(optional=True)
3190 def __init__(self, xfade=None, xfrac=None):
3191 Taper.__init__(self, xfade=xfade, xfrac=xfrac)
3192 assert (xfade is None) != (xfrac is None)
3193 self._xfade = xfade
3194 self._xfrac = xfrac
3196 def __call__(self, y, x0, dx):
3198 xfade = self._xfade
3200 xlen = (y.size - 1)*dx
3201 if xfade is None:
3202 xfade = xlen * self._xfrac
3204 a = x0
3205 b = x0 + xfade
3206 c = x0 + xlen - xfade
3207 d = x0 + xlen
3209 apply_costaper(a, b, c, d, y, x0, dx)
3211 def span(self, y, x0, dx):
3212 return 0, y.size
3214 def time_span(self):
3215 return None, None
3218def none_min(li):
3219 if None in li:
3220 return None
3221 else:
3222 return min(x for x in li if x is not None)
3225def none_max(li):
3226 if None in li:
3227 return None
3228 else:
3229 return max(x for x in li if x is not None)
3232class MultiplyTaper(Taper):
3233 '''
3234 Multiplication of several tapers.
3235 '''
3237 tapers = List.T(Taper.T())
3239 def __init__(self, tapers=None):
3240 if tapers is None:
3241 tapers = []
3243 Taper.__init__(self, tapers=tapers)
3245 def __call__(self, y, x0, dx):
3246 for taper in self.tapers:
3247 taper(y, x0, dx)
3249 def span(self, y, x0, dx):
3250 spans = []
3251 for taper in self.tapers:
3252 spans.append(taper.span(y, x0, dx))
3254 mins, maxs = list(zip(*spans))
3255 return min(mins), max(maxs)
3257 def time_span(self):
3258 spans = []
3259 for taper in self.tapers:
3260 spans.append(taper.time_span())
3262 mins, maxs = list(zip(*spans))
3263 return none_min(mins), none_max(maxs)
3266class GaussTaper(Taper):
3267 '''
3268 Frequency domain Gaussian filter.
3269 '''
3271 alpha = Float.T()
3273 def __init__(self, alpha):
3274 Taper.__init__(self, alpha=alpha)
3275 self._alpha = alpha
3277 def __call__(self, y, x0, dx):
3278 f = x0 + num.arange(y.size)*dx
3279 y *= num.exp(-num.pi**2 / (self._alpha**2) * f**2)
3282cached_coefficients = {}
3285def _get_cached_filter_coeffs(order, corners, btype):
3286 ck = (order, tuple(corners), btype)
3287 if ck not in cached_coefficients:
3288 if len(corners) == 1:
3289 corners = corners[0]
3291 cached_coefficients[ck] = signal.butter(
3292 order, corners, btype=btype)
3294 return cached_coefficients[ck]
3297class _globals(object):
3298 _numpy_has_correlate_flip_bug = None
3301def _default_key(tr):
3302 return (tr.network, tr.station, tr.location, tr.channel)
3305def numpy_has_correlate_flip_bug():
3306 '''
3307 Check if NumPy's correlate function reveals old behaviour.
3308 '''
3310 if _globals._numpy_has_correlate_flip_bug is None:
3311 a = num.array([0, 0, 1, 0, 0, 0, 0])
3312 b = num.array([0, 0, 0, 0, 1, 0, 0, 0])
3313 ab = num.correlate(a, b, mode='same')
3314 ba = num.correlate(b, a, mode='same')
3315 _globals._numpy_has_correlate_flip_bug = num.all(ab == ba)
3317 return _globals._numpy_has_correlate_flip_bug
3320def numpy_correlate_fixed(a, b, mode='valid', use_fft=False):
3321 '''
3322 Call :py:func:`numpy.correlate` with fixes.
3324 c[k] = sum_i a[i+k] * conj(b[i])
3326 Note that the result produced by newer numpy.correlate is always flipped
3327 with respect to the formula given in its documentation (assuming ascending
3328 k for the output).
3329 '''
3331 if use_fft:
3332 if a.size < b.size:
3333 c = signal.fftconvolve(b[::-1], a, mode=mode)
3334 else:
3335 c = signal.fftconvolve(a, b[::-1], mode=mode)
3336 return c
3338 else:
3339 buggy = numpy_has_correlate_flip_bug()
3341 a = num.asarray(a)
3342 b = num.asarray(b)
3344 if buggy:
3345 b = num.conj(b)
3347 c = num.correlate(a, b, mode=mode)
3349 if buggy and a.size < b.size:
3350 return c[::-1]
3351 else:
3352 return c
3355def numpy_correlate_emulate(a, b, mode='valid'):
3356 '''
3357 Slow version of :py:func:`numpy.correlate` for comparison.
3358 '''
3360 a = num.asarray(a)
3361 b = num.asarray(b)
3364 kmin, kmax = numpy_correlate_lag_range(a, b, mode=mode)
3365 kmin = int(kmin)
3366 kmax = int(kmax)
3367 klen = kmax - kmin + 1
3368 c = num.zeros(klen, dtype=num.promote_types(b.dtype, a.dtype))
3369 for k in range(kmin, kmin+klen):
3370 imin = max(0, -k)
3371 ilen = min(b.size, a.size-k) - imin
3372 c[k-kmin] = num.sum(
3373 a[imin+k:imin+ilen+k] * num.conj(b[imin:imin+ilen]))
3375 return c
3378def numpy_correlate_lag_range(a, b, mode='valid', use_fft=False):
3379 '''
3380 Get range of lags for which :py:func:`numpy.correlate` produces values.
3381 '''
3383 a = num.asarray(a)
3384 b = num.asarray(b)
3386 kmin = -(b.size-1)
3387 if mode == 'full':
3388 klen = a.size-kmin
3389 elif mode == 'same':
3390 klen = max(a.size, b.size)
3391 kmin += (a.size+b.size-1 - max(a.size, b.size)) // 2 + \
3392 int(not use_fft and a.size % 2 == 0 and b.size > a.size)
3393 elif mode == 'valid':
3394 klen = abs(a.size - b.size) + 1
3395 kmin += min(a.size, b.size) - 1
3397 return kmin, kmin + klen - 1
3400def autocorr(x, nshifts):
3401 '''
3402 Compute biased estimate of the first autocorrelation coefficients.
3404 :param x: input array
3405 :param nshifts: number of coefficients to calculate
3406 '''
3408 mean = num.mean(x)
3409 std = num.std(x)
3410 n = x.size
3411 xdm = x - mean
3412 r = num.zeros(nshifts)
3413 for k in range(nshifts):
3414 r[k] = 1./((n-num.abs(k))*std) * num.sum(xdm[:n-k] * xdm[k:])
3416 return r
3419def yulewalker(x, order):
3420 '''
3421 Compute autoregression coefficients using Yule-Walker method.
3423 :param x: input array
3424 :param order: number of coefficients to produce
3426 A biased estimate of the autocorrelation is used. The Yule-Walker equations
3427 are solved by :py:func:`numpy.linalg.inv` instead of Levinson-Durbin
3428 recursion which is normally used.
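
Example, as a sketch on synthetic noise (order chosen for illustration)::

    import numpy as num

    x = num.random.randn(1000)
    ar_coeffs = yulewalker(x, order=4)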
3429 '''
3431 gamma = autocorr(x, order+1)
3432 d = gamma[1:1+order]
3433 a = num.zeros((order, order))
3434 gamma2 = num.concatenate((gamma[::-1], gamma[1:order]))
3435 for i in range(order):
3436 ioff = order-i
3437 a[i, :] = gamma2[ioff:ioff+order]
3439 return num.dot(num.linalg.inv(a), -d)
3442def moving_avg(x, n):
3443 n = int(n)
3444 cx = x.cumsum()
3445 nn = len(x)
3446 y = num.zeros(nn, dtype=cx.dtype)
3447 y[n//2:n//2+(nn-n)] = (cx[n:]-cx[:-n])/n
3448 y[:n//2] = y[n//2]
3449 y[n//2+(nn-n):] = y[n//2+(nn-n)-1]
3450 return y
3453def moving_sum(x, n, mode='valid'):
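'''
Compute running sum over ``n`` successive samples, based on cumulative
sums.

Output length follows the :py:func:`numpy.correlate` conventions:
``'valid'`` yields ``len(x)-n+1`` samples, ``'full'`` yields
``len(x)+n-1`` and ``'same'`` yields ``len(x)``.
'''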
3454 n = int(n)
3455 cx = x.cumsum()
3456 nn = len(x)
3458 if mode == 'valid':
3459 if nn-n+1 <= 0:
3460 return num.zeros(0, dtype=cx.dtype)
3461 y = num.zeros(nn-n+1, dtype=cx.dtype)
3462 y[0] = cx[n-1]
3463 y[1:nn-n+1] = cx[n:nn]-cx[0:nn-n]
3465 if mode == 'full':
3466 y = num.zeros(nn+n-1, dtype=cx.dtype)
3467 if n <= nn:
3468 y[0:n] = cx[0:n]
3469 y[n:nn] = cx[n:nn]-cx[0:nn-n]
3470 y[nn:nn+n-1] = cx[-1]-cx[nn-n:nn-1]
3471 else:
3472 y[0:nn] = cx[0:nn]
3473 y[nn:n] = cx[nn-1]
3474 y[n:nn+n-1] = cx[nn-1] - cx[0:nn-1]
3476 if mode == 'same':
3477 n1 = (n-1)//2
3478 y = num.zeros(nn, dtype=cx.dtype)
3479 if n <= nn:
3480 y[0:n-n1] = cx[n1:n]
3481 y[n-n1:nn-n1] = cx[n:nn]-cx[0:nn-n]
3482 y[nn-n1:nn] = cx[nn-1] - cx[nn-n:nn-n+n1]
3483 else:
3484 y[0:max(0, nn-n1)] = cx[min(n1, nn):nn]
3485 y[max(nn-n1, 0):min(n-n1, nn)] = cx[nn-1]
3486 y[min(n-n1, nn):nn] = cx[nn-1] - cx[0:max(0, nn-(n-n1))]
3488 return y
3491def nextpow2(i):
3492 return 2**int(math.ceil(math.log(i)/math.log(2.)))
3495def snapper_w_offset(nmax, offset, delta, snapfun=math.ceil):
3496 def snap(x):
3497 return max(0, min(int(snapfun((x-offset)/delta)), nmax))
3498 return snap
3501def snapper(nmax, delta, snapfun=math.ceil):
3502 def snap(x):
3503 return max(0, min(int(snapfun(x/delta)), nmax))
3504 return snap
3507def apply_costaper(a, b, c, d, y, x0, dx):
3508 abcd = num.array((a, b, c, d), dtype=float)
3509 ja, jb, jc, jd = num.clip(num.ceil((abcd-x0)/dx).astype(int), 0, y.size)
3510 y[:ja] = 0.
3511 y[ja:jb] *= 0.5 \
3512 - 0.5*num.cos((dx*num.arange(ja, jb)-(a-x0))/(b-a)*num.pi)
3513 y[jc:jd] *= 0.5 \
3514 + 0.5*num.cos((dx*num.arange(jc, jd)-(c-x0))/(d-c)*num.pi)
3515 y[jd:] = 0.
3518def span_costaper(a, b, c, d, y, x0, dx):
3519 hi = snapper_w_offset(y.size, x0, dx)
3520 return hi(a), hi(d) - hi(a)
3523def costaper(a, b, c, d, nfreqs, deltaf):
3524 hi = snapper(nfreqs, deltaf)
3525 tap = num.zeros(nfreqs)
3526 tap[hi(a):hi(b)] = 0.5 \
3527 - 0.5*num.cos((deltaf*num.arange(hi(a), hi(b))-a)/(b-a)*num.pi)
3528 tap[hi(b):hi(c)] = 1.
3529 tap[hi(c):hi(d)] = 0.5 \
3530 + 0.5*num.cos((deltaf*num.arange(hi(c), hi(d))-c)/(d-c)*num.pi)
3532 return tap
3535def t2ind(t, tdelta, snap=round):
3536 return int(snap(t/tdelta))
3539def hilbert(x, N=None):
3540 '''
3541 Return the analytic signal of x of length N (its imaginary part is the Hilbert transform of x).
3543 (from scipy.signal, but changed to use fft and ifft from numpy.fft)
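
For example, the envelope of a real-valued signal ``y`` can be obtained,
as a sketch, with::

    env = num.abs(hilbert(y))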
3544 '''
3546 x = num.asarray(x)
3547 if N is None:
3548 N = len(x)
3549 if N <= 0:
3550 raise ValueError('N must be positive.')
3551 if num.iscomplexobj(x):
3552 logger.warning('imaginary part of x ignored.')
3553 x = num.real(x)
3555 Xf = num.fft.fft(x, N, axis=0)
3556 h = num.zeros(N)
3557 if N % 2 == 0:
3558 h[0] = h[N//2] = 1
3559 h[1:N//2] = 2
3560 else:
3561 h[0] = 1
3562 h[1:(N+1)//2] = 2
3564 if len(x.shape) > 1:
3565 h = h[:, num.newaxis]
3566 x = num.fft.ifft(Xf*h)
3567 return x
3570def near(a, b, eps):
3571 return abs(a-b) < eps
3574def coroutine(func):
3575 def wrapper(*args, **kwargs):
3576 gen = func(*args, **kwargs)
3577 next(gen)
3578 return gen
3580 wrapper.__name__ = func.__name__
3581 wrapper.__dict__ = func.__dict__
3582 wrapper.__doc__ = func.__doc__
3583 return wrapper
3586class States(object):
3587 '''
3588 Utility to store channel-specific state in coroutines.
3589 '''
3591 def __init__(self):
3592 self._states = {}
3594 def get(self, tr):
3595 k = tr.nslc_id
3596 if k in self._states:
3597 tmin, deltat, dtype, value = self._states[k]
3598 if (near(tmin, tr.tmin, deltat/100.)
3599 and near(deltat, tr.deltat, deltat/10000.)
3600 and dtype == tr.ydata.dtype):
3602 return value
3604 return None
3606 def set(self, tr, value):
3607 k = tr.nslc_id
3608 if k in self._states and self._states[k][-1] is not value:
3609 self.free(self._states[k][-1])
3611 self._states[k] = (tr.tmax+tr.deltat, tr.deltat, tr.ydata.dtype, value)
3613 def free(self, value):
3614 pass
3617@coroutine
3618def co_list_append(list):
3619 while True:
3620 list.append((yield))
3623class ScipyBug(Exception):
3624 pass
3627@coroutine
3628def co_lfilter(target, b, a):
3629 '''
3630 Successively filter broken continuous trace data (coroutine).
3632 Create coroutine which takes :py:class:`Trace` objects, filters their data
3633 through :py:func:`scipy.signal.lfilter` and sends new :py:class:`Trace`
3634 objects containing the filtered data to target. This is useful if one
3635 wants to filter a long continuous time series that is split into many
3636 successive traces, without producing filter artifacts at trace boundaries.
3638 Filter states are kept *per channel*, specifically, for each (network,
3639 station, location, channel) combination occurring in the input traces, a
3640 separate state is created and maintained. This makes it possible to filter
3641 multichannel or multistation data with only one :py:func:`co_lfilter`
3642 instance.
3644 Filter state is reset when gaps occur.
3646 Use it like this::
3648 from pyrocko.trace import co_lfilter, co_list_append
3650 filtered_traces = []
3651 pipe = co_lfilter(co_list_append(filtered_traces), b, a)
3652 for trace in traces:
3653 pipe.send(trace)
3655 pipe.close()
3657 '''
3659 try:
3660 states = States()
3661 output = None
3662 while True:
3663 input = (yield)
3665 zi = states.get(input)
3666 if zi is None:
3667 zi = num.zeros(max(len(a), len(b))-1, dtype=float)
3669 output = input.copy(data=False)
3670 try:
3671 ydata, zf = signal.lfilter(b, a, input.get_ydata(), zi=zi)
3672 except ValueError:
3673 raise ScipyBug(
3674 'signal.lfilter failed: could be related to a bug '
3675 'in some older scipy versions, e.g. on opensuse42.1')
3677 output.set_ydata(ydata)
3678 states.set(input, zf)
3679 target.send(output)
3681 except GeneratorExit:
3682 target.close()
3685def co_antialias(target, q, n=None, ftype='fir'):
3686 b, a, n = util.decimate_coeffs(q, n, ftype)
3687 anti = co_lfilter(target, b, a)
3688 return anti
3691@coroutine
3692def co_dropsamples(target, q, nfir):
3693 try:
3694 states = States()
3695 while True:
3696 tr = (yield)
3697 newdeltat = q * tr.deltat
3698 ioffset = states.get(tr)
3699 if ioffset is None:
3700 # for fir filter, the first nfir samples are polluted by
3701 # boundary effects; cut them off.
3702 # for iir this may be (much) more; we do not correct for that.
3703 # place sampling instants at times which are multiples of the
3704 # new sampling interval.
3705 newtmin_want = math.ceil(
3706 (tr.tmin+(nfir+1)*tr.deltat) / newdeltat) * newdeltat \
3707 - (nfir/2*tr.deltat)
3708 ioffset = int(round((newtmin_want - tr.tmin)/tr.deltat))
3709 if ioffset < 0:
3710 ioffset = ioffset % q
3712 newtmin_have = tr.tmin + ioffset * tr.deltat
3713 newtr = tr.copy(data=False)
3714 newtr.deltat = newdeltat
3715 # because the fir kernel shifts data by nfir/2 samples:
3716 newtr.tmin = newtmin_have - (nfir/2*tr.deltat)
3717 newtr.set_ydata(tr.get_ydata()[ioffset::q].copy())
3718 states.set(tr, (ioffset % q - tr.data_len() % q) % q)
3719 target.send(newtr)
3721 except GeneratorExit:
3722 target.close()
3725def co_downsample(target, q, n=None, ftype='fir'):
3726 '''
3727 Successively downsample broken continuous trace data (coroutine).
3729 Create coroutine which takes :py:class:`Trace` objects, downsamples their
3730 data and sends new :py:class:`Trace` objects containing the downsampled
3731 data to target. This is useful if one wants to downsample a long
3732 continuous time series that is split into many successive traces, without
3733 producing filter artifacts and gaps at trace boundaries.
3735 Filter states are kept *per channel*, specifically, for each (network,
3736 station, location, channel) combination occurring in the input traces, a
3737 separate state is created and maintained. This makes it possible to filter
3738 multichannel or multistation data with only one :py:func:`co_lfilter`
3739 instance.
3741 Filter state is reset when gaps occur. The sampling instants are chosen
3742 so that they occur at (or as close as possible to) even multiples of the
3743 sampling interval of the downsampled trace (based on system time).
3744 '''
3746 b, a, n = util.decimate_coeffs(q, n, ftype)
3747 return co_antialias(co_dropsamples(target, q, n), q, n, ftype)
3750@coroutine
3751def co_downsample_to(target, deltat):
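'''
Successively downsample broken continuous trace data to a given sampling
interval (coroutine).

Like :py:func:`co_downsample`, but takes the target sampling interval
``deltat`` [s] and sets up the required decimation cascade on the fly. A
separate pipeline is kept per decimation sequence.

Usage sketch::

    traces_out = []
    pipe = co_downsample_to(co_list_append(traces_out), 0.1)
    for tr in traces_in:
        pipe.send(tr)

    pipe.close()
'''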
3753 decimators = {}
3754 try:
3755 while True:
3756 tr = (yield)
3757 ratio = deltat / tr.deltat
3758 rratio = round(ratio)
3759 if abs(rratio - ratio)/ratio > 0.0001:
3760 raise util.UnavailableDecimation('ratio = %g' % ratio)
3762 deci_seq = tuple(x for x in util.decitab(int(rratio)) if x != 1)
3763 if deci_seq not in decimators:
3764 pipe = target
3765 for q in deci_seq[::-1]:
3766 pipe = co_downsample(pipe, q)
3768 decimators[deci_seq] = pipe
3770 decimators[deci_seq].send(tr)
3772 except GeneratorExit:
3773 for g in decimators.values():
3774 g.close()
3777class DomainChoice(StringChoice):
3778 choices = [
3779 'time_domain',
3780 'frequency_domain',
3781 'envelope',
3782 'absolute',
3783 'cc_max_norm']
3786class MisfitSetup(Object):
3787 '''
3788 Contains the misfit setup to be used in :py:meth:`Trace.misfit`.
3790 :param description: Description of the setup
3791 :param norm: L-norm classifier
3792 :param taper: Object of :py:class:`Taper`
3793 :param filter: Object of :py:class:`~pyrocko.response.FrequencyResponse`
3794 :param domain: ['time_domain', 'frequency_domain', 'envelope', 'absolute',
3795 'cc_max_norm']
3797 Can be dumped to a YAML file.
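
Example, as a sketch with illustrative values::

    setup = MisfitSetup(
        description='Time domain misfit with 10 s taper',
        norm=2,
        taper=CosTaper(0., 1., 9., 10.),
        domain='time_domain')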
3798 '''
3800 xmltagname = 'misfitsetup'
3801 description = String.T(optional=True)
3802 norm = Int.T(optional=False)
3803 taper = Taper.T(optional=False)
3804 filter = FrequencyResponse.T(optional=True)
3805 domain = DomainChoice.T(default='time_domain')
3808def equalize_sampling_rates(trace_1, trace_2):
3809 '''
3810 Equalize sampling rates of two traces (reduce higher sampling rate to
3811 lower).
3813 :param trace_1: :py:class:`Trace` object
3814 :param trace_2: :py:class:`Trace` object
3816 Returns both traces; where resampling is needed, a downsampled copy replaces the original.
3817 '''
3819 if same_sampling_rate(trace_1, trace_2):
3820 return trace_1, trace_2
3822 if trace_1.deltat < trace_2.deltat:
3823 t1_out = trace_1.copy()
3824 t1_out.downsample_to(deltat=trace_2.deltat, snap=True)
3825 logger.debug('Trace downsampled (return copy of trace): %s'
3826 % '.'.join(t1_out.nslc_id))
3827 return t1_out, trace_2
3829 elif trace_1.deltat > trace_2.deltat:
3830 t2_out = trace_2.copy()
3831 t2_out.downsample_to(deltat=trace_1.deltat, snap=True)
3832 logger.debug('Trace downsampled (return copy of trace): %s'
3833 % '.'.join(t2_out.nslc_id))
3834 return trace_1, t2_out
3837def Lx_norm(u, v, norm=2):
3838 '''
3839 Calculate the misfit value *m* and the normalization divisor *n*
3840 according to the given norm.
3842 The normalization divisor *n* is calculated from ``v``.
3844 :param u: :py:class:`numpy.ndarray`
3845 :param v: :py:class:`numpy.ndarray`
3846 :param norm: norm exponent (default: 2)
3848 ``u`` and ``v`` must be of the same size.
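
Example (L2 norm, illustrative arrays)::

    import numpy as num

    u = num.array([1., 2., 3.])
    v = num.array([1., 2., 4.])
    m, n = Lx_norm(u, v, norm=2)  # m == 1.0, n == num.sqrt(21.)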
3849 '''
3851 if norm == 1:
3852 return (
3853 num.sum(num.abs(v-u)),
3854 num.sum(num.abs(v)))
3856 elif norm == 2:
3857 return (
3858 num.sqrt(num.sum((v-u)**2)),
3859 num.sqrt(num.sum(v**2)))
3861 else:
3862 return (
3863 num.power(num.sum(num.abs(num.power(v - u, norm))), 1./norm),
3864 num.power(num.sum(num.abs(num.power(v, norm))), 1./norm))
3867def do_downsample(tr, deltat):
3868 if abs(tr.deltat - deltat) / tr.deltat > 1e-6:
3869 tr = tr.copy()
3870 tr.downsample_to(deltat, snap=True, demean=False)
3871 else:
3872 if abs(round(tr.tmin/tr.deltat) - tr.tmin/tr.deltat) > 1e-6 or abs(round(tr.tmax/tr.deltat) - tr.tmax/tr.deltat) > 1e-6:
3873 tr = tr.copy()
3874 tr.snap()
3875 return tr
3878def do_extend(tr, tmin, tmax):
3879 if tmin < tr.tmin or tmax > tr.tmax:
3880 tr = tr.copy()
3881 tr.extend(tmin=tmin, tmax=tmax, fillmethod='repeat')
3883 return tr
3886def do_pre_taper(tr, taper):
3887 return tr.taper(taper, inplace=False, chop=True)
3890def do_fft(tr, filter):
3891 if filter is None:
3892 return tr
3893 else:
3894 ndata = tr.ydata.size
3895 nfft = nextpow2(ndata)
3896 padded = num.zeros(nfft, dtype=float)
3897 padded[:ndata] = tr.ydata
3898 spectrum = num.fft.rfft(padded)
3899 df = 1.0 / (tr.deltat * nfft)
3900 frequencies = num.arange(spectrum.size)*df
3901 return [tr, frequencies, spectrum]
3904def do_filter(inp, filter):
3905 if filter is None:
3906 return inp
3907 else:
3908 tr, frequencies, spectrum = inp
3909 spectrum *= filter.evaluate(frequencies)
3910 return [tr, frequencies, spectrum]
3913def do_ifft(inp):
3914 if isinstance(inp, Trace):
3915 return inp
3916 else:
3917 tr, _, spectrum = inp
3918 ndata = tr.ydata.size
3919 tr = tr.copy(data=False)
3920 tr.set_ydata(num.fft.irfft(spectrum)[:ndata])
3921 return tr
3924def check_alignment(t1, t2):
3925 if abs(t1.tmin-t2.tmin) > t1.deltat * 1e-4 or \
3926 abs(t1.tmax - t2.tmax) > t1.deltat * 1e-4 or \
3927 t1.ydata.shape != t2.ydata.shape:
3928 raise MisalignedTraces(
3929 'Cannot calculate misfit of %s and %s due to misaligned '
3930 'traces.' % ('.'.join(t1.nslc_id), '.'.join(t2.nslc_id)))