Coverage for /usr/local/lib/python3.11/dist-packages/pyrocko/trace.py: 77%
1806 statements
« prev ^ index » next coverage.py v6.5.0, created at 2024-09-24 11:43 +0000
« prev ^ index » next coverage.py v6.5.0, created at 2024-09-24 11:43 +0000
1# https://pyrocko.org - GPLv3
2#
3# The Pyrocko Developers, 21st Century
4# ---|P------/S----------~Lg----------
6'''
7Basic signal processing for seismic waveforms.
8'''
10import time
11import math
12import copy
13import logging
14import hashlib
15import re
16from collections import defaultdict
18import numpy as num
19from scipy import signal
21from pyrocko import util, orthodrome, pchain, model, signal_ext
22from pyrocko.util import reuse
23from pyrocko.guts import Object, Float, Int, String, List, \
24 StringChoice, Timestamp
25from pyrocko.guts_array import Array
26from pyrocko.model import squirrel_content
28# backward compatibility
29from pyrocko.util import UnavailableDecimation # noqa
30from pyrocko.response import ( # noqa
31 FrequencyResponse, Evalresp, InverseEvalresp, PoleZeroResponse,
32 ButterworthResponse, SampledResponse, IntegrationResponse,
33 DifferentiationResponse, MultiplyResponse)
36guts_prefix = 'pf'
38logger = logging.getLogger('pyrocko.trace')
41g_tapered_coeffs_cache = {}
42g_one_response = FrequencyResponse()
45@squirrel_content
46class Trace(Object):
48 '''
49 Create new trace object.
51 A ``Trace`` object represents a single continuous strip of evenly sampled
52 time series data. It is built from a 1D NumPy array containing the data
53 samples and some attributes describing its beginning and ending time, its
54 sampling rate and four string identifiers (its network, station, location
55 and channel code).
57 :param network: network code
58 :param station: station code
59 :param location: location code
60 :param channel: channel code
61 :param extra: extra code
62 :param tmin: system time of first sample in [s]
63 :param tmax: system time of last sample in [s] (if set to ``None`` it is
64 computed from length of ``ydata``)
65 :param deltat: sampling interval in [s]
66 :param ydata: 1D numpy array with data samples (can be ``None`` when
67 ``tmax`` is not ``None``)
68 :param mtime: optional modification time
70 :param meta: additional meta information (not used, but maintained by the
71 library)
73 The length of the network, station, location and channel codes is not
74 resricted by this software, but data formats like SAC, Mini-SEED or GSE
75 have different limits on the lengths of these codes. The codes set here are
76 silently truncated when the trace is stored
77 '''
79 network = String.T(default='', help='Deployment/network code (1-8)')
80 station = String.T(default='STA', help='Station code (1-5)')
81 location = String.T(default='', help='Location code (0-2)')
82 channel = String.T(default='', help='Channel code (3)')
83 extra = String.T(default='', help='Extra/custom code')
85 tmin = Timestamp.T(default=Timestamp.D('1970-01-01 00:00:00'))
86 tmax = Timestamp.T()
88 deltat = Float.T(default=1.0)
89 ydata = Array.T(optional=True, shape=(None,), serialize_as='base64+meta')
91 mtime = Timestamp.T(optional=True)
93 cached_frequencies = {}
95 def __init__(self, network='', station='STA', location='', channel='',
96 tmin=0., tmax=None, deltat=1., ydata=None, mtime=None,
97 meta=None, extra=''):
99 Object.__init__(self, init_props=False)
101 time_float = util.get_time_float()
103 if not isinstance(tmin, time_float):
104 tmin = Trace.tmin.regularize_extra(tmin)
106 if tmax is not None and not isinstance(tmax, time_float):
107 tmax = Trace.tmax.regularize_extra(tmax)
109 if mtime is not None and not isinstance(mtime, time_float):
110 mtime = Trace.mtime.regularize_extra(mtime)
112 if ydata is not None and not isinstance(ydata, num.ndarray):
113 ydata = Trace.ydata.regularize_extra(ydata)
115 self._growbuffer = None
117 tmin = time_float(tmin)
118 if tmax is not None:
119 tmax = time_float(tmax)
121 if mtime is None:
122 mtime = time_float(time.time())
124 self.network, self.station, self.location, self.channel, \
125 self.extra = [
126 reuse(x) for x in (
127 network, station, location, channel, extra)]
129 self.tmin = tmin
130 self.deltat = deltat
132 if tmax is None:
133 if ydata is not None:
134 self.tmax = self.tmin + (ydata.size-1)*self.deltat
135 else:
136 raise Exception(
137 'fixme: trace must be created with tmax or ydata')
138 else:
139 n = int(round((tmax - self.tmin) / self.deltat)) + 1
140 self.tmax = self.tmin + (n - 1) * self.deltat
142 self.meta = meta
143 self.ydata = ydata
144 self.mtime = mtime
145 self._update_ids()
146 self.file = None
147 self._pchain = None
149 def __str__(self):
150 fmt = min(9, max(0, -int(math.floor(math.log10(self.deltat)))))
151 s = 'Trace (%s, %s, %s, %s)\n' % self.nslc_id
152 s += ' timerange: %s - %s\n' % (
153 util.time_to_str(self.tmin, format=fmt),
154 util.time_to_str(self.tmax, format=fmt))
156 s += ' delta t: %g\n' % self.deltat
157 if self.meta:
158 for k in sorted(self.meta.keys()):
159 s += ' %s: %s\n' % (k, self.meta[k])
160 return s
162 @property
163 def codes(self):
164 from pyrocko.squirrel import model
165 return model.CodesNSLCE(
166 self.network, self.station, self.location, self.channel,
167 self.extra)
169 @property
170 def time_span(self):
171 return self.tmin, self.tmax
173 @property
174 def summary_entries(self):
175 return (
176 self.__class__.__name__,
177 str(self.codes),
178 self.str_time_span,
179 '%g' % (1.0/self.deltat))
181 @property
182 def summary_stats_entries(self):
183 return tuple('%13.7g' % v for v in (
184 self.ydata.min(),
185 self.ydata.max(),
186 self.ydata.mean(),
187 self.ydata.std()))
189 @property
190 def summary(self):
191 return util.fmt_summary(self.summary_entries, (10, 20, 55, 0))
193 @property
194 def summary_stats(self):
195 return self.summary + ' | ' + util.fmt_summary(
196 self.summary_stats_entries, (12, 12, 12, 12))
198 def __getstate__(self):
199 return (self.network, self.station, self.location, self.channel,
200 self.tmin, self.tmax, self.deltat, self.mtime,
201 self.ydata, self.meta, self.extra)
203 def __setstate__(self, state):
204 if len(state) == 11:
205 self.network, self.station, self.location, self.channel, \
206 self.tmin, self.tmax, self.deltat, self.mtime, \
207 self.ydata, self.meta, self.extra = state
209 elif len(state) == 12:
210 # backward compatibility 0
211 self.network, self.station, self.location, self.channel, \
212 self.tmin, self.tmax, self.deltat, self.mtime, \
213 self.ydata, self.meta, _, self.extra = state
215 elif len(state) == 10:
216 # backward compatibility 1
217 self.network, self.station, self.location, self.channel, \
218 self.tmin, self.tmax, self.deltat, self.mtime, \
219 self.ydata, self.meta = state
221 self.extra = ''
223 else:
224 # backward compatibility 2
225 self.network, self.station, self.location, self.channel, \
226 self.tmin, self.tmax, self.deltat, self.mtime = state
227 self.ydata = None
228 self.meta = None
229 self.extra = ''
231 self._growbuffer = None
232 self._update_ids()
234 def hash(self, unsafe=False):
235 sha1 = hashlib.sha1()
236 for code in self.nslc_id:
237 sha1.update(code.encode())
238 sha1.update(self.tmin.hex().encode())
239 sha1.update(self.tmax.hex().encode())
240 sha1.update(self.deltat.hex().encode())
242 if self.ydata is not None:
243 if not unsafe:
244 sha1.update(memoryview(self.ydata))
245 else:
246 sha1.update(memoryview(self.ydata[:32]))
247 sha1.update(memoryview(self.ydata[-32:]))
249 return sha1.hexdigest()
251 def name(self):
252 '''
253 Get a short string description.
254 '''
256 s = '%s.%s.%s.%s, %s, %s' % (self.nslc_id + (
257 util.time_to_str(self.tmin),
258 util.time_to_str(self.tmax)))
260 return s
262 def __eq__(self, other):
263 return (
264 isinstance(other, Trace)
265 and self.network == other.network
266 and self.station == other.station
267 and self.location == other.location
268 and self.channel == other.channel
269 and (abs(self.deltat - other.deltat)
270 < (self.deltat + other.deltat)*1e-6)
271 and abs(self.tmin-other.tmin) < self.deltat*0.01
272 and abs(self.tmax-other.tmax) < self.deltat*0.01
273 and num.all(self.ydata == other.ydata))
275 def almost_equal(self, other, rtol=1e-5, atol=0.0):
276 return (
277 self.network == other.network
278 and self.station == other.station
279 and self.location == other.location
280 and self.channel == other.channel
281 and (abs(self.deltat - other.deltat)
282 < (self.deltat + other.deltat)*1e-6)
283 and abs(self.tmin-other.tmin) < self.deltat*0.01
284 and abs(self.tmax-other.tmax) < self.deltat*0.01
285 and num.allclose(self.ydata, other.ydata, rtol=rtol, atol=atol))
287 def assert_almost_equal(self, other, rtol=1e-5, atol=0.0):
289 assert self.network == other.network, \
290 'network codes differ: %s, %s' % (self.network, other.network)
291 assert self.station == other.station, \
292 'station codes differ: %s, %s' % (self.station, other.station)
293 assert self.location == other.location, \
294 'location codes differ: %s, %s' % (self.location, other.location)
295 assert self.channel == other.channel, 'channel codes differ'
296 assert (abs(self.deltat - other.deltat)
297 < (self.deltat + other.deltat)*1e-6), \
298 'sampling intervals differ %g, %g' % (self.deltat, other.delta)
299 assert abs(self.tmin-other.tmin) < self.deltat*0.01, \
300 'start times differ: %s, %s' % (
301 util.time_to_str(self.tmin), util.time_to_str(other.tmin))
302 assert abs(self.tmax-other.tmax) < self.deltat*0.01, \
303 'end times differ: %s, %s' % (
304 util.time_to_str(self.tmax), util.time_to_str(other.tmax))
306 assert num.allclose(self.ydata, other.ydata, rtol=rtol, atol=atol), \
307 'trace values differ'
309 def __hash__(self):
310 return id(self)
312 def __call__(self, t, clip=False, snap=round):
313 it = int(snap((t-self.tmin)/self.deltat))
314 if clip:
315 it = max(0, min(it, self.ydata.size-1))
316 else:
317 if it < 0 or self.ydata.size <= it:
318 raise IndexError()
320 return self.tmin+it*self.deltat, self.ydata[it]
322 def interpolate(self, t, clip=False):
323 '''
324 Value of trace between supporting points through linear interpolation.
326 :param t: time instant
327 :param clip: whether to clip indices to trace ends
328 '''
330 t0, y0 = self(t, clip=clip, snap=math.floor)
331 t1, y1 = self(t, clip=clip, snap=math.ceil)
332 if t0 == t1:
333 return y0
334 else:
335 return y0+(t-t0)/(t1-t0)*(y1-y0)
337 def index_clip(self, i):
338 '''
339 Clip index to valid range.
340 '''
342 return min(max(0, i), self.ydata.size)
344 def add(self, other, interpolate=True, left=0., right=0.):
345 '''
346 Add values of other trace (self += other).
348 Add values of ``other`` trace to the values of ``self``, where it
349 intersects with ``other``. This method does not change the extent of
350 ``self``. If ``interpolate`` is ``True`` (the default), the values of
351 ``other`` to be added are interpolated at sampling instants of
352 ``self``. Linear interpolation is performed. In this case the sampling
353 rate of ``other`` must be equal to or lower than that of ``self``. If
354 ``interpolate`` is ``False``, the sampling rates of the two traces must
355 match.
356 '''
358 if interpolate:
359 assert self.deltat <= other.deltat \
360 or same_sampling_rate(self, other)
362 other_xdata = other.get_xdata()
363 xdata = self.get_xdata()
364 self.ydata += num.interp(
365 xdata, other_xdata, other.ydata, left=left, right=left)
366 else:
367 assert self.deltat == other.deltat
368 ioff = int(round((other.tmin-self.tmin)/self.deltat))
369 ibeg = max(0, ioff)
370 iend = min(self.data_len(), ioff+other.data_len())
371 self.ydata[ibeg:iend] += other.ydata[ibeg-ioff:iend-ioff]
373 def mult(self, other, interpolate=True):
374 '''
375 Muliply with values of other trace ``(self *= other)``.
377 Multiply values of ``other`` trace to the values of ``self``, where it
378 intersects with ``other``. This method does not change the extent of
379 ``self``. If ``interpolate`` is ``True`` (the default), the values of
380 ``other`` to be multiplied are interpolated at sampling instants of
381 ``self``. Linear interpolation is performed. In this case the sampling
382 rate of ``other`` must be equal to or lower than that of ``self``. If
383 ``interpolate`` is ``False``, the sampling rates of the two traces must
384 match.
385 '''
387 if interpolate:
388 assert self.deltat <= other.deltat or \
389 same_sampling_rate(self, other)
391 other_xdata = other.get_xdata()
392 xdata = self.get_xdata()
393 self.ydata *= num.interp(
394 xdata, other_xdata, other.ydata, left=0., right=0.)
395 else:
396 assert self.deltat == other.deltat
397 ibeg1 = int(round((other.tmin-self.tmin)/self.deltat))
398 ibeg2 = int(round((self.tmin-other.tmin)/self.deltat))
399 iend1 = int(round((other.tmax-self.tmin)/self.deltat))+1
400 iend2 = int(round((self.tmax-other.tmin)/self.deltat))+1
402 ibeg1 = self.index_clip(ibeg1)
403 iend1 = self.index_clip(iend1)
404 ibeg2 = self.index_clip(ibeg2)
405 iend2 = self.index_clip(iend2)
407 self.ydata[ibeg1:iend1] *= other.ydata[ibeg2:iend2]
409 def max(self):
410 '''
411 Get time and value of data maximum.
412 '''
414 i = num.argmax(self.ydata)
415 return self.tmin + i*self.deltat, self.ydata[i]
417 def min(self):
418 '''
419 Get time and value of data minimum.
420 '''
422 i = num.argmin(self.ydata)
423 return self.tmin + i*self.deltat, self.ydata[i]
425 def absmax(self):
426 '''
427 Get time and value of maximum of the absolute of data.
428 '''
430 tmi, mi = self.min()
431 tma, ma = self.max()
432 if abs(mi) > abs(ma):
433 return tmi, abs(mi)
434 else:
435 return tma, abs(ma)
437 def set_codes(
438 self, network=None, station=None, location=None, channel=None,
439 extra=None):
441 '''
442 Set network, station, location, and channel codes.
443 '''
445 if network is not None:
446 self.network = network
447 if station is not None:
448 self.station = station
449 if location is not None:
450 self.location = location
451 if channel is not None:
452 self.channel = channel
453 if extra is not None:
454 self.extra = extra
456 self._update_ids()
458 def set_network(self, network):
459 self.network = network
460 self._update_ids()
462 def set_station(self, station):
463 self.station = station
464 self._update_ids()
466 def set_location(self, location):
467 self.location = location
468 self._update_ids()
470 def set_channel(self, channel):
471 self.channel = channel
472 self._update_ids()
474 def overlaps(self, tmin, tmax):
475 '''
476 Check if trace has overlap with a given time span.
477 '''
478 return not (tmax < self.tmin or self.tmax < tmin)
480 def is_relevant(self, tmin, tmax, selector=None):
481 '''
482 Check if trace has overlap with a given time span and matches a
483 condition callback. (internal use)
484 '''
486 return not (tmax <= self.tmin or self.tmax < tmin) \
487 and (selector is None or selector(self))
489 def _update_ids(self):
490 '''
491 Update dependent ids.
492 '''
494 self.full_id = (
495 self.network, self.station, self.location, self.channel, self.tmin)
496 self.nslc_id = reuse(
497 (self.network, self.station, self.location, self.channel))
499 def prune_from_reuse_cache(self):
500 util.deuse(self.nslc_id)
501 util.deuse(self.network)
502 util.deuse(self.station)
503 util.deuse(self.location)
504 util.deuse(self.channel)
506 def set_mtime(self, mtime):
507 '''
508 Set modification time of the trace.
509 '''
511 self.mtime = mtime
513 def get_xdata(self):
514 '''
515 Create array for time axis.
516 '''
518 if self.ydata is None:
519 raise NoData()
521 return self.tmin \
522 + num.arange(len(self.ydata), dtype=num.float64) * self.deltat
524 def get_ydata(self):
525 '''
526 Get data array.
527 '''
529 if self.ydata is None:
530 raise NoData()
532 return self.ydata
534 def set_ydata(self, new_ydata):
535 '''
536 Replace data array.
537 '''
539 self.drop_growbuffer()
540 self.ydata = new_ydata
541 self.tmax = self.tmin+(len(self.ydata)-1)*self.deltat
543 def data_len(self):
544 if self.ydata is not None:
545 return self.ydata.size
546 else:
547 return int(round((self.tmax-self.tmin)/self.deltat)) + 1
549 def drop_data(self):
550 '''
551 Forget data, make dataless trace.
552 '''
554 self.drop_growbuffer()
555 self.ydata = None
557 def drop_growbuffer(self):
558 '''
559 Detach the traces grow buffer.
560 '''
562 self._growbuffer = None
563 self._pchain = None
565 def copy(self, data=True):
566 '''
567 Make a deep copy of the trace.
568 '''
570 tracecopy = copy.copy(self)
571 tracecopy.drop_growbuffer()
572 if data:
573 tracecopy.ydata = self.ydata.copy()
574 tracecopy.meta = copy.deepcopy(self.meta)
575 return tracecopy
577 def crop_zeros(self):
578 '''
579 Remove any zeros at beginning and end.
580 '''
582 indices = num.where(self.ydata != 0.0)[0]
583 if indices.size == 0:
584 raise NoData()
586 ibeg = indices[0]
587 iend = indices[-1]+1
588 if ibeg == 0 and iend == self.ydata.size-1:
589 return
591 self.drop_growbuffer()
592 self.ydata = self.ydata[ibeg:iend].copy()
593 self.tmin = self.tmin+ibeg*self.deltat
594 self.tmax = self.tmin+(len(self.ydata)-1)*self.deltat
595 self._update_ids()
597 def append(self, data):
598 '''
599 Append data to the end of the trace.
601 To make this method efficient when successively very few or even single
602 samples are appended, a larger grow buffer is allocated upon first
603 invocation. The traces data is then changed to be a view into the
604 currently filled portion of the grow buffer array.
605 '''
607 assert self.ydata.dtype == data.dtype
608 newlen = data.size + self.ydata.size
609 if self._growbuffer is None or self._growbuffer.size < newlen:
610 self._growbuffer = num.empty(newlen*2, dtype=self.ydata.dtype)
611 self._growbuffer[:self.ydata.size] = self.ydata
612 self._growbuffer[self.ydata.size:newlen] = data
613 self.ydata = self._growbuffer[:newlen]
614 self.tmax = self.tmin + (newlen-1)*self.deltat
616 def chop(
617 self, tmin, tmax, inplace=True, include_last=False,
618 snap=(round, round), want_incomplete=True):
620 '''
621 Cut the trace to given time span.
623 If the ``inplace`` argument is True (the default) the trace is cut in
624 place, otherwise a new trace with the cut part is returned. By
625 default, the indices where to start and end the trace data array are
626 determined by rounding of ``tmin`` and ``tmax`` to sampling instances
627 using Python's :py:func:`round` function. This behaviour can be changed
628 with the ``snap`` argument, which takes a tuple of two functions (one
629 for the lower and one for the upper end) to be used instead of
630 :py:func:`round`. The last sample is by default not included unless
631 ``include_last`` is set to True. If the given time span exceeds the
632 available time span of the trace, the available part is returned,
633 unless ``want_incomplete`` is set to False - in that case, a
634 :py:exc:`NoData` exception is raised. This exception is always raised,
635 when the requested time span does dot overlap with the trace's time
636 span.
637 '''
639 if want_incomplete:
640 if tmax <= self.tmin-self.deltat or self.tmax+self.deltat < tmin:
641 raise NoData()
642 else:
643 if tmin < self.tmin or self.tmax < tmax:
644 raise NoData()
646 ibeg = max(0, t2ind(tmin-self.tmin, self.deltat, snap[0]))
647 iplus = 0
648 if include_last:
649 iplus = 1
651 iend = min(
652 self.data_len(),
653 t2ind(tmax-self.tmin, self.deltat, snap[1])+iplus)
655 if ibeg >= iend:
656 raise NoData()
658 obj = self
659 if not inplace:
660 obj = self.copy(data=False)
662 self.drop_growbuffer()
663 if self.ydata is not None:
664 obj.ydata = self.ydata[ibeg:iend].copy()
665 else:
666 obj.ydata = None
668 obj.tmin = obj.tmin+ibeg*obj.deltat
669 obj.tmax = obj.tmin+((iend-ibeg)-1)*obj.deltat
671 obj._update_ids()
673 return obj
675 def downsample(
676 self, ndecimate, snap=False, demean=False, ftype='fir-remez',
677 cut=False):
679 '''
680 Downsample (decimate) trace by a given integer factor.
682 Antialiasing filter details:
684 * ``iir``: A Chebyshev type I digital filter of order 8 with maximum
685 ripple of 0.05 dB in the passband.
686 * ``fir``: A FIR filter using a Hamming window and 31 taps and a
687 well-behaved phase response.
688 * ``fir-remez``: A FIR filter based on the Remez exchange algorithm
689 with ``45*ndecimate`` taps and a cutoff at 75% Nyquist frequency.
691 Comparison of the digital filters:
693 .. figure :: ../../static/downsampling-filter-comparison.png
694 :width: 60%
695 :alt: Comparison of the downsampling filters.
697 See also :py:meth:`Trace.downsample_to`.
699 :param ndecimate:
700 Decimation factor, avoid values larger than 8.
701 :type ndecimate:
702 int
704 :param snap:
705 Whether to put the new sampling instants closest to multiples of
706 the sampling rate (according to absolute time).
707 :type snap:
708 bool
710 :param demean:
711 Whether to demean the signal before filtering.
712 :type demean:
713 bool
715 :param ftype:
716 Which FIR filter to use, choose from ``'iir'``, ``'fir'``,
717 ``'fir-remez'``. Default is ``'fir-remez'``.
719 :param cut:
720 Whether to cut off samples in the beginning of the trace which
721 are polluted by artifacts of the anti-aliasing filter.
722 :type cut:
723 bool
724 '''
725 newdeltat = self.deltat*ndecimate
726 b, a, n = util.decimate_coeffs(ndecimate, None, ftype)
727 if snap:
728 ilag = int(round((math.ceil(
729 (self.tmin+(n//2 if cut else 0)*self.deltat) /
730 newdeltat) * newdeltat - self.tmin) / self.deltat))
731 else:
732 ilag = (n//2 if cut else 0)
734 data = self.ydata.astype(num.float64)
735 if data.size != 0:
736 if demean:
737 data -= num.mean(data)
738 y = signal.lfilter(b, a, data)
739 self.ydata = y[n//2+ilag::ndecimate].copy()
740 else:
741 self.ydata = data
743 self.tmin += ilag * self.deltat
744 self.deltat = reuse(self.deltat*ndecimate)
745 self.tmax = self.tmin+(len(self.ydata)-1)*self.deltat
746 self._update_ids()
748 def downsample_to(
749 self, deltat, snap=False, allow_upsample_max=1, demean=False,
750 ftype='fir-remez', cut=False):
752 '''
753 Downsample to given sampling rate.
755 Tries to downsample the trace to a target sampling interval of
756 ``deltat``. This runs :py:meth:`downsample` one or several times. If
757 ``allow_upsample_max`` is set to a value larger than 1, intermediate
758 upsampling steps are allowed, in order to increase the number of
759 possible downsampling ratios.
761 If the requested ratio is not supported, an exception of type
762 :py:exc:`pyrocko.util.UnavailableDecimation` is raised.
764 The downsampled trace will be shorter than the input trace because the
765 anti-aliasing filter introduces edge effects. If `cut` is selected,
766 additionally, polluted samples in the beginning of the trace are
767 removed. The approximate amount of cutoff which will be produced by a
768 given downsampling configuration can be estimated with
769 :py:func:`downsample_tpad`.
771 See also: :meth:`Trace.downsample`.
773 :param deltat:
774 Desired sampling interval in [s].
775 :type deltat:
776 float
778 :param allow_upsample_max:
779 Maximum allowed upsampling factor of the signal to achieve the
780 desired sampling interval. Default is ``1``.
781 :type allow_upsample_max:
782 int
784 :param snap:
785 Whether to put the new sampling instants closest to multiples of
786 the sampling rate (according to absolute time).
787 :type snap:
788 bool
790 :param demean:
791 Whether to demean the signal before filtering.
792 :type demean:
793 bool
795 :param ftype:
796 Which FIR filter to use, choose from ``'iir'``, ``'fir'``,
797 ``'fir-remez'``. Default is ``'fir-remez'``.
799 :param cut:
800 Whether to cut off samples in the beginning of the trace which
801 are polluted by artifacts of the anti-aliasing filter.
802 :type demean:
803 bool
804 '''
806 upsratio, deci_seq = _configure_downsampling(
807 self.deltat, deltat, allow_upsample_max)
809 if demean:
810 self.drop_growbuffer()
811 self.ydata = self.ydata.astype(num.float64)
812 self.ydata -= num.mean(self.ydata)
814 if upsratio > 1:
815 self.drop_growbuffer()
816 ydata = self.ydata
817 self.ydata = num.zeros(
818 ydata.size*upsratio-(upsratio-1), ydata.dtype)
819 self.ydata[::upsratio] = ydata
820 for i in range(1, upsratio):
821 self.ydata[i::upsratio] = \
822 float(i)/upsratio * ydata[:-1] \
823 + float(upsratio-i)/upsratio * ydata[1:]
824 self.deltat = self.deltat/upsratio
826 for i, ndecimate in enumerate(deci_seq):
827 self.downsample(
828 ndecimate, snap=snap, demean=False, ftype=ftype, cut=cut)
830 def resample(self, deltat):
831 '''
832 Resample to given sampling rate ``deltat``.
834 Resampling is performed in the frequency domain.
835 '''
837 ndata = self.ydata.size
838 ntrans = nextpow2(ndata)
839 fntrans2 = ntrans * self.deltat/deltat
840 ntrans2 = int(round(fntrans2))
841 deltat2 = self.deltat * float(ntrans)/float(ntrans2)
842 ndata2 = int(round(ndata*self.deltat/deltat2))
843 if abs(fntrans2 - ntrans2) > 1e-7:
844 logger.warning(
845 'resample: requested deltat %g could not be matched exactly: '
846 '%g' % (deltat, deltat2))
848 data = self.ydata
849 data_pad = num.zeros(ntrans, dtype=float)
850 data_pad[:ndata] = data
851 fdata = num.fft.rfft(data_pad)
852 fdata2 = num.zeros((ntrans2+1)//2, dtype=fdata.dtype)
853 n = min(fdata.size, fdata2.size)
854 fdata2[:n] = fdata[:n]
855 data2 = num.fft.irfft(fdata2)
856 data2 = data2[:ndata2]
857 data2 *= float(ntrans2) / float(ntrans)
858 self.deltat = deltat2
859 self.set_ydata(data2)
861 def resample_simple(self, deltat):
862 tyear = 3600*24*365.
864 if deltat == self.deltat:
865 return
867 if abs(self.deltat - deltat) * tyear/deltat < deltat:
868 logger.warning(
869 'resample_simple: less than one sample would have to be '
870 'inserted/deleted per year. Doing nothing.')
871 return
873 ninterval = int(round(deltat / (self.deltat - deltat)))
874 if abs(ninterval) < 20:
875 logger.error(
876 'resample_simple: sample insertion/deletion interval less '
877 'than 20. results would be erroneous.')
878 raise ResamplingFailed()
880 delete = False
881 if ninterval < 0:
882 ninterval = - ninterval
883 delete = True
885 tyearbegin = util.year_start(self.tmin)
887 nmin = int(round((self.tmin - tyearbegin)/deltat))
889 ibegin = (((nmin-1)//ninterval)+1) * ninterval - nmin
890 nindices = (len(self.ydata) - ibegin - 1) / ninterval + 1
891 if nindices > 0:
892 indices = ibegin + num.arange(nindices) * ninterval
893 data_split = num.split(self.ydata, indices)
894 data = []
895 for ln, h in zip(data_split[:-1], data_split[1:]):
896 if delete:
897 ln = ln[:-1]
899 data.append(ln)
900 if not delete:
901 if ln.size == 0:
902 v = h[0]
903 else:
904 v = 0.5*(ln[-1] + h[0])
905 data.append(num.array([v], dtype=ln.dtype))
907 data.append(data_split[-1])
909 ydata_new = num.concatenate(data)
911 self.tmin = tyearbegin + nmin * deltat
912 self.deltat = deltat
913 self.set_ydata(ydata_new)
915 def stretch(self, tmin_new, tmax_new):
916 '''
917 Stretch signal while preserving sample rate using sinc interpolation.
919 :param tmin_new: new time of first sample
920 :param tmax_new: new time of last sample
922 This method can be used to correct for a small linear time drift or to
923 introduce sub-sample time shifts. The amount of stretching is limited
924 to 10% by the implementation and is expected to be much smaller than
925 that by the approximations used.
926 '''
928 i_control = num.array([0, self.ydata.size-1], dtype=num.int64)
929 t_control = num.array([tmin_new, tmax_new], dtype=float)
931 r = (tmax_new - tmin_new) / self.deltat + 1.0
932 n_new = int(round(r))
933 if abs(n_new - r) > 0.001:
934 n_new = int(math.floor(r))
936 assert n_new >= 2
938 tmax_new = tmin_new + (n_new-1) * self.deltat
940 ydata_new = num.empty(n_new, dtype=float)
941 signal_ext.antidrift(i_control, t_control,
942 self.ydata.astype(float),
943 tmin_new, self.deltat, ydata_new)
945 self.tmin = tmin_new
946 self.set_ydata(ydata_new)
947 self._update_ids()
949 def nyquist_check(self, frequency, intro='Corner frequency', warn=True,
950 raise_exception=False):
952 '''
953 Check if a given frequency is above the Nyquist frequency of the trace.
955 :param intro: string used to introduce the warning/error message
956 :param warn: whether to emit a warning
957 :param raise_exception: whether to raise an :py:exc:`AboveNyquist`
958 exception.
959 '''
961 if frequency >= 0.5/self.deltat:
962 message = '%s (%g Hz) is equal to or higher than nyquist ' \
963 'frequency (%g Hz). (Trace %s)' \
964 % (intro, frequency, 0.5/self.deltat, self.name())
965 if warn:
966 logger.warning(message)
967 if raise_exception:
968 raise AboveNyquist(message)
970 def lowpass(self, order, corner, nyquist_warn=True,
971 nyquist_exception=False, demean=True):
973 '''
974 Apply Butterworth lowpass to the trace.
976 :param order: order of the filter
977 :param corner: corner frequency of the filter
979 Mean is removed before filtering.
980 '''
982 self.nyquist_check(
983 corner, 'Corner frequency of lowpass', nyquist_warn,
984 nyquist_exception)
986 (b, a) = _get_cached_filter_coeffs(
987 order, [corner*2.0*self.deltat], btype='low')
989 if len(a) != order+1 or len(b) != order+1:
990 logger.warning(
991 'Erroneous filter coefficients returned by '
992 'scipy.signal.butter(). You may need to downsample the '
993 'signal before filtering.')
995 data = self.ydata.astype(num.float64)
996 if demean:
997 data -= num.mean(data)
998 self.drop_growbuffer()
999 self.ydata = signal.lfilter(b, a, data)
1001 def highpass(self, order, corner, nyquist_warn=True,
1002 nyquist_exception=False, demean=True):
1004 '''
1005 Apply butterworth highpass to the trace.
1007 :param order: order of the filter
1008 :param corner: corner frequency of the filter
1010 Mean is removed before filtering.
1011 '''
1013 self.nyquist_check(
1014 corner, 'Corner frequency of highpass', nyquist_warn,
1015 nyquist_exception)
1017 (b, a) = _get_cached_filter_coeffs(
1018 order, [corner*2.0*self.deltat], btype='high')
1020 data = self.ydata.astype(num.float64)
1021 if len(a) != order+1 or len(b) != order+1:
1022 logger.warning(
1023 'Erroneous filter coefficients returned by '
1024 'scipy.signal.butter(). You may need to downsample the '
1025 'signal before filtering.')
1026 if demean:
1027 data -= num.mean(data)
1028 self.drop_growbuffer()
1029 self.ydata = signal.lfilter(b, a, data)
1031 def bandpass(self, order, corner_hp, corner_lp, demean=True):
1032 '''
1033 Apply butterworth bandpass to the trace.
1035 :param order: order of the filter
1036 :param corner_hp: lower corner frequency of the filter
1037 :param corner_lp: upper corner frequency of the filter
1039 Mean is removed before filtering.
1040 '''
1042 self.nyquist_check(corner_hp, 'Lower corner frequency of bandpass')
1043 self.nyquist_check(corner_lp, 'Higher corner frequency of bandpass')
1044 (b, a) = _get_cached_filter_coeffs(
1045 order,
1046 [corner*2.0*self.deltat for corner in (corner_hp, corner_lp)],
1047 btype='band')
1048 data = self.ydata.astype(num.float64)
1049 if demean:
1050 data -= num.mean(data)
1051 self.drop_growbuffer()
1052 self.ydata = signal.lfilter(b, a, data)
1054 def bandstop(self, order, corner_hp, corner_lp, demean=True):
1055 '''
1056 Apply bandstop (attenuates frequencies in band) to the trace.
1058 :param order: order of the filter
1059 :param corner_hp: lower corner frequency of the filter
1060 :param corner_lp: upper corner frequency of the filter
1062 Mean is removed before filtering.
1063 '''
1065 self.nyquist_check(corner_hp, 'Lower corner frequency of bandstop')
1066 self.nyquist_check(corner_lp, 'Higher corner frequency of bandstop')
1067 (b, a) = _get_cached_filter_coeffs(
1068 order,
1069 [corner*2.0*self.deltat for corner in (corner_hp, corner_lp)],
1070 btype='bandstop')
1071 data = self.ydata.astype(num.float64)
1072 if demean:
1073 data -= num.mean(data)
1074 self.drop_growbuffer()
1075 self.ydata = signal.lfilter(b, a, data)
1077 def envelope(self, inplace=True):
1078 '''
1079 Calculate the envelope of the trace.
1081 :param inplace: calculate envelope in place
1083 The calculation follows:
1085 .. math::
1087 Y' = \\sqrt{Y^2+H(Y)^2}
1089 where H is the Hilbert-Transform of the signal Y.
1090 '''
1092 y = self.ydata.astype(float)
1093 env = num.abs(hilbert(y))
1094 if inplace:
1095 self.drop_growbuffer()
1096 self.ydata = env
1097 else:
1098 tr = self.copy(data=False)
1099 tr.ydata = env
1100 return tr
1102 def taper(self, taperer, inplace=True, chop=False):
1103 '''
1104 Apply a :py:class:`Taper` to the trace.
1106 :param taperer: instance of :py:class:`Taper` subclass
1107 :param inplace: apply taper inplace
1108 :param chop: if ``True``: exclude tapered parts from the resulting
1109 trace
1110 '''
1112 if not inplace:
1113 tr = self.copy()
1114 else:
1115 tr = self
1117 if chop:
1118 i, n = taperer.span(tr.ydata, tr.tmin, tr.deltat)
1119 tr.shift(i*tr.deltat)
1120 tr.set_ydata(tr.ydata[i:i+n])
1122 taperer(tr.ydata, tr.tmin, tr.deltat)
1124 if not inplace:
1125 return tr
1127 def whiten(self, order=6):
1128 '''
1129 Whiten signal in time domain using autoregression and recursive filter.
1131 :param order: order of the autoregression process
1132 '''
1134 b, a = self.whitening_coefficients(order)
1135 self.drop_growbuffer()
1136 self.ydata = signal.lfilter(b, a, self.ydata)
1138 def whitening_coefficients(self, order=6):
1139 ar = yulewalker(self.ydata, order)
1140 b, a = [1.] + ar.tolist(), [1.]
1141 return b, a
1143 def ampspec_whiten(
1144 self,
1145 width,
1146 td_taper='auto',
1147 fd_taper='auto',
1148 pad_to_pow2=True,
1149 demean=True):
1151 '''
1152 Whiten signal via frequency domain using moving average on amplitude
1153 spectra.
1155 :param width: width of smoothing kernel [Hz]
1156 :param td_taper: time domain taper, object of type :py:class:`Taper` or
1157 ``None`` or ``'auto'``.
1158 :param fd_taper: frequency domain taper, object of type
1159 :py:class:`Taper` or ``None`` or ``'auto'``.
1160 :param pad_to_pow2: whether to pad the signal with zeros up to a length
1161 of 2^n
1162 :param demean: whether to demean the signal before tapering
1164 The signal is first demeaned and then tapered using ``td_taper``. Then,
1165 the spectrum is calculated and inversely weighted with a smoothed
1166 version of its amplitude spectrum. A moving average is used for the
1167 smoothing. The smoothed spectrum is then tapered using ``fd_taper``.
1168 Finally, the smoothed and tapered spectrum is back-transformed into the
1169 time domain.
1171 If ``td_taper`` is set to ``'auto'``, ``CosFader(1.0/width)`` is used.
1172 If ``fd_taper`` is set to ``'auto'``, ``CosFader(width)`` is used.
1173 '''
1175 ndata = self.data_len()
1177 if pad_to_pow2:
1178 ntrans = nextpow2(ndata)
1179 else:
1180 ntrans = ndata
1182 df = 1./(ntrans*self.deltat)
1183 nw = int(round(width/df))
1184 if ndata//2+1 <= nw:
1185 raise TraceTooShort(
1186 'Samples in trace: %s, samples needed: %s' % (ndata, nw))
1188 if td_taper == 'auto':
1189 td_taper = CosFader(1./width)
1191 if fd_taper == 'auto':
1192 fd_taper = CosFader(width)
1194 if td_taper:
1195 self.taper(td_taper)
1197 ydata = self.get_ydata().astype(float)
1198 if demean:
1199 ydata -= ydata.mean()
1201 spec = num.fft.rfft(ydata, ntrans)
1203 amp = num.abs(spec)
1204 nspec = amp.size
1205 csamp = num.cumsum(amp)
1206 amp_smoothed = num.empty(nspec, dtype=csamp.dtype)
1207 n1, n2 = nw//2, nw//2 + nspec - nw
1208 amp_smoothed[n1:n2] = (csamp[nw:] - csamp[:-nw]) / nw
1209 amp_smoothed[:n1] = amp_smoothed[n1]
1210 amp_smoothed[n2:] = amp_smoothed[n2-1]
1212 denom = amp_smoothed * amp
1213 numer = amp
1214 eps = num.mean(denom) * 1e-9
1215 if eps == 0.0:
1216 eps = 1e-9
1218 numer += eps
1219 denom += eps
1220 spec *= numer/denom
1222 if fd_taper:
1223 fd_taper(spec, 0., df)
1225 ydata = num.fft.irfft(spec)
1226 self.set_ydata(ydata[:ndata])
1228 def _get_cached_freqs(self, nf, deltaf):
1229 ck = (nf, deltaf)
1230 if ck not in Trace.cached_frequencies:
1231 Trace.cached_frequencies[ck] = deltaf * num.arange(
1232 nf, dtype=float)
1234 return Trace.cached_frequencies[ck]
1236 def bandpass_fft(self, corner_hp, corner_lp):
1237 '''
1238 Apply boxcar bandbpass to trace (in spectral domain).
1239 '''
1241 n = len(self.ydata)
1242 n2 = nextpow2(n)
1243 data = num.zeros(n2, dtype=num.float64)
1244 data[:n] = self.ydata
1245 fdata = num.fft.rfft(data)
1246 freqs = self._get_cached_freqs(len(fdata), 1./(self.deltat*n2))
1247 fdata[0] = 0.0
1248 fdata *= num.logical_and(corner_hp < freqs, freqs < corner_lp)
1249 data = num.fft.irfft(fdata)
1250 self.drop_growbuffer()
1251 self.ydata = data[:n]
1253 def shift(self, tshift):
1254 '''
1255 Time shift the trace.
1256 '''
1258 self.tmin += tshift
1259 self.tmax += tshift
1260 self._update_ids()
1262 def snap(self, inplace=True, interpolate=False):
1263 '''
1264 Shift trace samples to nearest even multiples of the sampling rate.
1266 :param inplace: (boolean) snap traces inplace
1268 If ``inplace`` is ``False`` and the difference of tmin and tmax of
1269 both, the snapped and the original trace is smaller than 0.01 x deltat,
1270 :py:func:`snap` returns the unsnapped instance of the original trace.
1271 '''
1273 tmin = round(self.tmin/self.deltat)*self.deltat
1274 tmax = tmin + (self.ydata.size-1)*self.deltat
1276 if inplace:
1277 xself = self
1278 else:
1279 if abs(self.tmin - tmin) < 1e-2*self.deltat and \
1280 abs(self.tmax - tmax) < 1e-2*self.deltat:
1281 return self
1283 xself = self.copy()
1285 if interpolate:
1286 n = xself.data_len()
1287 ydata_new = num.empty(n, dtype=float)
1288 i_control = num.array([0, n-1], dtype=num.int64)
1289 tref = tmin
1290 t_control = num.array(
1291 [float(xself.tmin-tref), float(xself.tmax-tref)],
1292 dtype=float)
1294 signal_ext.antidrift(i_control, t_control,
1295 xself.ydata.astype(float),
1296 float(tmin-tref), xself.deltat, ydata_new)
1298 xself.ydata = ydata_new
1300 xself.tmin = tmin
1301 xself.tmax = tmax
1302 xself._update_ids()
1304 return xself
1306 def fix_deltat_rounding_errors(self):
1307 '''
1308 Try to undo sampling rate rounding errors.
1310 See :py:func:`fix_deltat_rounding_errors`.
1311 '''
1313 self.deltat = fix_deltat_rounding_errors(self.deltat)
1314 self.tmax = self.tmin + (self.data_len() - 1) * self.deltat
1316 def sta_lta_centered(self, tshort, tlong, quad=True, scalingmethod=1):
1317 '''
1318 Run special STA/LTA filter where the short time window is centered on
1319 the long time window.
1321 :param tshort: length of short time window in [s]
1322 :param tlong: length of long time window in [s]
1323 :param quad: whether to square the data prior to applying the STA/LTA
1324 filter
1325 :param scalingmethod: integer key to select how output values are
1326 scaled / normalized (``1``, ``2``, or ``3``)
1328 ============= ====================================== ===========
1329 Scalingmethod Implementation Range
1330 ============= ====================================== ===========
1331 ``1`` As/Al* Ts/Tl [0,1]
1332 ``2`` (As/Al - 1) / (Tl/Ts - 1) [-Ts/Tl,1]
1333 ``3`` Like ``2`` but clipping range at zero [0,1]
1334 ============= ====================================== ===========
1336 '''
1338 nshort = int(round(tshort/self.deltat))
1339 nlong = int(round(tlong/self.deltat))
1341 assert nshort < nlong
1342 if nlong > len(self.ydata):
1343 raise TraceTooShort(
1344 'Samples in trace: %s, samples needed: %s'
1345 % (len(self.ydata), nlong))
1347 if quad:
1348 sqrdata = self.ydata**2
1349 else:
1350 sqrdata = self.ydata
1352 mavg_short = moving_avg(sqrdata, nshort)
1353 mavg_long = moving_avg(sqrdata, nlong)
1355 self.drop_growbuffer()
1357 if scalingmethod not in (1, 2, 3):
1358 raise Exception('Invalid argument to scalingrange argument.')
1360 if scalingmethod == 1:
1361 self.ydata = mavg_short/mavg_long * float(nshort)/float(nlong)
1362 elif scalingmethod in (2, 3):
1363 self.ydata = (mavg_short/mavg_long - 1.) \
1364 / ((float(nlong)/float(nshort)) - 1)
1366 if scalingmethod == 3:
1367 self.ydata = num.maximum(self.ydata, 0.)
1369 def sta_lta_right(self, tshort, tlong, quad=True, scalingmethod=1):
1370 '''
1371 Run special STA/LTA filter where the short time window is overlapping
1372 with the last part of the long time window.
1374 :param tshort: length of short time window in [s]
1375 :param tlong: length of long time window in [s]
1376 :param quad: whether to square the data prior to applying the STA/LTA
1377 filter
1378 :param scalingmethod: integer key to select how output values are
1379 scaled / normalized (``1``, ``2``, or ``3``)
1381 ============= ====================================== ===========
1382 Scalingmethod Implementation Range
1383 ============= ====================================== ===========
1384 ``1`` As/Al* Ts/Tl [0,1]
1385 ``2`` (As/Al - 1) / (Tl/Ts - 1) [-Ts/Tl,1]
1386 ``3`` Like ``2`` but clipping range at zero [0,1]
1387 ============= ====================================== ===========
1389 With ``scalingmethod=1``, the values produced by this variant of the
1390 STA/LTA are equivalent to
1392 .. math::
1393 s_i = \\frac{s}{l} \\frac{\\frac{1}{s}\\sum_{j=i}^{i+s-1} f_j}
1394 {\\frac{1}{l}\\sum_{j=i+s-l}^{i+s-1} f_j}
1396 where :math:`f_j` are the input samples, :math:`s` are the number of
1397 samples in the short time window and :math:`l` are the number of
1398 samples in the long time window.
1399 '''
1401 n = self.data_len()
1402 tmin = self.tmin
1404 nshort = max(1, int(round(tshort/self.deltat)))
1405 nlong = max(1, int(round(tlong/self.deltat)))
1407 assert nshort < nlong
1409 if nlong > len(self.ydata):
1410 raise TraceTooShort(
1411 'Samples in trace: %s, samples needed: %s'
1412 % (len(self.ydata), nlong))
1414 if quad:
1415 sqrdata = self.ydata**2
1416 else:
1417 sqrdata = self.ydata
1419 nshift = int(math.floor(0.5 * (nlong - nshort)))
1420 if nlong % 2 != 0 and nshort % 2 == 0:
1421 nshift += 1
1423 mavg_short = moving_avg(sqrdata, nshort)[nshift:]
1424 mavg_long = moving_avg(sqrdata, nlong)[:sqrdata.size-nshift]
1426 self.drop_growbuffer()
1428 if scalingmethod not in (1, 2, 3):
1429 raise Exception('Invalid argument to scalingrange argument.')
1431 if scalingmethod == 1:
1432 ydata = mavg_short/mavg_long * nshort/nlong
1433 elif scalingmethod in (2, 3):
1434 ydata = (mavg_short/mavg_long - 1.) \
1435 / ((float(nlong)/float(nshort)) - 1)
1437 if scalingmethod == 3:
1438 ydata = num.maximum(ydata, 0.)
1440 self.set_ydata(ydata)
1442 self.shift((math.ceil(0.5*nlong) - nshort + 1) * self.deltat)
1444 self.chop(
1445 tmin + (nlong - nshort) * self.deltat,
1446 tmin + (n - nshort) * self.deltat)
1448 def peaks(self, threshold, tsearch,
1449 deadtime=False,
1450 nblock_duration_detection=100):
1452 '''
1453 Detect peaks above a given threshold (method 1).
1455 From every instant, where the signal rises above ``threshold``, a time
1456 length of ``tsearch`` seconds is searched for a maximum. A list with
1457 tuples (time, value) for each detected peak is returned. The
1458 ``deadtime`` argument turns on a special deadtime duration detection
1459 algorithm useful in combination with recursive STA/LTA filters.
1460 '''
1462 y = self.ydata
1463 above = num.where(y > threshold, 1, 0)
1464 deriv = num.zeros(y.size, dtype=num.int8)
1465 deriv[1:] = above[1:]-above[:-1]
1466 itrig_positions = num.nonzero(deriv > 0)[0]
1467 tpeaks = []
1468 apeaks = []
1469 tzeros = []
1470 tzero = self.tmin
1472 for itrig_pos in itrig_positions:
1473 ibeg = itrig_pos
1474 iend = min(
1475 len(self.ydata),
1476 itrig_pos + int(math.ceil(tsearch/self.deltat)))
1477 ipeak = num.argmax(y[ibeg:iend])
1478 tpeak = self.tmin + (ipeak+ibeg)*self.deltat
1479 apeak = y[ibeg+ipeak]
1481 if tpeak < tzero:
1482 continue
1484 if deadtime:
1485 ibeg = itrig_pos
1486 iblock = 0
1487 nblock = nblock_duration_detection
1488 totalsum = 0.
1489 while True:
1490 if ibeg+iblock*nblock >= len(y):
1491 tzero = self.tmin + (len(y)-1) * self.deltat
1492 break
1494 logy = num.log(
1495 y[ibeg+iblock*nblock:ibeg+(iblock+1)*nblock])
1496 logy[0] += totalsum
1497 ysum = num.cumsum(logy)
1498 totalsum = ysum[-1]
1499 below = num.where(ysum <= 0., 1, 0)
1500 deriv = num.zeros(ysum.size, dtype=num.int8)
1501 deriv[1:] = below[1:]-below[:-1]
1502 izero_positions = num.nonzero(deriv > 0)[0] + iblock*nblock
1503 if len(izero_positions) > 0:
1504 tzero = self.tmin + self.deltat * (
1505 ibeg + izero_positions[0])
1506 break
1507 iblock += 1
1508 else:
1509 tzero = ibeg*self.deltat + self.tmin + tsearch
1511 tpeaks.append(tpeak)
1512 apeaks.append(apeak)
1513 tzeros.append(tzero)
1515 if deadtime:
1516 return tpeaks, apeaks, tzeros
1517 else:
1518 return tpeaks, apeaks
1520 def peaks2(self, threshold, tsearch):
1522 '''
1523 Detect peaks above a given threshold (method 2).
1525 This variant of peak detection is a bit more robust (and slower) than
1526 the one implemented in :py:meth:`Trace.peaks`. First all samples with
1527 ``a[i-1] < a[i] > a[i+1]`` are masked as potential peaks. From these,
1528 iteratively the one with the maximum amplitude ``a[j]`` and time
1529 ``t[j]`` is choosen and potential peaks within
1530 ``t[j] - tsearch, t[j] + tsearch``
1531 are discarded. The algorithm stops, when ``a[j] < threshold`` or when
1532 no more potential peaks are left.
1533 '''
1535 a = num.copy(self.ydata)
1537 amin = num.min(a)
1539 a[0] = amin
1540 a[1: -1][num.diff(a, 2) <= 0.] = amin
1541 a[-1] = amin
1543 data = []
1544 while True:
1545 imax = num.argmax(a)
1546 amax = a[imax]
1548 if amax < threshold or amax == amin:
1549 break
1551 data.append((self.tmin + imax * self.deltat, amax))
1553 ntsearch = int(round(tsearch / self.deltat))
1554 a[max(imax-ntsearch//2, 0):min(imax+ntsearch//2, a.size)] = amin
1556 if data:
1557 data.sort()
1558 tpeaks, apeaks = list(zip(*data))
1559 else:
1560 tpeaks, apeaks = [], []
1562 return tpeaks, apeaks
1564 def extend(self, tmin=None, tmax=None, fillmethod='zeros'):
1565 '''
1566 Extend trace to given span.
1568 :param tmin: begin time of new span
1569 :param tmax: end time of new span
1570 :param fillmethod: ``'zeros'``, ``'repeat'``, ``'mean'``, or
1571 ``'median'``
1572 '''
1574 nold = self.ydata.size
1576 if tmin is not None:
1577 nl = min(0, int(round((tmin-self.tmin)/self.deltat)))
1578 else:
1579 nl = 0
1581 if tmax is not None:
1582 nh = max(nold - 1, int(round((tmax-self.tmin)/self.deltat)))
1583 else:
1584 nh = nold - 1
1586 n = nh - nl + 1
1587 data = num.zeros(n, dtype=self.ydata.dtype)
1588 data[-nl:-nl + nold] = self.ydata
1589 if self.ydata.size >= 1:
1590 if fillmethod == 'repeat':
1591 data[:-nl] = self.ydata[0]
1592 data[-nl + nold:] = self.ydata[-1]
1593 elif fillmethod == 'median':
1594 v = num.median(self.ydata)
1595 data[:-nl] = v
1596 data[-nl + nold:] = v
1597 elif fillmethod == 'mean':
1598 v = num.mean(self.ydata)
1599 data[:-nl] = v
1600 data[-nl + nold:] = v
1602 self.drop_growbuffer()
1603 self.ydata = data
1605 self.tmin += nl * self.deltat
1606 self.tmax = self.tmin + (self.ydata.size - 1) * self.deltat
1608 self._update_ids()
1610 def transfer(self,
1611 tfade=0.,
1612 freqlimits=None,
1613 transfer_function=None,
1614 cut_off_fading=True,
1615 demean=True,
1616 invert=False):
1618 '''
1619 Return new trace with transfer function applied (convolution).
1621 :param tfade: rise/fall time in seconds of taper applied in timedomain
1622 at both ends of trace.
1623 :param freqlimits: 4-tuple with corner frequencies in Hz.
1624 :param transfer_function: FrequencyResponse object; must provide a
1625 method 'evaluate(freqs)', which returns the transfer function
1626 coefficients at the frequencies 'freqs'.
1627 :param cut_off_fading: whether to cut off rise/fall interval in output
1628 trace.
1629 :param demean: remove mean before applying transfer function
1630 :param invert: set to True to do a deconvolution
1631 '''
1633 if transfer_function is None:
1634 transfer_function = g_one_response
1636 if self.tmax - self.tmin <= tfade*2.:
1637 raise TraceTooShort(
1638 'Trace %s.%s.%s.%s too short for fading length setting. '
1639 'trace length = %g, fading length = %g'
1640 % (self.nslc_id + (self.tmax-self.tmin, tfade)))
1642 if freqlimits is None and (
1643 transfer_function is None or transfer_function.is_scalar()):
1645 # special case for flat responses
1647 output = self.copy()
1648 data = self.ydata
1649 ndata = data.size
1651 if transfer_function is not None:
1652 c = num.abs(transfer_function.evaluate(num.ones(1))[0])
1654 if invert:
1655 c = 1.0/c
1657 data *= c
1659 if tfade != 0.0:
1660 data *= costaper(
1661 0., tfade, self.deltat*(ndata-1)-tfade, self.deltat*ndata,
1662 ndata, self.deltat)
1664 output.ydata = data
1666 else:
1667 ndata = self.ydata.size
1668 ntrans = nextpow2(ndata*1.2)
1669 coeffs = self._get_tapered_coeffs(
1670 ntrans, freqlimits, transfer_function, invert=invert,
1671 demean=demean)
1673 data = self.ydata
1675 data_pad = num.zeros(ntrans, dtype=float)
1676 data_pad[:ndata] = data
1677 if demean:
1678 data_pad[:ndata] -= data.mean()
1680 if tfade != 0.0:
1681 data_pad[:ndata] *= costaper(
1682 0., tfade, self.deltat*(ndata-1)-tfade, self.deltat*ndata,
1683 ndata, self.deltat)
1685 fdata = num.fft.rfft(data_pad)
1686 fdata *= coeffs
1687 ddata = num.fft.irfft(fdata)
1688 output = self.copy()
1689 output.ydata = ddata[:ndata]
1691 if cut_off_fading and tfade != 0.0:
1692 try:
1693 output.chop(output.tmin+tfade, output.tmax-tfade, inplace=True)
1694 except NoData:
1695 raise TraceTooShort(
1696 'Trace %s.%s.%s.%s too short for fading length setting. '
1697 'trace length = %g, fading length = %g'
1698 % (self.nslc_id + (self.tmax-self.tmin, tfade)))
1699 else:
1700 output.ydata = output.ydata.copy()
1702 return output
1704 def differentiate(self, n=1, order=4, inplace=True):
1705 '''
1706 Approximate first or second derivative of the trace.
1708 :param n: 1 for first derivative, 2 for second
1709 :param order: order of the approximation 2 and 4 are supported
1710 :param inplace: if ``True`` the trace is differentiated in place,
1711 otherwise a new trace object with the derivative is returned.
1713 Raises :py:exc:`ValueError` for unsupported `n` or `order`.
1715 See :py:func:`~pyrocko.util.diff_fd` for implementation details.
1716 '''
1718 ddata = util.diff_fd(n, order, self.deltat, self.ydata)
1720 if inplace:
1721 self.ydata = ddata
1722 else:
1723 output = self.copy(data=False)
1724 output.set_ydata(ddata)
1725 return output
1727 def drop_chain_cache(self):
1728 if self._pchain:
1729 self._pchain.clear()
1731 def init_chain(self):
1732 self._pchain = pchain.Chain(
1733 do_downsample,
1734 do_extend,
1735 do_pre_taper,
1736 do_fft,
1737 do_filter,
1738 do_ifft)
1740 def run_chain(self, tmin, tmax, deltat, setup, nocache):
1741 if setup.domain == 'frequency_domain':
1742 _, _, data = self._pchain(
1743 (self, deltat),
1744 (tmin, tmax),
1745 (setup.taper,),
1746 (setup.filter,),
1747 (setup.filter,),
1748 nocache=nocache)
1750 return num.abs(data), num.abs(data)
1752 else:
1753 processed = self._pchain(
1754 (self, deltat),
1755 (tmin, tmax),
1756 (setup.taper,),
1757 (setup.filter,),
1758 (setup.filter,),
1759 (),
1760 nocache=nocache)
1762 if setup.domain == 'time_domain':
1763 data = processed.get_ydata()
1765 elif setup.domain == 'envelope':
1766 processed = processed.envelope(inplace=False)
1768 elif setup.domain == 'absolute':
1769 processed.set_ydata(num.abs(processed.get_ydata()))
1771 return processed.get_ydata(), processed
1773 def misfit(self, candidate, setup, nocache=False, debug=False):
1774 '''
1775 Calculate misfit and normalization factor against candidate trace.
1777 :param candidate: :py:class:`Trace` object
1778 :param setup: :py:class:`MisfitSetup` object
1779 :returns: tuple ``(m, n)``, where m is the misfit value and n is the
1780 normalization divisor
1782 If the sampling rates of ``self`` and ``candidate`` differ, the trace
1783 with the higher sampling rate will be downsampled.
1784 '''
1786 a = self
1787 b = candidate
1789 for tr in (a, b):
1790 if not tr._pchain:
1791 tr.init_chain()
1793 deltat = max(a.deltat, b.deltat)
1794 tmin = min(a.tmin, b.tmin) - deltat
1795 tmax = max(a.tmax, b.tmax) + deltat
1797 adata, aproc = a.run_chain(tmin, tmax, deltat, setup, nocache)
1798 bdata, bproc = b.run_chain(tmin, tmax, deltat, setup, nocache)
1800 if setup.domain != 'cc_max_norm':
1801 m, n = Lx_norm(bdata, adata, norm=setup.norm)
1802 else:
1803 ctr = correlate(aproc, bproc, mode='full', normalization='normal')
1804 ccmax = ctr.max()[1]
1805 m = 0.5 - 0.5 * ccmax
1806 n = 0.5
1808 if debug:
1809 return m, n, aproc, bproc
1810 else:
1811 return m, n
1813 def spectrum(self, pad_to_pow2=False, tfade=None, ntrans_min=None):
1814 '''
1815 Get FFT spectrum of trace.
1817 :param pad_to_pow2: whether to zero-pad the data to next larger
1818 power-of-two length
1819 :param tfade: ``None`` or a time length in seconds, to apply cosine
1820 shaped tapers to both
1822 :returns: a tuple with (frequencies, values)
1823 '''
1825 if ntrans_min is None:
1826 ndata = self.ydata.size
1827 else:
1828 ndata = ntrans_min
1830 if pad_to_pow2:
1831 ntrans = nextpow2(ndata)
1832 else:
1833 ntrans = ndata
1835 if tfade is None:
1836 ydata = self.ydata
1837 else:
1838 ydata = self.ydata * costaper(
1839 0., tfade, self.deltat*(ndata-1)-tfade, self.deltat*ndata,
1840 ndata, self.deltat)
1842 fydata = num.fft.rfft(ydata, ntrans)
1843 df = 1./(ntrans*self.deltat)
1844 fxdata = num.arange(len(fydata))*df
1845 return fxdata, fydata
1847 def multi_filter(self, filter_freqs, bandwidth):
1849 class Gauss(FrequencyResponse):
1850 f0 = Float.T()
1851 a = Float.T(default=1.0)
1853 def __init__(self, f0, a=1.0, **kwargs):
1854 FrequencyResponse.__init__(self, f0=f0, a=a, **kwargs)
1856 def evaluate(self, freqs):
1857 omega0 = 2.*math.pi*self.f0
1858 omega = 2.*math.pi*freqs
1859 return num.exp(-((omega-omega0)
1860 / (self.a*omega0))**2)
1862 freqs, coeffs = self.spectrum()
1863 n = self.data_len()
1864 nfilt = len(filter_freqs)
1865 signal_tf = num.zeros((nfilt, n))
1866 centroid_freqs = num.zeros(nfilt)
1867 for ifilt, f0 in enumerate(filter_freqs):
1868 taper = Gauss(f0, a=bandwidth)
1869 weights = taper.evaluate(freqs)
1870 nhalf = freqs.size
1871 analytic_spec = num.zeros(n, dtype=complex)
1872 analytic_spec[:nhalf] = coeffs*weights
1874 enorm = num.abs(analytic_spec[:nhalf])**2
1875 enorm /= num.sum(enorm)
1877 if n % 2 == 0:
1878 analytic_spec[1:nhalf-1] *= 2.
1879 else:
1880 analytic_spec[1:nhalf] *= 2.
1882 analytic = num.fft.ifft(analytic_spec)
1883 signal_tf[ifilt, :] = num.abs(analytic)
1885 enorm = num.abs(analytic_spec[:nhalf])**2
1886 enorm /= num.sum(enorm)
1887 centroid_freqs[ifilt] = num.sum(freqs*enorm)
1889 return centroid_freqs, signal_tf
1891 def _get_tapered_coeffs(
1892 self, ntrans, freqlimits, transfer_function, invert=False,
1893 demean=True):
1895 cache_key = (
1896 ntrans, self.deltat, freqlimits, transfer_function.uuid, invert,
1897 demean)
1899 if cache_key in g_tapered_coeffs_cache:
1900 return g_tapered_coeffs_cache[cache_key]
1902 deltaf = 1./(self.deltat*ntrans)
1903 nfreqs = ntrans//2 + 1
1904 transfer = num.ones(nfreqs, dtype=complex)
1905 hi = snapper(nfreqs, deltaf)
1906 if freqlimits is not None:
1907 kmin, kmax = hi(freqlimits[0]), hi(freqlimits[3])
1908 freqs = num.arange(kmin, kmax)*deltaf
1909 coeffs = transfer_function.evaluate(freqs)
1910 if invert:
1911 if num.any(coeffs == 0.0):
1912 raise InfiniteResponse('%s.%s.%s.%s' % self.nslc_id)
1914 transfer[kmin:kmax] = 1.0 / coeffs
1915 else:
1916 transfer[kmin:kmax] = coeffs
1918 tapered_transfer = costaper(*freqlimits, nfreqs, deltaf) * transfer
1919 else:
1920 if invert:
1921 raise Exception(
1922 'transfer: `freqlimits` must be given when `invert` is '
1923 'set to `True`')
1925 freqs = num.arange(nfreqs) * deltaf
1926 tapered_transfer = transfer_function.evaluate(freqs)
1928 g_tapered_coeffs_cache[cache_key] = tapered_transfer
1930 if demean:
1931 tapered_transfer[0] = 0.0 # don't introduce static offsets
1933 return tapered_transfer
1935 def fill_template(self, template, **additional):
1936 '''
1937 Fill string template with trace metadata.
1939 Uses normal python '%(placeholder)s' string templates. The following
1940 placeholders are considered: ``network``, ``station``, ``location``,
1941 ``channel``, ``tmin`` (time of first sample), ``tmax`` (time of last
1942 sample), ``tmin_ms``, ``tmax_ms``, ``tmin_us``, ``tmax_us``,
1943 ``tmin_year``, ``tmax_year``, ``tmin_month``, ``tmax_month``,
1944 ``tmin_day``, ``tmax_day``, ``julianday``. The variants ending with
1945 ``'_ms'`` include milliseconds, those with ``'_us'`` include
1946 microseconds, those with ``'_year'`` contain only the year.
1947 '''
1949 template = template.replace('%n', '%(network)s')\
1950 .replace('%s', '%(station)s')\
1951 .replace('%l', '%(location)s')\
1952 .replace('%c', '%(channel)s')\
1953 .replace('%b', '%(tmin)s')\
1954 .replace('%e', '%(tmax)s')\
1955 .replace('%j', '%(julianday)s')
1957 return template % TraceStringFiller(self, additional=additional)
1959 def plot(self):
1960 '''
1961 Show trace with matplotlib.
1963 See also: :py:meth:`Trace.snuffle`.
1964 '''
1966 import pylab
1967 pylab.plot(self.get_xdata(), self.get_ydata())
1968 name = '%s %s %s - %s' % (
1969 self.channel,
1970 self.station,
1971 time.strftime('%d-%m-%y %H:%M:%S', time.gmtime(self.tmin)),
1972 time.strftime('%d-%m-%y %H:%M:%S', time.gmtime(self.tmax)))
1974 pylab.title(name)
1975 pylab.show()
1977 def snuffle(self, **kwargs):
1978 '''
1979 Show trace in a snuffler window.
1981 :param stations: list of :py:class:`pyrocko.model.station.Station`
1982 objects or ``None``
1983 :param events: list of :py:class:`pyrocko.model.event.Event` objects or
1984 ``None``
1985 :param markers: list of :py:class:`pyrocko.gui.snuffler.marker.Marker`
1986 objects or ``None``
1987 :param ntracks: float, number of tracks to be shown initially (default:
1988 12)
1989 :param follow: time interval (in seconds) for real time follow mode or
1990 ``None``
1991 :param controls: bool, whether to show the main controls (default:
1992 ``True``)
1993 :param opengl: bool, whether to use opengl (default: ``False``)
1994 '''
1996 return snuffle([self], **kwargs)
1999def snuffle(traces, **kwargs):
2000 '''
2001 Show traces in a snuffler window.
2003 :param stations: list of :py:class:`pyrocko.model.station.Station` objects
2004 or ``None``
2005 :param events: list of :py:class:`pyrocko.model.event.Event` objects or
2006 ``None``
2007 :param markers: list of :py:class:`pyrocko.gui.snuffler.marker.Marker`
2008 objects or ``None``
2009 :param ntracks: int, number of tracks to be shown initially (default: 12)
2010 :param follow: time interval (in seconds) for real time follow mode or
2011 ``None``
2012 :param controls: bool, whether to show the main controls (default:
2013 ``True``)
2014 :param opengl: bool, whether to use opengl (default: ``False``)
2015 '''
2017 from pyrocko import pile
2018 from pyrocko.gui.snuffler import snuffler
2019 p = pile.Pile()
2020 if traces:
2021 trf = pile.MemTracesFile(None, traces)
2022 p.add_file(trf)
2023 return snuffler.snuffle(p, **kwargs)
2026def downsample_tpad(
2027 deltat_in, deltat_out, allow_upsample_max=1, ftype='fir-remez'):
2028 '''
2029 Get approximate amount of cutoff which will be produced by downsampling.
2031 The :py:meth:`Trace.downsample_to` method removes some samples at the
2032 beginning and end of the trace which is downsampled. This function
2033 estimates the approximate length [s] which will be cut off for a given set
2034 of parameters supplied to :py:meth:`Trace.downsample_to`.
2036 :param deltat_in:
2037 Input sampling interval [s].
2038 :type deltat_in:
2039 float
2041 :param deltat_out:
2042 Output samling interval [s].
2043 :type deltat_out:
2044 float
2046 :returns:
2047 Approximate length [s] which will be cut off.
2049 See :py:meth:`Trace.downsample_to` for details.
2050 '''
2052 upsratio, deci_seq = _configure_downsampling(
2053 deltat_in, deltat_out, allow_upsample_max)
2055 tpad = 0.0
2056 deltat = deltat_in / upsratio
2057 for deci in deci_seq:
2058 b, a, n = util.decimate_coeffs(deci, None, ftype)
2059 # n//2 for the antialiasing
2060 # +deci for possible snap to multiples
2061 # +1 for rounding errors
2062 tpad += (n//2 + deci + 1) * deltat
2063 deltat = deltat * deci
2065 return tpad
2068def _configure_downsampling(deltat_in, deltat_out, allow_upsample_max):
2069 for upsratio in range(1, allow_upsample_max+1):
2070 dratio = (upsratio/deltat_in) / (1./deltat_out)
2071 deci_seq = util.decitab(int(round(dratio)))
2072 if abs(dratio - round(dratio)) / dratio < 0.0001 and deci_seq:
2073 return upsratio, [deci for deci in deci_seq if deci != 1]
2075 raise util.UnavailableDecimation('ratio = %g' % (deltat_out / deltat_in))
2078def _all_same(xs):
2079 return all(x == xs[0] for x in xs)
2082def _incompatibilities(traces):
2083 if not traces:
2084 return None
2086 params = [
2087 (tr.ydata.size, tr.ydata.dtype, tr.deltat, tr.tmin)
2088 for tr in traces]
2090 if not _all_same(params):
2091 return params
2092 else:
2093 return None
2096def _raise_incompatible_traces(params):
2097 raise IncompatibleTraces(
2098 'Given traces are incompatible. Sampling rate, start time, '
2099 'number of samples and data type must match.\n%s\n%s' % (
2100 ' %10s %-10s %12s %22s' % (
2101 'nsamples', 'dtype', 'deltat', 'tmin'),
2102 '\n'.join(
2103 ' %10i %-10s %12.5e %22s' % (
2104 nsamples, dtype, deltat, util.time_to_str(tmin))
2105 for (nsamples, dtype, deltat, tmin) in params)))
2108def _ensure_compatible(traces):
2109 params = _incompatibilities(traces)
2110 if params:
2111 _raise_incompatible_traces(params)
2114def _almost_equal(a, b, atol):
2115 return abs(a-b) < atol
2118def get_traces_data_as_array(traces):
2119 '''
2120 Merge data samples from multiple traces into a 2D array.
2122 :param traces:
2123 Input waveforms.
2124 :type traces:
2125 list of :py:class:`pyrocko.Trace <pyrocko.trace.Trace>` objects
2127 :raises:
2128 :py:class:`IncompatibleTraces` if traces have different time
2129 span, sample rate or data type, or if traces is an empty list.
2131 :returns:
2132 2D array as ``data[itrace, isample]``.
2133 :rtype:
2134 :py:class:`numpy.ndarray`
2135 '''
2137 if not traces:
2138 raise IncompatibleTraces('Need at least one trace.')
2140 _ensure_compatible(traces)
2142 return num.vstack([tr.ydata for tr in traces])
2145def make_traces_compatible(
2146 traces,
2147 dtype=None,
2148 deltat=None,
2149 enforce_global_snap=True,
2150 warn_snap=False):
2152 '''
2153 Homogenize sampling rate, time span, sampling instants, and data type.
2155 This function takes a group of traces and tries to make them compatible in
2156 terms of data type and sampling rate, time span, and sampling instants of
2157 time.
2159 If necessary, traces are (in order):
2161 - casted to the same data type.
2162 - downsampled to a common sampling rate, using decimation cascades.
2163 - resampled to common sampling instants in time, using Sinc interpolation.
2164 - cut to the same time span. The longest time span covered by all traces is
2165 used.
2167 :param traces:
2168 Input waveforms.
2169 :type traces:
2170 :py:class:`list` of :py:class:`Trace`
2172 :param dtype:
2173 Force traces to be casted to the given data type. If not specified, the
2174 traces are cast to :py:class:`float`.
2175 :type dtype:
2176 :py:class:`numpy.dtype`
2178 :param deltat:
2179 Sampling interval [s]. If not specified, the longest sampling interval
2180 among the input traces is chosen.
2181 :type deltat:
2182 float
2184 :param enforce_global_snap:
2185 If ``True``, choose sampling instants to be even multiples of the
2186 sampling rate in system time. When set to ``False`` traces are still
2187 resampled to common time instants (if necessary), but they may be
2188 offset to the system time sampling rate multiples.
2189 :type enforce_global_snap:
2190 bool
2192 :param warn_snap:
2193 If set to ``True`` warn, when resampling has to be performed.
2194 :type warn_snap:
2195 bool
2196 '''
2198 eps_snap = 1e-3
2200 if not traces:
2201 return []
2203 traces = list(traces)
2205 dtypes = [tr.ydata.dtype for tr in traces]
2206 if not _all_same(dtypes) or dtype is not None:
2208 if dtype is None:
2209 dtype = float
2210 logger.warning(
2211 'make_traces_compatible: Inconsistent data types - converting '
2212 'sample datatype to %s.' % str(dtype))
2214 for itr, tr in enumerate(traces):
2215 tr_copy = tr.copy(data=False)
2216 tr_copy.set_ydata(tr.ydata.astype(dtype))
2217 traces[itr] = tr_copy
2219 deltats = [tr.deltat for tr in traces]
2220 if not _all_same(deltats) or deltat is not None:
2221 if deltat is None:
2222 deltat = max(deltats)
2223 logger.warning(
2224 'make_traces_compatible: Inconsistent sampling rates - '
2225 'downsampling to lowest rate among input traces: %g Hz.'
2226 % (1.0 / deltat))
2228 for itr, tr in enumerate(traces):
2229 if tr.deltat != deltat:
2230 tr_copy = tr.copy()
2231 tr_copy.downsample_to(deltat, snap=True, cut=True)
2232 traces[itr] = tr_copy
2234 tmins = num.array([tr.tmin for tr in traces])
2235 is_aligned = num.abs(num.round(tmins / deltat) * deltat - tmins) \
2236 > deltat * eps_snap
2238 if enforce_global_snap or any(is_aligned):
2239 tref = util.to_time_float(0.0)
2240 else:
2241 # to keep a common subsample shift
2242 tref = num.max(tmins)
2244 tmins_snap = num.round((tmins - tref) / deltat) * deltat + tref
2245 need_snap = num.abs(tmins_snap - tmins) > deltat * eps_snap
2246 if num.any(need_snap):
2247 if warn_snap:
2248 logger.warning(
2249 'make_traces_compatible: Misaligned sampling - introducing '
2250 'subsample shifts for proper alignment.')
2252 for itr, tr in enumerate(traces):
2253 if need_snap[itr]:
2254 tr_copy = tr.copy()
2255 if tref != 0.0:
2256 tr_copy.shift(-tref)
2258 tr_copy.snap(interpolate=True)
2259 if tref != 0.0:
2260 tr_copy.shift(tref)
2262 traces[itr] = tr_copy
2264 tmins = num.array([tr.tmin for tr in traces])
2265 nsamples = num.array([tr.ydata.size for tr in traces])
2266 tmaxs = tmins + (nsamples - 1) * deltat
2268 tmin = num.max(tmins)
2269 tmax = num.min(tmaxs)
2271 if tmin > tmax:
2272 raise IncompatibleTraces('Traces do not overlap.')
2274 nsamples_must = int(round((tmax - tmin) / deltat)) + 1
2275 for itr, tr in enumerate(traces):
2276 if not (_almost_equal(tr.tmin, tmin, deltat*eps_snap)
2277 and _almost_equal(tr.tmax, tmax, deltat*eps_snap)):
2279 traces[itr] = tr.chop(
2280 tmin, tmax,
2281 inplace=False,
2282 want_incomplete=False,
2283 include_last=True)
2285 xtr = traces[itr]
2286 assert _almost_equal(xtr.tmin, tmin, deltat*eps_snap)
2287 assert int(round((xtr.tmax - xtr.tmin) / deltat)) + 1 == nsamples_must
2288 xtr.tmin = tmin
2289 xtr.tmax = tmax
2290 xtr.deltat = deltat
2291 xtr._update_ids()
2293 return traces
2296class IncompatibleTraces(Exception):
2297 '''
2298 Raised when traces have incompatible sampling rate, time span or data type.
2299 '''
2302class InfiniteResponse(Exception):
2303 '''
2304 This exception is raised by :py:class:`Trace` operations when deconvolution
2305 of a frequency response (instrument response transfer function) would
2306 result in a division by zero.
2307 '''
2310class MisalignedTraces(Exception):
2311 '''
2312 This exception is raised by some :py:class:`Trace` operations when tmin,
2313 tmax or number of samples do not match.
2314 '''
2316 pass
2319class OverlappingTraces(Exception):
2320 '''
2321 This exception is raised by some :py:class:`Trace` operations when traces
2322 overlap but should not.
2323 '''
2325 pass
2328class NoData(Exception):
2329 '''
2330 This exception is raised by some :py:class:`Trace` operations when no or
2331 not enough data is available.
2332 '''
2334 pass
2337class AboveNyquist(Exception):
2338 '''
2339 This exception is raised by some :py:class:`Trace` operations when given
2340 frequencies are above the Nyquist frequency.
2341 '''
2343 pass
2346class TraceTooShort(Exception):
2347 '''
2348 This exception is raised by some :py:class:`Trace` operations when the
2349 trace is too short.
2350 '''
2352 pass
2355class ResamplingFailed(Exception):
2356 pass
2359class TraceStringFiller:
2361 def __init__(self, tr, additional={}):
2362 self.tr = tr
2363 self.additional = additional
2365 def __getitem__(self, k):
2366 if k in ('network', 'station', 'location', 'channel', 'extra'):
2367 return getattr(self.tr, k)
2369 method = getattr(self, 'get_' + k, None)
2370 if method:
2371 return method()
2373 return self.additional[k]
2375 def _filename_safe(self, s):
2376 return re.sub(r'[^0-9A-Za-z_-]', '_', s)
2378 def get_network_safe(self):
2379 return self._filename_safe(self.tr.network)
2381 def get_station_safe(self):
2382 return self._filename_safe(self.tr.station)
2384 def get_location_safe(self):
2385 return self._filename_safe(self.tr.location)
2387 def get_channel_safe(self):
2388 return self._filename_safe(self.tr.channel)
2390 def get_extra_safe(self):
2391 return self._filename_safe(self.tr.extra)
2393 def get_network_dsafe(self):
2394 return self._filename_safe(self.tr.network) or '_'
2396 def get_station_dsafe(self):
2397 return self._filename_safe(self.tr.station) or '_'
2399 def get_location_dsafe(self):
2400 return self._filename_safe(self.tr.location) or '_'
2402 def get_channel_dsafe(self):
2403 return self._filename_safe(self.tr.channel) or '_'
2405 def get_extra_dsafe(self):
2406 return self._filename_safe(self.tr.extra) or '_'
2408 def get_tmin(self):
2409 return util.time_to_str(self.tr.tmin, format='%Y-%m-%d_%H-%M-%S')
2411 def get_tmax(self):
2412 return util.time_to_str(self.tr.tmax, format='%Y-%m-%d_%H-%M-%S')
2414 def get_tmin_ms(self):
2415 return util.time_to_str(self.tr.tmin, format='%Y-%m-%d_%H-%M-%S.3FRAC')
2417 def get_tmax_ms(self):
2418 return util.time_to_str(self.tr.tmax, format='%Y-%m-%d_%H-%M-%S.3FRAC')
2420 def get_tmin_us(self):
2421 return util.time_to_str(self.tr.tmin, format='%Y-%m-%d_%H-%M-%S.6FRAC')
2423 def get_tmax_us(self):
2424 return util.time_to_str(self.tr.tmax, format='%Y-%m-%d_%H-%M-%S.6FRAC')
2426 def get_tmin_year(self):
2427 return util.time_to_str(self.tr.tmin, format='%Y')
2429 def get_tmin_month(self):
2430 return util.time_to_str(self.tr.tmin, format='%m')
2432 def get_tmin_day(self):
2433 return util.time_to_str(self.tr.tmin, format='%d')
2435 def get_tmax_year(self):
2436 return util.time_to_str(self.tr.tmax, format='%Y')
2438 def get_tmax_month(self):
2439 return util.time_to_str(self.tr.tmax, format='%m')
2441 def get_tmax_day(self):
2442 return util.time_to_str(self.tr.tmax, format='%d')
2444 def get_julianday(self):
2445 return str(util.julian_day_of_year(self.tr.tmin))
2447 def get_tmin_jday(self):
2448 return util.time_to_str(self.tr.tmin, format='%j')
2450 def get_tmax_jday(self):
2451 return util.time_to_str(self.tr.tmax, format='%j')
2454def minmax(traces, key=None, mode='minmax', outer_mode='minmax'):
2456 '''
2457 Get data range given traces grouped by selected pattern.
2459 :param key: a callable which takes as single argument a trace and returns a
2460 key for the grouping of the results. If this is ``None``, the default,
2461 ``lambda tr: (tr.network, tr.station, tr.location, tr.channel)`` is
2462 used.
2463 :param mode: ``'minmax'`` or floating point number. If this is
2464 ``'minmax'``, minimum and maximum of the traces are used, if it is a
2465 number, mean +- standard deviation times ``mode`` is used.
2467 param outer_mode: ``'minmax'`` to use mininum and maximum of the
2468 single-trace ranges, or ``'robust'`` to use the interval to discard 10%
2469 extreme values on either end.
2471 :returns: a dict with the combined data ranges.
2473 Examples::
2475 ranges = minmax(traces, lambda tr: tr.channel)
2476 print ranges['N'] # print min & max of all traces with channel == 'N'
2477 print ranges['E'] # print min & max of all traces with channel == 'E'
2479 ranges = minmax(traces, lambda tr: (tr.network, tr.station))
2480 print ranges['GR', 'HAM3'] # print min & max of all traces with
2481 # network == 'GR' and station == 'HAM3'
2483 ranges = minmax(traces, lambda tr: None)
2484 print ranges[None] # prints min & max of all traces
2485 '''
2487 if key is None:
2488 key = _default_key
2490 ranges = defaultdict(list)
2491 for trace in traces:
2492 if isinstance(mode, str) and mode == 'minmax':
2493 mi, ma = num.nanmin(trace.ydata), num.nanmax(trace.ydata)
2494 else:
2495 mean = trace.ydata.mean()
2496 std = trace.ydata.std()
2497 mi, ma = mean-std*mode, mean+std*mode
2499 k = key(trace)
2500 ranges[k].append((mi, ma))
2502 for k in ranges:
2503 mins, maxs = num.array(ranges[k]).T
2504 if outer_mode == 'minmax':
2505 ranges[k] = num.nanmin(mins), num.nanmax(maxs)
2506 elif outer_mode == 'robust':
2507 ranges[k] = num.percentile(mins, 10.), num.percentile(maxs, 90.)
2509 return ranges
2512def minmaxtime(traces, key=None):
2514 '''
2515 Get time range given traces grouped by selected pattern.
2517 :param key: a callable which takes as single argument a trace and returns a
2518 key for the grouping of the results. If this is ``None``, the default,
2519 ``lambda tr: (tr.network, tr.station, tr.location, tr.channel)`` is
2520 used.
2522 :returns: a dict with the combined data ranges.
2523 '''
2525 if key is None:
2526 key = _default_key
2528 ranges = {}
2529 for trace in traces:
2530 mi, ma = trace.tmin, trace.tmax
2531 k = key(trace)
2532 if k not in ranges:
2533 ranges[k] = mi, ma
2534 else:
2535 tmi, tma = ranges[k]
2536 ranges[k] = min(tmi, mi), max(tma, ma)
2538 return ranges
2541def degapper(
2542 traces,
2543 maxgap=5,
2544 fillmethod='interpolate',
2545 deoverlap='use_second',
2546 maxlap=None):
2548 '''
2549 Try to connect traces and remove gaps.
2551 This method will combine adjacent traces, which match in their network,
2552 station, location and channel attributes. Overlapping parts are handled
2553 according to the ``deoverlap`` argument.
2555 :param traces: input traces, must be sorted by their full_id attribute.
2556 :param maxgap: maximum number of samples to interpolate.
2557 :param fillmethod: what to put into the gaps: 'interpolate' or 'zeros'.
2558 :param deoverlap: how to handle overlaps: 'use_second' to use data from
2559 second trace (default), 'use_first' to use data from first trace,
2560 'crossfade_cos' to crossfade with cosine taper, 'add' to add amplitude
2561 values.
2562 :param maxlap: maximum number of samples of overlap which are removed
2564 :returns: list of traces
2565 '''
2567 in_traces = traces
2568 out_traces = []
2569 if not in_traces:
2570 return out_traces
2571 out_traces.append(in_traces.pop(0))
2572 while in_traces:
2574 a = out_traces[-1]
2575 b = in_traces.pop(0)
2577 avirt, bvirt = a.ydata is None, b.ydata is None
2578 assert avirt == bvirt, \
2579 'traces given to degapper() must either all have data or have ' \
2580 'no data.'
2582 virtual = avirt and bvirt
2584 if (a.nslc_id == b.nslc_id and a.deltat == b.deltat
2585 and a.data_len() >= 1 and b.data_len() >= 1
2586 and (virtual or a.ydata.dtype == b.ydata.dtype)):
2588 dist = (b.tmin-(a.tmin+(a.data_len()-1)*a.deltat))/a.deltat
2589 idist = int(round(dist))
2590 if abs(dist - idist) > 0.05 and idist <= maxgap:
2591 # logger.warning('Cannot degap traces with displaced sampling '
2592 # '(%s, %s, %s, %s)' % a.nslc_id)
2593 pass
2594 else:
2595 if 1 < idist <= maxgap:
2596 if not virtual:
2597 if fillmethod == 'interpolate':
2598 filler = a.ydata[-1] + (
2599 ((1.0 + num.arange(idist-1, dtype=float))
2600 / idist) * (b.ydata[0]-a.ydata[-1])
2601 ).astype(a.ydata.dtype)
2602 elif fillmethod == 'zeros':
2603 filler = num.zeros(idist-1, dtype=a.ydata.dtype)
2604 a.ydata = num.concatenate((a.ydata, filler, b.ydata))
2605 a.tmax = b.tmax
2606 if a.mtime and b.mtime:
2607 a.mtime = max(a.mtime, b.mtime)
2608 continue
2610 elif idist == 1:
2611 if not virtual:
2612 a.ydata = num.concatenate((a.ydata, b.ydata))
2613 a.tmax = b.tmax
2614 if a.mtime and b.mtime:
2615 a.mtime = max(a.mtime, b.mtime)
2616 continue
2618 elif idist <= 0 and (maxlap is None or -maxlap < idist):
2619 if b.tmax > a.tmax:
2620 if not virtual:
2621 na = a.ydata.size
2622 n = -idist+1
2623 if deoverlap == 'use_second':
2624 a.ydata = num.concatenate(
2625 (a.ydata[:-n], b.ydata))
2626 elif deoverlap in ('use_first', 'crossfade_cos'):
2627 a.ydata = num.concatenate(
2628 (a.ydata, b.ydata[n:]))
2629 elif deoverlap == 'add':
2630 a.ydata[-n:] += b.ydata[:n]
2631 a.ydata = num.concatenate(
2632 (a.ydata, b.ydata[n:]))
2633 else:
2634 assert False, 'unknown deoverlap method'
2636 if deoverlap == 'crossfade_cos':
2637 n = -idist+1
2638 taper = 0.5-0.5*num.cos(
2639 (1.+num.arange(n))/(1.+n)*num.pi)
2640 a.ydata[na-n:na] *= 1.-taper
2641 a.ydata[na-n:na] += b.ydata[:n] * taper
2643 a.tmax = b.tmax
2644 if a.mtime and b.mtime:
2645 a.mtime = max(a.mtime, b.mtime)
2646 continue
2647 else:
2648 # make short second trace vanish
2649 continue
2651 if b.data_len() >= 1:
2652 out_traces.append(b)
2654 for tr in out_traces:
2655 tr._update_ids()
2657 return out_traces
2660def rotate(traces, azimuth, in_channels, out_channels):
2661 '''
2662 2D rotation of traces.
2664 :param traces: list of input traces
2665 :param azimuth: difference of the azimuths of the component directions
2666 (azimuth of out_channels[0]) - (azimuth of in_channels[0])
2667 :param in_channels: names of the input channels (e.g. 'N', 'E')
2668 :param out_channels: names of the output channels (e.g. 'R', 'T')
2669 :returns: list of rotated traces
2670 '''
2672 phi = azimuth/180.*math.pi
2673 cphi = math.cos(phi)
2674 sphi = math.sin(phi)
2675 rotated = []
2676 in_channels = tuple(_channels_to_names(in_channels))
2677 out_channels = tuple(_channels_to_names(out_channels))
2678 for a in traces:
2679 for b in traces:
2680 if ((a.channel, b.channel) == in_channels and
2681 a.nslc_id[:3] == b.nslc_id[:3] and
2682 abs(a.deltat-b.deltat) < a.deltat*0.001):
2683 tmin = max(a.tmin, b.tmin)
2684 tmax = min(a.tmax, b.tmax)
2686 if tmin < tmax:
2687 ac = a.chop(tmin, tmax, inplace=False, include_last=True)
2688 bc = b.chop(tmin, tmax, inplace=False, include_last=True)
2689 if abs(ac.tmin - bc.tmin) > ac.deltat*0.01:
2690 logger.warning(
2691 'Cannot rotate traces with displaced sampling '
2692 '(%s, %s, %s, %s)' % a.nslc_id)
2693 continue
2695 acydata = ac.get_ydata()*cphi+bc.get_ydata()*sphi
2696 bcydata = -ac.get_ydata()*sphi+bc.get_ydata()*cphi
2697 ac.set_ydata(acydata)
2698 bc.set_ydata(bcydata)
2700 ac.set_codes(channel=out_channels[0])
2701 bc.set_codes(channel=out_channels[1])
2702 rotated.append(ac)
2703 rotated.append(bc)
2705 return rotated
2708def rotate_to_rt(n, e, source, receiver, out_channels=('R', 'T')):
2709 '''
2710 Rotate traces from NE to RT system.
2712 :param n:
2713 North trace.
2714 :type n:
2715 :py:class:`~pyrocko.trace.Trace`
2717 :param e:
2718 East trace.
2719 :type e:
2720 :py:class:`~pyrocko.trace.Trace`
2722 :param source:
2723 Source of the recorded signal.
2724 :type source:
2725 :py:class:`pyrocko.gf.seismosizer.Source`
2727 :param receiver:
2728 Receiver of the recorded signal.
2729 :type receiver:
2730 :py:class:`pyrocko.model.location.Location`
2732 :param out_channels:
2733 Channel codes of the output channels (radial, transversal).
2734 Default is ('R', 'T').
2736 :type out_channels
2737 optional, tuple[str, str]
2739 :returns:
2740 Rotated traces (radial, transversal).
2741 :rtype:
2742 tuple[
2743 :py:class:`~pyrocko.trace.Trace`,
2744 :py:class:`~pyrocko.trace.Trace`]
2745 '''
2746 azimuth = orthodrome.azimuth(receiver, source) + 180.
2747 in_channels = n.channel, e.channel
2748 out = rotate(
2749 [n, e], azimuth,
2750 in_channels=in_channels,
2751 out_channels=out_channels)
2753 assert len(out) == 2
2754 for tr in out:
2755 if tr.channel == out_channels[0]:
2756 r = tr
2757 elif tr.channel == out_channels[1]:
2758 t = tr
2759 else:
2760 assert False
2762 return r, t
2765def rotate_to_lqt(traces, backazimuth, incidence, in_channels,
2766 out_channels=('L', 'Q', 'T')):
2767 '''
2768 Rotate traces from ZNE to LQT system.
2770 :param traces: list of traces in arbitrary order
2771 :param backazimuth: backazimuth in degrees clockwise from north
2772 :param incidence: incidence angle in degrees from vertical
2773 :param in_channels: input channel names
2774 :param out_channels: output channel names (default: ('L', 'Q', 'T'))
2775 :returns: list of transformed traces
2776 '''
2777 i = incidence/180.*num.pi
2778 b = backazimuth/180.*num.pi
2780 ci = num.cos(i)
2781 cb = num.cos(b)
2782 si = num.sin(i)
2783 sb = num.sin(b)
2785 rotmat = num.array(
2786 [[ci, -cb*si, -sb*si], [si, cb*ci, sb*ci], [0., sb, -cb]])
2787 return project(traces, rotmat, in_channels, out_channels)
2790def _decompose(a):
2791 '''
2792 Decompose matrix into independent submatrices.
2793 '''
2795 def depends(iout, a):
2796 row = a[iout, :]
2797 return set(num.arange(row.size).compress(row != 0.0))
2799 def provides(iin, a):
2800 col = a[:, iin]
2801 return set(num.arange(col.size).compress(col != 0.0))
2803 a = num.asarray(a)
2804 outs = set(range(a.shape[0]))
2805 systems = []
2806 while outs:
2807 iout = outs.pop()
2809 gout = set()
2810 for iin in depends(iout, a):
2811 gout.update(provides(iin, a))
2813 if not gout:
2814 continue
2816 gin = set()
2817 for iout2 in gout:
2818 gin.update(depends(iout2, a))
2820 if not gin:
2821 continue
2823 for iout2 in gout:
2824 if iout2 in outs:
2825 outs.remove(iout2)
2827 gin = list(gin)
2828 gin.sort()
2829 gout = list(gout)
2830 gout.sort()
2832 systems.append((gin, gout, a[gout, :][:, gin]))
2834 return systems
2837def _channels_to_names(channels):
2838 from pyrocko import squirrel
2839 names = []
2840 for ch in channels:
2841 if isinstance(ch, model.Channel):
2842 names.append(ch.name)
2843 elif isinstance(ch, squirrel.Channel):
2844 names.append(ch.codes.channel)
2845 else:
2846 names.append(ch)
2848 return names
2851def project(traces, matrix, in_channels, out_channels):
2852 '''
2853 Affine transform of three-component traces.
2855 Compute matrix-vector product of three-component traces, to e.g. rotate
2856 traces into a different basis. The traces are distinguished and ordered by
2857 their channel attribute. The tranform is applied to overlapping parts of
2858 any appropriate combinations of the input traces. This should allow this
2859 function to be robust with data gaps. It also tries to apply the
2860 tranformation to subsets of the channels, if this is possible, so that, if
2861 for example a vertical compontent is missing, horizontal components can
2862 still be rotated.
2864 :param traces: list of traces in arbitrary order
2865 :param matrix: tranformation matrix
2866 :param in_channels: input channel names
2867 :param out_channels: output channel names
2868 :returns: list of transformed traces
2869 '''
2871 in_channels = tuple(_channels_to_names(in_channels))
2872 out_channels = tuple(_channels_to_names(out_channels))
2873 systems = _decompose(matrix)
2875 # fallback to full matrix if some are not quadratic
2876 for iins, iouts, submatrix in systems:
2877 if submatrix.shape[0] != submatrix.shape[1]:
2878 if len(in_channels) != 3 or len(out_channels) != 3:
2879 return []
2880 else:
2881 return _project3(traces, matrix, in_channels, out_channels)
2883 projected = []
2884 for iins, iouts, submatrix in systems:
2885 in_cha = tuple([in_channels[iin] for iin in iins])
2886 out_cha = tuple([out_channels[iout] for iout in iouts])
2887 if submatrix.shape[0] == 1:
2888 projected.extend(_project1(traces, submatrix, in_cha, out_cha))
2889 elif submatrix.shape[1] == 2:
2890 projected.extend(_project2(traces, submatrix, in_cha, out_cha))
2891 else:
2892 projected.extend(_project3(traces, submatrix, in_cha, out_cha))
2894 return projected
2897def project_dependencies(matrix, in_channels, out_channels):
2898 '''
2899 Figure out what dependencies project() would produce.
2900 '''
2902 in_channels = tuple(_channels_to_names(in_channels))
2903 out_channels = tuple(_channels_to_names(out_channels))
2904 systems = _decompose(matrix)
2906 subpro = []
2907 for iins, iouts, submatrix in systems:
2908 if submatrix.shape[0] != submatrix.shape[1]:
2909 subpro.append((matrix, in_channels, out_channels))
2911 if not subpro:
2912 for iins, iouts, submatrix in systems:
2913 in_cha = tuple([in_channels[iin] for iin in iins])
2914 out_cha = tuple([out_channels[iout] for iout in iouts])
2915 subpro.append((submatrix, in_cha, out_cha))
2917 deps = {}
2918 for mat, in_cha, out_cha in subpro:
2919 for oc in out_cha:
2920 if oc not in deps:
2921 deps[oc] = []
2923 for ic in in_cha:
2924 deps[oc].append(ic)
2926 return deps
2929def _project1(traces, matrix, in_channels, out_channels):
2930 assert len(in_channels) == 1
2931 assert len(out_channels) == 1
2932 assert matrix.shape == (1, 1)
2934 projected = []
2935 for a in traces:
2936 if not (a.channel,) == in_channels:
2937 continue
2939 ac = a.copy()
2940 ac.set_ydata(matrix[0, 0]*a.get_ydata())
2941 ac.set_codes(channel=out_channels[0])
2942 projected.append(ac)
2944 return projected
2947def _project2(traces, matrix, in_channels, out_channels):
2948 assert len(in_channels) == 2
2949 assert len(out_channels) == 2
2950 assert matrix.shape == (2, 2)
2951 projected = []
2952 for a in traces:
2953 for b in traces:
2954 if not ((a.channel, b.channel) == in_channels and
2955 a.nslc_id[:3] == b.nslc_id[:3] and
2956 abs(a.deltat-b.deltat) < a.deltat*0.001):
2957 continue
2959 tmin = max(a.tmin, b.tmin)
2960 tmax = min(a.tmax, b.tmax)
2962 if tmin > tmax:
2963 continue
2965 ac = a.chop(tmin, tmax, inplace=False, include_last=True)
2966 bc = b.chop(tmin, tmax, inplace=False, include_last=True)
2967 if abs(ac.tmin - bc.tmin) > ac.deltat*0.01:
2968 logger.warning(
2969 'Cannot project traces with displaced sampling '
2970 '(%s, %s, %s, %s)' % a.nslc_id)
2971 continue
2973 acydata = num.dot(matrix[0], (ac.get_ydata(), bc.get_ydata()))
2974 bcydata = num.dot(matrix[1], (ac.get_ydata(), bc.get_ydata()))
2976 ac.set_ydata(acydata)
2977 bc.set_ydata(bcydata)
2979 ac.set_codes(channel=out_channels[0])
2980 bc.set_codes(channel=out_channels[1])
2982 projected.append(ac)
2983 projected.append(bc)
2985 return projected
2988def _project3(traces, matrix, in_channels, out_channels):
2989 assert len(in_channels) == 3
2990 assert len(out_channels) == 3
2991 assert matrix.shape == (3, 3)
2992 projected = []
2993 for a in traces:
2994 for b in traces:
2995 for c in traces:
2996 if not ((a.channel, b.channel, c.channel) == in_channels
2997 and a.nslc_id[:3] == b.nslc_id[:3]
2998 and b.nslc_id[:3] == c.nslc_id[:3]
2999 and abs(a.deltat-b.deltat) < a.deltat*0.001
3000 and abs(b.deltat-c.deltat) < b.deltat*0.001):
3002 continue
3004 tmin = max(a.tmin, b.tmin, c.tmin)
3005 tmax = min(a.tmax, b.tmax, c.tmax)
3007 if tmin >= tmax:
3008 continue
3010 ac = a.chop(tmin, tmax, inplace=False, include_last=True)
3011 bc = b.chop(tmin, tmax, inplace=False, include_last=True)
3012 cc = c.chop(tmin, tmax, inplace=False, include_last=True)
3013 if (abs(ac.tmin - bc.tmin) > ac.deltat*0.01
3014 or abs(bc.tmin - cc.tmin) > bc.deltat*0.01):
3016 logger.warning(
3017 'Cannot project traces with displaced sampling '
3018 '(%s, %s, %s, %s)' % a.nslc_id)
3019 continue
3021 acydata = num.dot(
3022 matrix[0],
3023 (ac.get_ydata(), bc.get_ydata(), cc.get_ydata()))
3024 bcydata = num.dot(
3025 matrix[1],
3026 (ac.get_ydata(), bc.get_ydata(), cc.get_ydata()))
3027 ccydata = num.dot(
3028 matrix[2],
3029 (ac.get_ydata(), bc.get_ydata(), cc.get_ydata()))
3031 ac.set_ydata(acydata)
3032 bc.set_ydata(bcydata)
3033 cc.set_ydata(ccydata)
3035 ac.set_codes(channel=out_channels[0])
3036 bc.set_codes(channel=out_channels[1])
3037 cc.set_codes(channel=out_channels[2])
3039 projected.append(ac)
3040 projected.append(bc)
3041 projected.append(cc)
3043 return projected
3046def correlate(a, b, mode='valid', normalization=None, use_fft=False):
3047 '''
3048 Cross correlation of two traces.
3050 :param a,b: input traces
3051 :param mode: ``'valid'``, ``'full'``, or ``'same'``
3052 :param normalization: ``'normal'``, ``'gliding'``, or ``None``
3053 :param use_fft: bool, whether to do cross correlation in spectral domain
3055 :returns: trace containing cross correlation coefficients
3057 This function computes the cross correlation between two traces. It
3058 evaluates the discrete equivalent of
3060 .. math::
3062 c(t) = \\int_{-\\infty}^{\\infty} a^{\\ast}(\\tau) b(t+\\tau) d\\tau
3064 where the star denotes complex conjugate. Note, that the arguments here are
3065 swapped when compared with the :py:func:`numpy.correlate` function,
3066 which is internally called. This function should be safe even with older
3067 versions of NumPy, where the correlate function has some problems.
3069 A trace containing the cross correlation coefficients is returned. The time
3070 information of the output trace is set so that the returned cross
3071 correlation can be viewed directly as a function of time lag.
3073 Example::
3075 # align two traces a and b containing a time shifted similar signal:
3076 c = pyrocko.trace.correlate(a,b)
3077 t, coef = c.max() # get time and value of maximum
3078 b.shift(-t) # align b with a
3080 '''
3082 assert_same_sampling_rate(a, b)
3084 ya, yb = a.ydata, b.ydata
3086 # need reversed order here:
3087 yc = numpy_correlate_fixed(yb, ya, mode=mode, use_fft=use_fft)
3088 kmin, kmax = numpy_correlate_lag_range(yb, ya, mode=mode, use_fft=use_fft)
3090 if normalization == 'normal':
3091 normfac = num.sqrt(num.sum(ya**2))*num.sqrt(num.sum(yb**2))
3092 yc = yc/normfac
3094 elif normalization == 'gliding':
3095 if mode != 'valid':
3096 assert False, 'gliding normalization currently only available ' \
3097 'with "valid" mode.'
3099 if ya.size < yb.size:
3100 yshort, ylong = ya, yb
3101 else:
3102 yshort, ylong = yb, ya
3104 epsilon = 0.00001
3105 normfac_short = num.sqrt(num.sum(yshort**2))
3106 normfac = normfac_short * num.sqrt(
3107 moving_sum(ylong**2, yshort.size, mode='valid')) \
3108 + normfac_short*epsilon
3110 if yb.size <= ya.size:
3111 normfac = normfac[::-1]
3113 yc /= normfac
3115 c = a.copy()
3116 c.set_ydata(yc)
3117 c.set_codes(*merge_codes(a, b, '~'))
3118 c.shift(-c.tmin + b.tmin-a.tmin + kmin * c.deltat)
3120 return c
3123def deconvolve(
3124 a, b, waterlevel,
3125 tshift=0.,
3126 pad=0.5,
3127 fd_taper=None,
3128 pad_to_pow2=True):
3130 same_sampling_rate(a, b)
3131 assert abs(a.tmin - b.tmin) < a.deltat * 0.001
3132 deltat = a.deltat
3133 npad = int(round(a.data_len()*pad + tshift / deltat))
3135 ndata = max(a.data_len(), b.data_len())
3136 ndata_pad = ndata + npad
3138 if pad_to_pow2:
3139 ntrans = nextpow2(ndata_pad)
3140 else:
3141 ntrans = ndata
3143 aspec = num.fft.rfft(a.ydata, ntrans)
3144 bspec = num.fft.rfft(b.ydata, ntrans)
3146 out = aspec * num.conj(bspec)
3148 bautocorr = bspec*num.conj(bspec)
3149 denom = num.maximum(bautocorr, waterlevel * bautocorr.max())
3151 out /= denom
3152 df = 1/(ntrans*deltat)
3154 if fd_taper is not None:
3155 fd_taper(out, 0.0, df)
3157 ydata = num.roll(num.fft.irfft(out), int(round(tshift/deltat)))
3158 c = a.copy(data=False)
3159 c.set_ydata(ydata[:ndata])
3160 c.set_codes(*merge_codes(a, b, '/'))
3161 return c
3164def assert_same_sampling_rate(a, b, eps=1.0e-6):
3165 assert same_sampling_rate(a, b, eps), \
3166 'Sampling rates differ: %g != %g' % (a.deltat, b.deltat)
3169def same_sampling_rate(a, b, eps=1.0e-6):
3170 '''
3171 Check if two traces have the same sampling rate.
3173 :param a,b: input traces
3174 :param eps: relative tolerance
3175 '''
3177 return abs(a.deltat - b.deltat) < (a.deltat + b.deltat)*eps
3180def fix_deltat_rounding_errors(deltat):
3181 '''
3182 Try to undo sampling rate rounding errors.
3184 Fix rounding errors of sampling intervals when these are read from single
3185 precision floating point values.
3187 Assumes that the true sampling rate or sampling interval was an integer
3188 value. No correction will be applied if this would change the sampling
3189 rate by more than 0.001%.
3190 '''
3192 if deltat <= 1.0:
3193 deltat_new = 1.0 / round(1.0 / deltat)
3194 else:
3195 deltat_new = round(deltat)
3197 if abs(deltat_new - deltat) / deltat > 1e-5:
3198 deltat_new = deltat
3200 return deltat_new
3203def merge_codes(a, b, sep='-'):
3204 '''
3205 Merge network-station-location-channel codes of a pair of traces.
3206 '''
3208 o = []
3209 for xa, xb in zip(a.nslc_id, b.nslc_id):
3210 if xa == xb:
3211 o.append(xa)
3212 else:
3213 o.append(sep.join((xa, xb)))
3214 return o
3217class Taper(Object):
3218 '''
3219 Base class for tapers.
3221 Does nothing by default.
3222 '''
3224 def __call__(self, y, x0, dx):
3225 pass
3228class CosTaper(Taper):
3229 '''
3230 Cosine Taper.
3232 :param a: start of fading in
3233 :param b: end of fading in
3234 :param c: start of fading out
3235 :param d: end of fading out
3236 '''
3238 a = Float.T()
3239 b = Float.T()
3240 c = Float.T()
3241 d = Float.T()
3243 def __init__(self, a, b, c, d):
3244 Taper.__init__(self, a=a, b=b, c=c, d=d)
3246 def __call__(self, y, x0, dx):
3248 if y.dtype == num.dtype(float):
3249 _apply_costaper = signal_ext.apply_costaper
3250 else:
3251 _apply_costaper = apply_costaper
3253 _apply_costaper(self.a, self.b, self.c, self.d, y, x0, dx)
3255 def span(self, y, x0, dx):
3256 return span_costaper(self.a, self.b, self.c, self.d, y, x0, dx)
3258 def time_span(self):
3259 return self.a, self.d
3262class CosFader(Taper):
3263 '''
3264 Cosine Fader.
3266 :param xfade: fade in and fade out time in seconds (optional)
3267 :param xfrac: fade in and fade out as fraction between 0. and 1. (optional)
3269 Only one argument can be set. The other should to be ``None``.
3270 '''
3272 xfade = Float.T(optional=True)
3273 xfrac = Float.T(optional=True)
3275 def __init__(self, xfade=None, xfrac=None):
3276 Taper.__init__(self, xfade=xfade, xfrac=xfrac)
3277 assert (xfade is None) != (xfrac is None)
3278 self._xfade = xfade
3279 self._xfrac = xfrac
3281 def __call__(self, y, x0, dx):
3283 xfade = self._xfade
3285 xlen = (y.size - 1)*dx
3286 if xfade is None:
3287 xfade = xlen * self._xfrac
3289 a = x0
3290 b = x0 + xfade
3291 c = x0 + xlen - xfade
3292 d = x0 + xlen
3294 apply_costaper(a, b, c, d, y, x0, dx)
3296 def span(self, y, x0, dx):
3297 return 0, y.size
3299 def time_span(self):
3300 return None, None
3303def none_min(li):
3304 if None in li:
3305 return None
3306 else:
3307 return min(x for x in li if x is not None)
3310def none_max(li):
3311 if None in li:
3312 return None
3313 else:
3314 return max(x for x in li if x is not None)
3317class MultiplyTaper(Taper):
3318 '''
3319 Multiplication of several tapers.
3320 '''
3322 tapers = List.T(Taper.T())
3324 def __init__(self, tapers=None):
3325 if tapers is None:
3326 tapers = []
3328 Taper.__init__(self, tapers=tapers)
3330 def __call__(self, y, x0, dx):
3331 for taper in self.tapers:
3332 taper(y, x0, dx)
3334 def span(self, y, x0, dx):
3335 spans = []
3336 for taper in self.tapers:
3337 spans.append(taper.span(y, x0, dx))
3339 mins, maxs = list(zip(*spans))
3340 return min(mins), max(maxs)
3342 def time_span(self):
3343 spans = []
3344 for taper in self.tapers:
3345 spans.append(taper.time_span())
3347 mins, maxs = list(zip(*spans))
3348 return none_min(mins), none_max(maxs)
3351class GaussTaper(Taper):
3352 '''
3353 Frequency domain Gaussian filter.
3354 '''
3356 alpha = Float.T()
3358 def __init__(self, alpha):
3359 Taper.__init__(self, alpha=alpha)
3360 self._alpha = alpha
3362 def __call__(self, y, x0, dx):
3363 f = x0 + num.arange(y.size)*dx
3364 y *= num.exp(-num.pi**2 / (self._alpha**2) * f**2)
3367cached_coefficients = {}
3370def _get_cached_filter_coeffs(order, corners, btype):
3371 ck = (order, tuple(corners), btype)
3372 if ck not in cached_coefficients:
3373 if len(corners) == 1:
3374 corners = corners[0]
3376 cached_coefficients[ck] = signal.butter(
3377 order, corners, btype=btype)
3379 return cached_coefficients[ck]
3382class _globals(object):
3383 _numpy_has_correlate_flip_bug = None
3386def _default_key(tr):
3387 return (tr.network, tr.station, tr.location, tr.channel)
3390def numpy_has_correlate_flip_bug():
3391 '''
3392 Check if NumPy's correlate function reveals old behaviour.
3393 '''
3395 if _globals._numpy_has_correlate_flip_bug is None:
3396 a = num.array([0, 0, 1, 0, 0, 0, 0])
3397 b = num.array([0, 0, 0, 0, 1, 0, 0, 0])
3398 ab = num.correlate(a, b, mode='same')
3399 ba = num.correlate(b, a, mode='same')
3400 _globals._numpy_has_correlate_flip_bug = num.all(ab == ba)
3402 return _globals._numpy_has_correlate_flip_bug
3405def numpy_correlate_fixed(a, b, mode='valid', use_fft=False):
3406 '''
3407 Call :py:func:`numpy.correlate` with fixes.
3409 c[k] = sum_i a[i+k] * conj(b[i])
3411 Note that the result produced by newer numpy.correlate is always flipped
3412 with respect to the formula given in its documentation (if ascending k
3413 assumed for the output).
3414 '''
3416 if use_fft:
3417 if a.size < b.size:
3418 c = signal.fftconvolve(b[::-1], a, mode=mode)
3419 else:
3420 c = signal.fftconvolve(a, b[::-1], mode=mode)
3421 return c
3423 else:
3424 buggy = numpy_has_correlate_flip_bug()
3426 a = num.asarray(a)
3427 b = num.asarray(b)
3429 if buggy:
3430 b = num.conj(b)
3432 c = num.correlate(a, b, mode=mode)
3434 if buggy and a.size < b.size:
3435 return c[::-1]
3436 else:
3437 return c
3440def numpy_correlate_emulate(a, b, mode='valid'):
3441 '''
3442 Slow version of :py:func:`numpy.correlate` for comparison.
3443 '''
3445 a = num.asarray(a)
3446 b = num.asarray(b)
3447 kmin = -(b.size-1)
3448 klen = a.size-kmin
3449 kmin, kmax = numpy_correlate_lag_range(a, b, mode=mode)
3450 kmin = int(kmin)
3451 kmax = int(kmax)
3452 klen = kmax - kmin + 1
3453 c = num.zeros(klen, dtype=num.promote_types(b.dtype, a.dtype))
3454 for k in range(kmin, kmin+klen):
3455 imin = max(0, -k)
3456 ilen = min(b.size, a.size-k) - imin
3457 c[k-kmin] = num.sum(
3458 a[imin+k:imin+ilen+k] * num.conj(b[imin:imin+ilen]))
3460 return c
3463def numpy_correlate_lag_range(a, b, mode='valid', use_fft=False):
3464 '''
3465 Get range of lags for which :py:func:`numpy.correlate` produces values.
3466 '''
3468 a = num.asarray(a)
3469 b = num.asarray(b)
3471 kmin = -(b.size-1)
3472 if mode == 'full':
3473 klen = a.size-kmin
3474 elif mode == 'same':
3475 klen = max(a.size, b.size)
3476 kmin += (a.size+b.size-1 - max(a.size, b.size)) // 2 + \
3477 int(not use_fft and a.size % 2 == 0 and b.size > a.size)
3478 elif mode == 'valid':
3479 klen = abs(a.size - b.size) + 1
3480 kmin += min(a.size, b.size) - 1
3482 return kmin, kmin + klen - 1
3485def autocorr(x, nshifts):
3486 '''
3487 Compute biased estimate of the first autocorrelation coefficients.
3489 :param x: input array
3490 :param nshifts: number of coefficients to calculate
3491 '''
3493 mean = num.mean(x)
3494 std = num.std(x)
3495 n = x.size
3496 xdm = x - mean
3497 r = num.zeros(nshifts)
3498 for k in range(nshifts):
3499 r[k] = 1./((n-num.abs(k))*std) * num.sum(xdm[:n-k] * xdm[k:])
3501 return r
3504def yulewalker(x, order):
3505 '''
3506 Compute autoregression coefficients using Yule-Walker method.
3508 :param x: input array
3509 :param order: number of coefficients to produce
3511 A biased estimate of the autocorrelation is used. The Yule-Walker equations
3512 are solved by :py:func:`numpy.linalg.inv` instead of Levinson-Durbin
3513 recursion which is normally used.
3514 '''
3516 gamma = autocorr(x, order+1)
3517 d = gamma[1:1+order]
3518 a = num.zeros((order, order))
3519 gamma2 = num.concatenate((gamma[::-1], gamma[1:order]))
3520 for i in range(order):
3521 ioff = order-i
3522 a[i, :] = gamma2[ioff:ioff+order]
3524 return num.dot(num.linalg.inv(a), -d)
3527def moving_avg(x, n):
3528 n = int(n)
3529 cx = x.cumsum()
3530 nn = len(x)
3531 y = num.zeros(nn, dtype=cx.dtype)
3532 y[n//2:n//2+(nn-n)] = (cx[n:]-cx[:-n])/n
3533 y[:n//2] = y[n//2]
3534 y[n//2+(nn-n):] = y[n//2+(nn-n)-1]
3535 return y
3538def moving_sum(x, n, mode='valid'):
3539 n = int(n)
3540 cx = x.cumsum()
3541 nn = len(x)
3543 if mode == 'valid':
3544 if nn-n+1 <= 0:
3545 return num.zeros(0, dtype=cx.dtype)
3546 y = num.zeros(nn-n+1, dtype=cx.dtype)
3547 y[0] = cx[n-1]
3548 y[1:nn-n+1] = cx[n:nn]-cx[0:nn-n]
3550 if mode == 'full':
3551 y = num.zeros(nn+n-1, dtype=cx.dtype)
3552 if n <= nn:
3553 y[0:n] = cx[0:n]
3554 y[n:nn] = cx[n:nn]-cx[0:nn-n]
3555 y[nn:nn+n-1] = cx[-1]-cx[nn-n:nn-1]
3556 else:
3557 y[0:nn] = cx[0:nn]
3558 y[nn:n] = cx[nn-1]
3559 y[n:nn+n-1] = cx[nn-1] - cx[0:nn-1]
3561 if mode == 'same':
3562 n1 = (n-1)//2
3563 y = num.zeros(nn, dtype=cx.dtype)
3564 if n <= nn:
3565 y[0:n-n1] = cx[n1:n]
3566 y[n-n1:nn-n1] = cx[n:nn]-cx[0:nn-n]
3567 y[nn-n1:nn] = cx[nn-1] - cx[nn-n:nn-n+n1]
3568 else:
3569 y[0:max(0, nn-n1)] = cx[min(n1, nn):nn]
3570 y[max(nn-n1, 0):min(n-n1, nn)] = cx[nn-1]
3571 y[min(n-n1, nn):nn] = cx[nn-1] - cx[0:max(0, nn-(n-n1))]
3573 return y
3576def nextpow2(i):
3577 return 2**int(math.ceil(math.log(i)/math.log(2.)))
3580def snapper_w_offset(nmax, offset, delta, snapfun=math.ceil):
3581 def snap(x):
3582 return max(0, min(int(snapfun((x-offset)/delta)), nmax))
3583 return snap
3586def snapper(nmax, delta, snapfun=math.ceil):
3587 def snap(x):
3588 return max(0, min(int(snapfun(x/delta)), nmax))
3589 return snap
3592def apply_costaper(a, b, c, d, y, x0, dx):
3593 abcd = num.array((a, b, c, d), dtype=float)
3594 ja, jb, jc, jd = num.clip(num.ceil((abcd-x0)/dx).astype(int), 0, y.size)
3595 y[:ja] = 0.
3596 y[ja:jb] *= 0.5 \
3597 - 0.5*num.cos((dx*num.arange(ja, jb)-(a-x0))/(b-a)*num.pi)
3598 y[jc:jd] *= 0.5 \
3599 + 0.5*num.cos((dx*num.arange(jc, jd)-(c-x0))/(d-c)*num.pi)
3600 y[jd:] = 0.
3603def span_costaper(a, b, c, d, y, x0, dx):
3604 hi = snapper_w_offset(y.size, x0, dx)
3605 return hi(a), hi(d) - hi(a)
3608def costaper(a, b, c, d, nfreqs, deltaf):
3609 hi = snapper(nfreqs, deltaf)
3610 tap = num.zeros(nfreqs)
3611 tap[hi(a):hi(b)] = 0.5 \
3612 - 0.5*num.cos((deltaf*num.arange(hi(a), hi(b))-a)/(b-a)*num.pi)
3613 tap[hi(b):hi(c)] = 1.
3614 tap[hi(c):hi(d)] = 0.5 \
3615 + 0.5*num.cos((deltaf*num.arange(hi(c), hi(d))-c)/(d-c)*num.pi)
3617 return tap
3620def t2ind(t, tdelta, snap=round):
3621 return int(snap(t/tdelta))
3624def hilbert(x, N=None):
3625 '''
3626 Return the hilbert transform of x of length N.
3628 (from scipy.signal, but changed to use fft and ifft from numpy.fft)
3629 '''
3631 x = num.asarray(x)
3632 if N is None:
3633 N = len(x)
3634 if N <= 0:
3635 raise ValueError('N must be positive.')
3636 if num.iscomplexobj(x):
3637 logger.warning('imaginary part of x ignored.')
3638 x = num.real(x)
3640 Xf = num.fft.fft(x, N, axis=0)
3641 h = num.zeros(N)
3642 if N % 2 == 0:
3643 h[0] = h[N//2] = 1
3644 h[1:N//2] = 2
3645 else:
3646 h[0] = 1
3647 h[1:(N+1)//2] = 2
3649 if len(x.shape) > 1:
3650 h = h[:, num.newaxis]
3651 x = num.fft.ifft(Xf*h)
3652 return x
3655def near(a, b, eps):
3656 return abs(a-b) < eps
3659def coroutine(func):
3660 def wrapper(*args, **kwargs):
3661 gen = func(*args, **kwargs)
3662 next(gen)
3663 return gen
3665 wrapper.__name__ = func.__name__
3666 wrapper.__dict__ = func.__dict__
3667 wrapper.__doc__ = func.__doc__
3668 return wrapper
3671class States(object):
3672 '''
3673 Utility to store channel-specific state in coroutines.
3674 '''
3676 def __init__(self):
3677 self._states = {}
3679 def get(self, tr):
3680 k = tr.nslc_id
3681 if k in self._states:
3682 tmin, deltat, dtype, value = self._states[k]
3683 if (near(tmin, tr.tmin, deltat/100.)
3684 and near(deltat, tr.deltat, deltat/10000.)
3685 and dtype == tr.ydata.dtype):
3687 return value
3689 return None
3691 def set(self, tr, value):
3692 k = tr.nslc_id
3693 if k in self._states and self._states[k][-1] is not value:
3694 self.free(self._states[k][-1])
3696 self._states[k] = (tr.tmax+tr.deltat, tr.deltat, tr.ydata.dtype, value)
3698 def free(self, value):
3699 pass
3702@coroutine
3703def co_list_append(list):
3704 while True:
3705 list.append((yield))
3708class ScipyBug(Exception):
3709 pass
3712@coroutine
3713def co_lfilter(target, b, a):
3714 '''
3715 Successively filter broken continuous trace data (coroutine).
3717 Create coroutine which takes :py:class:`Trace` objects, filters their data
3718 through :py:func:`scipy.signal.lfilter` and sends new :py:class:`Trace`
3719 objects containing the filtered data to target. This is useful, if one
3720 wants to filter a long continuous time series, which is split into many
3721 successive traces without producing filter artifacts at trace boundaries.
3723 Filter states are kept *per channel*, specifically, for each (network,
3724 station, location, channel) combination occuring in the input traces, a
3725 separate state is created and maintained. This makes it possible to filter
3726 multichannel or multistation data with only one :py:func:`co_lfilter`
3727 instance.
3729 Filter state is reset, when gaps occur.
3731 Use it like this::
3733 from pyrocko.trace import co_lfilter, co_list_append
3735 filtered_traces = []
3736 pipe = co_lfilter(co_list_append(filtered_traces), a, b)
3737 for trace in traces:
3738 pipe.send(trace)
3740 pipe.close()
3742 '''
3744 try:
3745 states = States()
3746 output = None
3747 while True:
3748 input = (yield)
3750 zi = states.get(input)
3751 if zi is None:
3752 zi = num.zeros(max(len(a), len(b))-1, dtype=float)
3754 output = input.copy(data=False)
3755 try:
3756 ydata, zf = signal.lfilter(b, a, input.get_ydata(), zi=zi)
3757 except ValueError:
3758 raise ScipyBug(
3759 'signal.lfilter failed: could be related to a bug '
3760 'in some older scipy versions, e.g. on opensuse42.1')
3762 output.set_ydata(ydata)
3763 states.set(input, zf)
3764 target.send(output)
3766 except GeneratorExit:
3767 target.close()
3770def co_antialias(target, q, n=None, ftype='fir'):
3771 b, a, n = util.decimate_coeffs(q, n, ftype)
3772 anti = co_lfilter(target, b, a)
3773 return anti
3776@coroutine
3777def co_dropsamples(target, q, nfir):
3778 try:
3779 states = States()
3780 while True:
3781 tr = (yield)
3782 newdeltat = q * tr.deltat
3783 ioffset = states.get(tr)
3784 if ioffset is None:
3785 # for fir filter, the first nfir samples are pulluted by
3786 # boundary effects; cut it off.
3787 # for iir this may be (much) more, we do not correct for that.
3788 # put sample instances to a time which is a multiple of the
3789 # new sampling interval.
3790 newtmin_want = math.ceil(
3791 (tr.tmin+(nfir+1)*tr.deltat) / newdeltat) * newdeltat \
3792 - (nfir/2*tr.deltat)
3793 ioffset = int(round((newtmin_want - tr.tmin)/tr.deltat))
3794 if ioffset < 0:
3795 ioffset = ioffset % q
3797 newtmin_have = tr.tmin + ioffset * tr.deltat
3798 newtr = tr.copy(data=False)
3799 newtr.deltat = newdeltat
3800 # because the fir kernel shifts data by nfir/2 samples:
3801 newtr.tmin = newtmin_have - (nfir/2*tr.deltat)
3802 newtr.set_ydata(tr.get_ydata()[ioffset::q].copy())
3803 states.set(tr, (ioffset % q - tr.data_len() % q) % q)
3804 target.send(newtr)
3806 except GeneratorExit:
3807 target.close()
3810def co_downsample(target, q, n=None, ftype='fir'):
3811 '''
3812 Successively downsample broken continuous trace data (coroutine).
3814 Create coroutine which takes :py:class:`Trace` objects, downsamples their
3815 data and sends new :py:class:`Trace` objects containing the downsampled
3816 data to target. This is useful, if one wants to downsample a long
3817 continuous time series, which is split into many successive traces without
3818 producing filter artifacts and gaps at trace boundaries.
3820 Filter states are kept *per channel*, specifically, for each (network,
3821 station, location, channel) combination occuring in the input traces, a
3822 separate state is created and maintained. This makes it possible to filter
3823 multichannel or multistation data with only one :py:func:`co_lfilter`
3824 instance.
3826 Filter state is reset, when gaps occur. The sampling instances are choosen
3827 so that they occur at (or as close as possible) to even multiples of the
3828 sampling interval of the downsampled trace (based on system time).
3829 '''
3831 b, a, n = util.decimate_coeffs(q, n, ftype)
3832 return co_antialias(co_dropsamples(target, q, n), q, n, ftype)
3835@coroutine
3836def co_downsample_to(target, deltat):
3838 decimators = {}
3839 try:
3840 while True:
3841 tr = (yield)
3842 ratio = deltat / tr.deltat
3843 rratio = round(ratio)
3844 if abs(rratio - ratio)/ratio > 0.0001:
3845 raise util.UnavailableDecimation('ratio = %g' % ratio)
3847 deci_seq = tuple(x for x in util.decitab(int(rratio)) if x != 1)
3848 if deci_seq not in decimators:
3849 pipe = target
3850 for q in deci_seq[::-1]:
3851 pipe = co_downsample(pipe, q)
3853 decimators[deci_seq] = pipe
3855 decimators[deci_seq].send(tr)
3857 except GeneratorExit:
3858 for g in decimators.values():
3859 g.close()
3862class DomainChoice(StringChoice):
3863 choices = [
3864 'time_domain',
3865 'frequency_domain',
3866 'envelope',
3867 'absolute',
3868 'cc_max_norm']
3871class MisfitSetup(Object):
3872 '''
3873 Contains misfit setup to be used in :py:meth:`Trace.misfit`
3875 :param description: Description of the setup
3876 :param norm: L-norm classifier
3877 :param taper: Object of :py:class:`Taper`
3878 :param filter: Object of :py:class:`~pyrocko.response.FrequencyResponse`
3879 :param domain: ['time_domain', 'frequency_domain', 'envelope', 'absolute',
3880 'cc_max_norm']
3882 Can be dumped to a yaml file.
3883 '''
3885 xmltagname = 'misfitsetup'
3886 description = String.T(optional=True)
3887 norm = Int.T(optional=False)
3888 taper = Taper.T(optional=False)
3889 filter = FrequencyResponse.T(optional=True)
3890 domain = DomainChoice.T(default='time_domain')
3893def equalize_sampling_rates(trace_1, trace_2):
3894 '''
3895 Equalize sampling rates of two traces (reduce higher sampling rate to
3896 lower).
3898 :param trace_1: :py:class:`Trace` object
3899 :param trace_2: :py:class:`Trace` object
3901 Returns a copy of the resampled trace if resampling is needed.
3902 '''
3904 if same_sampling_rate(trace_1, trace_2):
3905 return trace_1, trace_2
3907 if trace_1.deltat < trace_2.deltat:
3908 t1_out = trace_1.copy()
3909 t1_out.downsample_to(deltat=trace_2.deltat, snap=True)
3910 logger.debug('Trace downsampled (return copy of trace): %s'
3911 % '.'.join(t1_out.nslc_id))
3912 return t1_out, trace_2
3914 elif trace_1.deltat > trace_2.deltat:
3915 t2_out = trace_2.copy()
3916 t2_out.downsample_to(deltat=trace_1.deltat, snap=True)
3917 logger.debug('Trace downsampled (return copy of trace): %s'
3918 % '.'.join(t2_out.nslc_id))
3919 return trace_1, t2_out
3922def Lx_norm(u, v, norm=2):
3923 '''
3924 Calculate the misfit denominator *m* and the normalization divisor *n*
3925 according to norm.
3927 The normalization divisor *n* is calculated from ``v``.
3929 :param u: :py:class:`numpy.ndarray`
3930 :param v: :py:class:`numpy.ndarray`
3931 :param norm: (default = 2)
3933 ``u`` and ``v`` must be of same size.
3934 '''
3936 if norm == 1:
3937 return (
3938 num.sum(num.abs(v-u)),
3939 num.sum(num.abs(v)))
3941 elif norm == 2:
3942 return (
3943 num.sqrt(num.sum((v-u)**2)),
3944 num.sqrt(num.sum(v**2)))
3946 else:
3947 return (
3948 num.power(num.sum(num.abs(num.power(v - u, norm))), 1./norm),
3949 num.power(num.sum(num.abs(num.power(v, norm))), 1./norm))
3952def do_downsample(tr, deltat):
3953 if abs(tr.deltat - deltat) / tr.deltat > 1e-6:
3954 tr = tr.copy()
3955 tr.downsample_to(deltat, snap=True, demean=False)
3956 else:
3957 if tr.tmin/tr.deltat > 1e-6 or tr.tmax/tr.deltat > 1e-6:
3958 tr = tr.copy()
3959 tr.snap()
3960 return tr
3963def do_extend(tr, tmin, tmax):
3964 if tmin < tr.tmin or tmax > tr.tmax:
3965 tr = tr.copy()
3966 tr.extend(tmin=tmin, tmax=tmax, fillmethod='repeat')
3968 return tr
3971def do_pre_taper(tr, taper):
3972 return tr.taper(taper, inplace=False, chop=True)
3975def do_fft(tr, filter):
3976 if filter is None:
3977 return tr
3978 else:
3979 ndata = tr.ydata.size
3980 nfft = nextpow2(ndata)
3981 padded = num.zeros(nfft, dtype=float)
3982 padded[:ndata] = tr.ydata
3983 spectrum = num.fft.rfft(padded)
3984 df = 1.0 / (tr.deltat * nfft)
3985 frequencies = num.arange(spectrum.size)*df
3986 return [tr, frequencies, spectrum]
3989def do_filter(inp, filter):
3990 if filter is None:
3991 return inp
3992 else:
3993 tr, frequencies, spectrum = inp
3994 spectrum *= filter.evaluate(frequencies)
3995 return [tr, frequencies, spectrum]
3998def do_ifft(inp):
3999 if isinstance(inp, Trace):
4000 return inp
4001 else:
4002 tr, _, spectrum = inp
4003 ndata = tr.ydata.size
4004 tr = tr.copy(data=False)
4005 tr.set_ydata(num.fft.irfft(spectrum)[:ndata])
4006 return tr
4009def check_alignment(t1, t2):
4010 if abs(t1.tmin-t2.tmin) > t1.deltat * 1e-4 or \
4011 abs(t1.tmax - t2.tmax) > t1.deltat * 1e-4 or \
4012 t1.ydata.shape != t2.ydata.shape:
4013 raise MisalignedTraces(
4014 'Cannot calculate misfit of %s and %s due to misaligned '
4015 'traces.' % ('.'.join(t1.nslc_id), '.'.join(t2.nslc_id)))
4018def check_overlaps(traces_a, traces_b=None, message='Traces overlap.'):
4019 for ia, tr_a in enumerate(traces_a):
4020 for tr_b in traces_a[ia+1:] if traces_b is None else traces_b:
4021 if tr_a.nslc_id == tr_b.nslc_id and tr_a.overlaps(
4022 tr_b.tmin, tr_b.tmax):
4023 raise OverlappingTraces(
4024 message + '\n Trace 1: %s\n Trace 2: %s' % (
4025 tr_a.summary, tr_b.summary))