1# http://pyrocko.org - GPLv3
2#
3# The Pyrocko Developers, 21st Century
4# ---|P------/S----------~Lg----------
5'''
6This module provides basic signal processing for seismic traces.
7'''
9import time
10import math
11import copy
12import logging
13import hashlib
14from collections import defaultdict
16import numpy as num
17from scipy import signal
19from pyrocko import util, orthodrome, pchain, model
20from pyrocko.util import reuse
21from pyrocko.guts import Object, Float, Int, String, List, \
22 StringChoice, Timestamp
23from pyrocko.guts_array import Array
24from pyrocko.model import squirrel_content
26# backward compatibility
27from pyrocko.util import UnavailableDecimation # noqa
28from pyrocko.response import ( # noqa
29 FrequencyResponse, Evalresp, InverseEvalresp, PoleZeroResponse,
30 ButterworthResponse, SampledResponse, IntegrationResponse,
31 DifferentiationResponse, MultiplyResponse)
34guts_prefix = 'pf'
36logger = logging.getLogger('pyrocko.trace')
39@squirrel_content
40class Trace(Object):
42 '''
43 Create new trace object.
45 A ``Trace`` object represents a single continuous strip of evenly sampled
46 time series data. It is built from a 1D NumPy array containing the data
47 samples and some attributes describing its beginning and ending time, its
48 sampling rate and four string identifiers (its network, station, location
49 and channel code).
51 :param network: network code
52 :param station: station code
53 :param location: location code
54 :param channel: channel code
55 :param extra: extra code
56 :param tmin: system time of first sample in [s]
57 :param tmax: system time of last sample in [s] (if set to ``None`` it is
58 computed from length of ``ydata``)
59 :param deltat: sampling interval in [s]
60 :param ydata: 1D numpy array with data samples (can be ``None`` when
61 ``tmax`` is not ``None``)
62 :param mtime: optional modification time
64 :param meta: additional meta information (not used, but maintained by the
65 library)
67 The length of the network, station, location and channel codes is not
68 resricted by this software, but data formats like SAC, Mini-SEED or GSE
69 have different limits on the lengths of these codes. The codes set here are
70 silently truncated when the trace is stored
71 '''
73 network = String.T(default='', help='Deployment/network code (1-8)')
74 station = String.T(default='STA', help='Station code (1-5)')
75 location = String.T(default='', help='Location code (0-2)')
76 channel = String.T(default='', help='Channel code (3)')
77 extra = String.T(default='', help='Extra/custom code')
79 tmin = Timestamp.T(default=Timestamp.D('1970-01-01 00:00:00'))
80 tmax = Timestamp.T()
82 deltat = Float.T(default=1.0)
83 ydata = Array.T(optional=True, shape=(None,), serialize_as='base64+meta')
85 mtime = Timestamp.T(optional=True)
87 cached_frequencies = {}
89 def __init__(self, network='', station='STA', location='', channel='',
90 tmin=0., tmax=None, deltat=1., ydata=None, mtime=None,
91 meta=None, extra=''):
93 Object.__init__(self, init_props=False)
95 time_float = util.get_time_float()
97 if not isinstance(tmin, time_float):
98 tmin = Trace.tmin.regularize_extra(tmin)
100 if tmax is not None and not isinstance(tmax, time_float):
101 tmax = Trace.tmax.regularize_extra(tmax)
103 if mtime is not None and not isinstance(mtime, time_float):
104 mtime = Trace.mtime.regularize_extra(mtime)
106 if ydata is not None and not isinstance(ydata, num.ndarray):
107 ydata = Trace.ydata.regularize_extra(ydata)
109 self._growbuffer = None
111 tmin = time_float(tmin)
112 if tmax is not None:
113 tmax = time_float(tmax)
115 if mtime is None:
116 mtime = time_float(time.time())
118 self.network, self.station, self.location, self.channel, \
119 self.extra = [
120 reuse(x) for x in (
121 network, station, location, channel, extra)]
123 self.tmin = tmin
124 self.deltat = deltat
126 if tmax is None:
127 if ydata is not None:
128 self.tmax = self.tmin + (ydata.size-1)*self.deltat
129 else:
130 raise Exception(
131 'fixme: trace must be created with tmax or ydata')
132 else:
133 n = int(round((tmax - self.tmin) / self.deltat)) + 1
134 self.tmax = self.tmin + (n - 1) * self.deltat
136 self.meta = meta
137 self.ydata = ydata
138 self.mtime = mtime
139 self._update_ids()
140 self.file = None
141 self._pchain = None
143 def __str__(self):
144 fmt = min(9, max(0, -int(math.floor(math.log10(self.deltat)))))
145 s = 'Trace (%s, %s, %s, %s)\n' % self.nslc_id
146 s += ' timerange: %s - %s\n' % (
147 util.time_to_str(self.tmin, format=fmt),
148 util.time_to_str(self.tmax, format=fmt))
150 s += ' delta t: %g\n' % self.deltat
151 if self.meta:
152 for k in sorted(self.meta.keys()):
153 s += ' %s: %s\n' % (k, self.meta[k])
154 return s
156 @property
157 def codes(self):
158 from pyrocko.squirrel import model
159 return model.CodesNSLCE(
160 self.network, self.station, self.location, self.channel,
161 self.extra)
163 @property
164 def time_span(self):
165 return self.tmin, self.tmax
167 @property
168 def summary_entries(self):
169 return (
170 self.__class__.__name__,
171 str(self.codes),
172 self.str_time_span,
173 '%g' % (1.0/self.deltat))
175 @property
176 def summary(self):
177 return util.fmt_summary(self.summary_entries, (10, 20, 55, 0))
179 def __getstate__(self):
180 return (self.network, self.station, self.location, self.channel,
181 self.tmin, self.tmax, self.deltat, self.mtime,
182 self.ydata, self.meta, self.extra)
184 def __setstate__(self, state):
185 if len(state) == 11:
186 self.network, self.station, self.location, self.channel, \
187 self.tmin, self.tmax, self.deltat, self.mtime, \
188 self.ydata, self.meta, self.extra = state
190 elif len(state) == 12:
191 # backward compatibility 0
192 self.network, self.station, self.location, self.channel, \
193 self.tmin, self.tmax, self.deltat, self.mtime, \
194 self.ydata, self.meta, _, self.extra = state
196 elif len(state) == 10:
197 # backward compatibility 1
198 self.network, self.station, self.location, self.channel, \
199 self.tmin, self.tmax, self.deltat, self.mtime, \
200 self.ydata, self.meta = state
202 self.extra = ''
204 else:
205 # backward compatibility 2
206 self.network, self.station, self.location, self.channel, \
207 self.tmin, self.tmax, self.deltat, self.mtime = state
208 self.ydata = None
209 self.meta = None
210 self.extra = ''
212 self._growbuffer = None
213 self._update_ids()
215 def hash(self, unsafe=False):
216 sha1 = hashlib.sha1()
217 for code in self.nslc_id:
218 sha1.update(code.encode())
219 sha1.update(self.tmin.hex().encode())
220 sha1.update(self.tmax.hex().encode())
221 sha1.update(self.deltat.hex().encode())
223 if self.ydata is not None:
224 if not unsafe:
225 sha1.update(memoryview(self.ydata))
226 else:
227 sha1.update(memoryview(self.ydata[:32]))
228 sha1.update(memoryview(self.ydata[-32:]))
230 return sha1.hexdigest()
232 def name(self):
233 '''
234 Get a short string description.
235 '''
237 s = '%s.%s.%s.%s, %s, %s' % (self.nslc_id + (
238 util.time_to_str(self.tmin),
239 util.time_to_str(self.tmax)))
241 return s
243 def __eq__(self, other):
244 return (
245 isinstance(other, Trace)
246 and self.network == other.network
247 and self.station == other.station
248 and self.location == other.location
249 and self.channel == other.channel
250 and (abs(self.deltat - other.deltat)
251 < (self.deltat + other.deltat)*1e-6)
252 and abs(self.tmin-other.tmin) < self.deltat*0.01
253 and abs(self.tmax-other.tmax) < self.deltat*0.01
254 and num.all(self.ydata == other.ydata))
256 def almost_equal(self, other, rtol=1e-5, atol=0.0):
257 return (
258 self.network == other.network
259 and self.station == other.station
260 and self.location == other.location
261 and self.channel == other.channel
262 and (abs(self.deltat - other.deltat)
263 < (self.deltat + other.deltat)*1e-6)
264 and abs(self.tmin-other.tmin) < self.deltat*0.01
265 and abs(self.tmax-other.tmax) < self.deltat*0.01
266 and num.allclose(self.ydata, other.ydata, rtol=rtol, atol=atol))
268 def assert_almost_equal(self, other, rtol=1e-5, atol=0.0):
270 assert self.network == other.network, \
271 'network codes differ: %s, %s' % (self.network, other.network)
272 assert self.station == other.station, \
273 'station codes differ: %s, %s' % (self.station, other.station)
274 assert self.location == other.location, \
275 'location codes differ: %s, %s' % (self.location, other.location)
276 assert self.channel == other.channel, 'channel codes differ'
277 assert (abs(self.deltat - other.deltat)
278 < (self.deltat + other.deltat)*1e-6), \
279 'sampling intervals differ %g, %g' % (self.deltat, other.delta)
280 assert abs(self.tmin-other.tmin) < self.deltat*0.01, \
281 'start times differ: %s, %s' % (
282 util.time_to_str(self.tmin), util.time_to_str(other.tmin))
283 assert abs(self.tmax-other.tmax) < self.deltat*0.01, \
284 'end times differ: %s, %s' % (
285 util.time_to_str(self.tmax), util.time_to_str(other.tmax))
287 assert num.allclose(self.ydata, other.ydata, rtol=rtol, atol=atol), \
288 'trace values differ'
290 def __hash__(self):
291 return id(self)
293 def __call__(self, t, clip=False, snap=round):
294 it = int(snap((t-self.tmin)/self.deltat))
295 if clip:
296 it = max(0, min(it, self.ydata.size-1))
297 else:
298 if it < 0 or self.ydata.size <= it:
299 raise IndexError()
301 return self.tmin+it*self.deltat, self.ydata[it]
303 def interpolate(self, t, clip=False):
304 '''
305 Value of trace between supporting points through linear interpolation.
307 :param t: time instant
308 :param clip: whether to clip indices to trace ends
309 '''
311 t0, y0 = self(t, clip=clip, snap=math.floor)
312 t1, y1 = self(t, clip=clip, snap=math.ceil)
313 if t0 == t1:
314 return y0
315 else:
316 return y0+(t-t0)/(t1-t0)*(y1-y0)
318 def index_clip(self, i):
319 '''
320 Clip index to valid range.
321 '''
323 return min(max(0, i), self.ydata.size)
325 def add(self, other, interpolate=True, left=0., right=0.):
326 '''
327 Add values of other trace (self += other).
329 Add values of ``other`` trace to the values of ``self``, where it
330 intersects with ``other``. This method does not change the extent of
331 ``self``. If ``interpolate`` is ``True`` (the default), the values of
332 ``other`` to be added are interpolated at sampling instants of
333 ``self``. Linear interpolation is performed. In this case the sampling
334 rate of ``other`` must be equal to or lower than that of ``self``. If
335 ``interpolate`` is ``False``, the sampling rates of the two traces must
336 match.
337 '''
339 if interpolate:
340 assert self.deltat <= other.deltat \
341 or same_sampling_rate(self, other)
343 other_xdata = other.get_xdata()
344 xdata = self.get_xdata()
345 self.ydata += num.interp(
346 xdata, other_xdata, other.ydata, left=left, right=left)
347 else:
348 assert self.deltat == other.deltat
349 ioff = int(round((other.tmin-self.tmin)/self.deltat))
350 ibeg = max(0, ioff)
351 iend = min(self.data_len(), ioff+other.data_len())
352 self.ydata[ibeg:iend] += other.ydata[ibeg-ioff:iend-ioff]
354 def mult(self, other, interpolate=True):
355 '''
356 Muliply with values of other trace ``(self *= other)``.
358 Multiply values of ``other`` trace to the values of ``self``, where it
359 intersects with ``other``. This method does not change the extent of
360 ``self``. If ``interpolate`` is ``True`` (the default), the values of
361 ``other`` to be multiplied are interpolated at sampling instants of
362 ``self``. Linear interpolation is performed. In this case the sampling
363 rate of ``other`` must be equal to or lower than that of ``self``. If
364 ``interpolate`` is ``False``, the sampling rates of the two traces must
365 match.
366 '''
368 if interpolate:
369 assert self.deltat <= other.deltat or \
370 same_sampling_rate(self, other)
372 other_xdata = other.get_xdata()
373 xdata = self.get_xdata()
374 self.ydata *= num.interp(
375 xdata, other_xdata, other.ydata, left=0., right=0.)
376 else:
377 assert self.deltat == other.deltat
378 ibeg1 = int(round((other.tmin-self.tmin)/self.deltat))
379 ibeg2 = int(round((self.tmin-other.tmin)/self.deltat))
380 iend1 = int(round((other.tmax-self.tmin)/self.deltat))+1
381 iend2 = int(round((self.tmax-other.tmin)/self.deltat))+1
383 ibeg1 = self.index_clip(ibeg1)
384 iend1 = self.index_clip(iend1)
385 ibeg2 = self.index_clip(ibeg2)
386 iend2 = self.index_clip(iend2)
388 self.ydata[ibeg1:iend1] *= other.ydata[ibeg2:iend2]
390 def max(self):
391 '''
392 Get time and value of data maximum.
393 '''
395 i = num.argmax(self.ydata)
396 return self.tmin + i*self.deltat, self.ydata[i]
398 def min(self):
399 '''
400 Get time and value of data minimum.
401 '''
403 i = num.argmin(self.ydata)
404 return self.tmin + i*self.deltat, self.ydata[i]
406 def absmax(self):
407 '''
408 Get time and value of maximum of the absolute of data.
409 '''
411 tmi, mi = self.min()
412 tma, ma = self.max()
413 if abs(mi) > abs(ma):
414 return tmi, abs(mi)
415 else:
416 return tma, abs(ma)
418 def set_codes(
419 self, network=None, station=None, location=None, channel=None,
420 extra=None):
422 '''
423 Set network, station, location, and channel codes.
424 '''
426 if network is not None:
427 self.network = network
428 if station is not None:
429 self.station = station
430 if location is not None:
431 self.location = location
432 if channel is not None:
433 self.channel = channel
434 if extra is not None:
435 self.extra = extra
437 self._update_ids()
439 def set_network(self, network):
440 self.network = network
441 self._update_ids()
443 def set_station(self, station):
444 self.station = station
445 self._update_ids()
447 def set_location(self, location):
448 self.location = location
449 self._update_ids()
451 def set_channel(self, channel):
452 self.channel = channel
453 self._update_ids()
455 def overlaps(self, tmin, tmax):
456 '''
457 Check if trace has overlap with a given time span.
458 '''
459 return not (tmax < self.tmin or self.tmax < tmin)
461 def is_relevant(self, tmin, tmax, selector=None):
462 '''
463 Check if trace has overlap with a given time span and matches a
464 condition callback. (internal use)
465 '''
467 return not (tmax <= self.tmin or self.tmax < tmin) \
468 and (selector is None or selector(self))
470 def _update_ids(self):
471 '''
472 Update dependent ids.
473 '''
475 self.full_id = (
476 self.network, self.station, self.location, self.channel, self.tmin)
477 self.nslc_id = reuse(
478 (self.network, self.station, self.location, self.channel))
480 def prune_from_reuse_cache(self):
481 util.deuse(self.nslc_id)
482 util.deuse(self.network)
483 util.deuse(self.station)
484 util.deuse(self.location)
485 util.deuse(self.channel)
487 def set_mtime(self, mtime):
488 '''
489 Set modification time of the trace.
490 '''
492 self.mtime = mtime
494 def get_xdata(self):
495 '''
496 Create array for time axis.
497 '''
499 if self.ydata is None:
500 raise NoData()
502 return self.tmin \
503 + num.arange(len(self.ydata), dtype=num.float64) * self.deltat
505 def get_ydata(self):
506 '''
507 Get data array.
508 '''
510 if self.ydata is None:
511 raise NoData()
513 return self.ydata
515 def set_ydata(self, new_ydata):
516 '''
517 Replace data array.
518 '''
520 self.drop_growbuffer()
521 self.ydata = new_ydata
522 self.tmax = self.tmin+(len(self.ydata)-1)*self.deltat
524 def data_len(self):
525 if self.ydata is not None:
526 return self.ydata.size
527 else:
528 return int(round((self.tmax-self.tmin)/self.deltat)) + 1
530 def drop_data(self):
531 '''
532 Forget data, make dataless trace.
533 '''
535 self.drop_growbuffer()
536 self.ydata = None
538 def drop_growbuffer(self):
539 '''
540 Detach the traces grow buffer.
541 '''
543 self._growbuffer = None
544 self._pchain = None
546 def copy(self, data=True):
547 '''
548 Make a deep copy of the trace.
549 '''
551 tracecopy = copy.copy(self)
552 tracecopy.drop_growbuffer()
553 if data:
554 tracecopy.ydata = self.ydata.copy()
555 tracecopy.meta = copy.deepcopy(self.meta)
556 return tracecopy
558 def crop_zeros(self):
559 '''
560 Remove any zeros at beginning and end.
561 '''
563 indices = num.where(self.ydata != 0.0)[0]
564 if indices.size == 0:
565 raise NoData()
567 ibeg = indices[0]
568 iend = indices[-1]+1
569 if ibeg == 0 and iend == self.ydata.size-1:
570 return
572 self.drop_growbuffer()
573 self.ydata = self.ydata[ibeg:iend].copy()
574 self.tmin = self.tmin+ibeg*self.deltat
575 self.tmax = self.tmin+(len(self.ydata)-1)*self.deltat
576 self._update_ids()
578 def append(self, data):
579 '''
580 Append data to the end of the trace.
582 To make this method efficient when successively very few or even single
583 samples are appended, a larger grow buffer is allocated upon first
584 invocation. The traces data is then changed to be a view into the
585 currently filled portion of the grow buffer array.
586 '''
588 assert self.ydata.dtype == data.dtype
589 newlen = data.size + self.ydata.size
590 if self._growbuffer is None or self._growbuffer.size < newlen:
591 self._growbuffer = num.empty(newlen*2, dtype=self.ydata.dtype)
592 self._growbuffer[:self.ydata.size] = self.ydata
593 self._growbuffer[self.ydata.size:newlen] = data
594 self.ydata = self._growbuffer[:newlen]
595 self.tmax = self.tmin + (newlen-1)*self.deltat
597 def chop(
598 self, tmin, tmax, inplace=True, include_last=False,
599 snap=(round, round), want_incomplete=True):
601 '''
602 Cut the trace to given time span.
604 If the ``inplace`` argument is True (the default) the trace is cut in
605 place, otherwise a new trace with the cut part is returned. By
606 default, the indices where to start and end the trace data array are
607 determined by rounding of ``tmin`` and ``tmax`` to sampling instances
608 using Python's :py:func:`round` function. This behaviour can be changed
609 with the ``snap`` argument, which takes a tuple of two functions (one
610 for the lower and one for the upper end) to be used instead of
611 :py:func:`round`. The last sample is by default not included unless
612 ``include_last`` is set to True. If the given time span exceeds the
613 available time span of the trace, the available part is returned,
614 unless ``want_incomplete`` is set to False - in that case, a
615 :py:exc:`NoData` exception is raised. This exception is always raised,
616 when the requested time span does dot overlap with the trace's time
617 span.
618 '''
620 if want_incomplete:
621 if tmax <= self.tmin-self.deltat or self.tmax+self.deltat < tmin:
622 raise NoData()
623 else:
624 if tmin < self.tmin or self.tmax < tmax:
625 raise NoData()
627 ibeg = max(0, t2ind(tmin-self.tmin, self.deltat, snap[0]))
628 iplus = 0
629 if include_last:
630 iplus = 1
632 iend = min(
633 self.data_len(),
634 t2ind(tmax-self.tmin, self.deltat, snap[1])+iplus)
636 if ibeg >= iend:
637 raise NoData()
639 obj = self
640 if not inplace:
641 obj = self.copy(data=False)
643 self.drop_growbuffer()
644 if self.ydata is not None:
645 obj.ydata = self.ydata[ibeg:iend].copy()
646 else:
647 obj.ydata = None
649 obj.tmin = obj.tmin+ibeg*obj.deltat
650 obj.tmax = obj.tmin+((iend-ibeg)-1)*obj.deltat
652 obj._update_ids()
654 return obj
656 def downsample(self, ndecimate, snap=False, initials=None, demean=False,
657 ftype='fir-remez'):
658 '''
659 Downsample trace by a given integer factor.
661 Antialiasing filter details:
663 * ``iir``: A Chebyshev type I digital filter of order 8 with maximum
664 ripple of 0.05 dB in the passband.
665 * ``fir``: A FIR filter using a Hamming window and 31 taps and a
666 well-behaved phase response.
667 * ``fir-remez``: A FIR filter based on the Remez exchange algorithm
668 with ``45*ndecimate`` taps and a cutoff at 75% Nyquist frequency.
670 Comparison of the digital filters:
672 .. figure :: ../../static/downsampling-filter-comparison.png
673 :width: 60%
674 :alt: Comparison of the downsampling filters.
676 :param ndecimate: decimation factor, avoid values larger than 8
677 :param snap: whether to put the new sampling instances closest to
678 multiples of the sampling rate.
679 :param initials: ``None``, ``True``, or initial conditions for the
680 anti-aliasing filter, obtained from a previous run. In the latter
681 two cases the final state of the filter is returned instead of
682 ``None``.
683 :param demean: whether to demean the signal before filtering.
684 :param ftype: which FIR filter to use, choose from
685 ``'iir'``, ``'fir'``, ``'fir-remez'``. Default is ``'fir-remez'``.
686 '''
687 newdeltat = self.deltat*ndecimate
688 if snap:
689 ilag = int(round(
690 (math.ceil(self.tmin / newdeltat) * newdeltat - self.tmin)
691 / self.deltat))
692 else:
693 ilag = 0
695 if snap and ilag > 0 and ilag < self.ydata.size:
696 data = self.ydata.astype(num.float64)
697 self.tmin += ilag*self.deltat
698 else:
699 data = self.ydata.astype(num.float64)
701 if demean:
702 data -= num.mean(data)
704 if data.size != 0:
705 result = util.decimate(
706 data, ndecimate, ftype=ftype, zi=initials, ioff=ilag)
707 else:
708 result = data
710 if initials is None:
711 self.ydata, finals = result, None
712 else:
713 self.ydata, finals = result
715 self.deltat = reuse(self.deltat*ndecimate)
716 self.tmax = self.tmin+(len(self.ydata)-1)*self.deltat
717 self._update_ids()
719 return finals
721 def downsample_to(self, deltat, snap=False, allow_upsample_max=1,
722 initials=None, demean=False, ftype='fir-remez'):
724 '''
725 Downsample to given sampling rate.
727 Tries to downsample the trace to a target sampling interval of
728 ``deltat``. This runs the :py:meth:`Trace.downsample` one or several
729 times. If ``allow_upsample_max`` is set to a value larger than 1,
730 intermediate upsampling steps are allowed, in order to increase the
731 number of possible downsampling ratios.
733 If the requested ratio is not supported, an exception of type
734 :py:exc:`pyrocko.util.UnavailableDecimation` is raised.
736 :param deltat: desired sampling interval in [s]
737 :param allow_upsample_max: maximum upsampling of the signal to achieve
738 the desired deltat. Default is ``1``.
739 :param snap: whether to put the new sampling instances closest to
740 multiples of the sampling rate.
741 :param initials: ``None``, ``True``, or initial conditions for the
742 anti-aliasing filter, obtained from a previous run. In the latter
743 two cases the final state of the filter is returned instead of
744 ``None``.
745 :param demean: whether to demean the signal before filtering.
746 :param ftype: which FIR filter to use, choose from
747 ``'iir'``, ``'fir'``, ``'fir-remez'``. Default is ``'fir-remez'``.
748 See also: :meth:`Trace.downample`
749 '''
751 ratio = deltat/self.deltat
752 rratio = round(ratio)
754 ok = False
755 for upsratio in range(1, allow_upsample_max+1):
756 dratio = (upsratio/self.deltat) / (1./deltat)
757 if abs(dratio - round(dratio)) / dratio < 0.0001 and \
758 util.decitab(int(round(dratio))):
760 ok = True
761 break
763 if not ok:
764 raise util.UnavailableDecimation('ratio = %g' % ratio)
766 if upsratio > 1:
767 self.drop_growbuffer()
768 ydata = self.ydata
769 self.ydata = num.zeros(
770 ydata.size*upsratio-(upsratio-1), ydata.dtype)
771 self.ydata[::upsratio] = ydata
772 for i in range(1, upsratio):
773 self.ydata[i::upsratio] = \
774 float(i)/upsratio * ydata[:-1] \
775 + float(upsratio-i)/upsratio * ydata[1:]
776 self.deltat = self.deltat/upsratio
778 ratio = deltat/self.deltat
779 rratio = round(ratio)
781 deci_seq = util.decitab(int(rratio))
782 finals = []
783 for i, ndecimate in enumerate(deci_seq):
784 if ndecimate != 1:
785 xinitials = None
786 if initials is not None:
787 xinitials = initials[i]
788 finals.append(self.downsample(
789 ndecimate, snap=snap, initials=xinitials, demean=demean,
790 ftype=ftype))
792 if initials is not None:
793 return finals
795 def resample(self, deltat):
796 '''
797 Resample to given sampling rate ``deltat``.
799 Resampling is performed in the frequency domain.
800 '''
802 ndata = self.ydata.size
803 ntrans = nextpow2(ndata)
804 fntrans2 = ntrans * self.deltat/deltat
805 ntrans2 = int(round(fntrans2))
806 deltat2 = self.deltat * float(ntrans)/float(ntrans2)
807 ndata2 = int(round(ndata*self.deltat/deltat2))
808 if abs(fntrans2 - ntrans2) > 1e-7:
809 logger.warning(
810 'resample: requested deltat %g could not be matched exactly: '
811 '%g' % (deltat, deltat2))
813 data = self.ydata
814 data_pad = num.zeros(ntrans, dtype=float)
815 data_pad[:ndata] = data
816 fdata = num.fft.rfft(data_pad)
817 fdata2 = num.zeros((ntrans2+1)//2, dtype=fdata.dtype)
818 n = min(fdata.size, fdata2.size)
819 fdata2[:n] = fdata[:n]
820 data2 = num.fft.irfft(fdata2)
821 data2 = data2[:ndata2]
822 data2 *= float(ntrans2) / float(ntrans)
823 self.deltat = deltat2
824 self.set_ydata(data2)
826 def resample_simple(self, deltat):
827 tyear = 3600*24*365.
829 if deltat == self.deltat:
830 return
832 if abs(self.deltat - deltat) * tyear/deltat < deltat:
833 logger.warning(
834 'resample_simple: less than one sample would have to be '
835 'inserted/deleted per year. Doing nothing.')
836 return
838 ninterval = int(round(deltat / (self.deltat - deltat)))
839 if abs(ninterval) < 20:
840 logger.error(
841 'resample_simple: sample insertion/deletion interval less '
842 'than 20. results would be erroneous.')
843 raise ResamplingFailed()
845 delete = False
846 if ninterval < 0:
847 ninterval = - ninterval
848 delete = True
850 tyearbegin = util.year_start(self.tmin)
852 nmin = int(round((self.tmin - tyearbegin)/deltat))
854 ibegin = (((nmin-1)//ninterval)+1) * ninterval - nmin
855 nindices = (len(self.ydata) - ibegin - 1) / ninterval + 1
856 if nindices > 0:
857 indices = ibegin + num.arange(nindices) * ninterval
858 data_split = num.split(self.ydata, indices)
859 data = []
860 for ln, h in zip(data_split[:-1], data_split[1:]):
861 if delete:
862 ln = ln[:-1]
864 data.append(ln)
865 if not delete:
866 if ln.size == 0:
867 v = h[0]
868 else:
869 v = 0.5*(ln[-1] + h[0])
870 data.append(num.array([v], dtype=ln.dtype))
872 data.append(data_split[-1])
874 ydata_new = num.concatenate(data)
876 self.tmin = tyearbegin + nmin * deltat
877 self.deltat = deltat
878 self.set_ydata(ydata_new)
880 def stretch(self, tmin_new, tmax_new):
881 '''
882 Stretch signal while preserving sample rate using sinc interpolation.
884 :param tmin_new: new time of first sample
885 :param tmax_new: new time of last sample
887 This method can be used to correct for a small linear time drift or to
888 introduce sub-sample time shifts. The amount of stretching is limited
889 to 10% by the implementation and is expected to be much smaller than
890 that by the approximations used.
891 '''
893 from pyrocko import signal_ext
895 i_control = num.array([0, self.ydata.size-1], dtype=num.int64)
896 t_control = num.array([tmin_new, tmax_new], dtype=float)
898 r = (tmax_new - tmin_new) / self.deltat + 1.0
899 n_new = int(round(r))
900 if abs(n_new - r) > 0.001:
901 n_new = int(math.floor(r))
903 assert n_new >= 2
905 tmax_new = tmin_new + (n_new-1) * self.deltat
907 ydata_new = num.empty(n_new, dtype=float)
908 signal_ext.antidrift(i_control, t_control,
909 self.ydata.astype(float),
910 tmin_new, self.deltat, ydata_new)
912 self.tmin = tmin_new
913 self.set_ydata(ydata_new)
914 self._update_ids()
916 def nyquist_check(self, frequency, intro='Corner frequency', warn=True,
917 raise_exception=False):
919 '''
920 Check if a given frequency is above the Nyquist frequency of the trace.
922 :param intro: string used to introduce the warning/error message
923 :param warn: whether to emit a warning
924 :param raise_exception: whether to raise an :py:exc:`AboveNyquist`
925 exception.
926 '''
928 if frequency >= 0.5/self.deltat:
929 message = '%s (%g Hz) is equal to or higher than nyquist ' \
930 'frequency (%g Hz). (Trace %s)' \
931 % (intro, frequency, 0.5/self.deltat, self.name())
932 if warn:
933 logger.warning(message)
934 if raise_exception:
935 raise AboveNyquist(message)
937 def lowpass(self, order, corner, nyquist_warn=True,
938 nyquist_exception=False, demean=True):
940 '''
941 Apply Butterworth lowpass to the trace.
943 :param order: order of the filter
944 :param corner: corner frequency of the filter
946 Mean is removed before filtering.
947 '''
949 self.nyquist_check(
950 corner, 'Corner frequency of lowpass', nyquist_warn,
951 nyquist_exception)
953 (b, a) = _get_cached_filter_coefs(
954 order, [corner*2.0*self.deltat], btype='low')
956 if len(a) != order+1 or len(b) != order+1:
957 logger.warning(
958 'Erroneous filter coefficients returned by '
959 'scipy.signal.butter(). You may need to downsample the '
960 'signal before filtering.')
962 data = self.ydata.astype(num.float64)
963 if demean:
964 data -= num.mean(data)
965 self.drop_growbuffer()
966 self.ydata = signal.lfilter(b, a, data)
968 def highpass(self, order, corner, nyquist_warn=True,
969 nyquist_exception=False, demean=True):
971 '''
972 Apply butterworth highpass to the trace.
974 :param order: order of the filter
975 :param corner: corner frequency of the filter
977 Mean is removed before filtering.
978 '''
980 self.nyquist_check(
981 corner, 'Corner frequency of highpass', nyquist_warn,
982 nyquist_exception)
984 (b, a) = _get_cached_filter_coefs(
985 order, [corner*2.0*self.deltat], btype='high')
987 data = self.ydata.astype(num.float64)
988 if len(a) != order+1 or len(b) != order+1:
989 logger.warning(
990 'Erroneous filter coefficients returned by '
991 'scipy.signal.butter(). You may need to downsample the '
992 'signal before filtering.')
993 if demean:
994 data -= num.mean(data)
995 self.drop_growbuffer()
996 self.ydata = signal.lfilter(b, a, data)
998 def bandpass(self, order, corner_hp, corner_lp, demean=True):
999 '''
1000 Apply butterworth bandpass to the trace.
1002 :param order: order of the filter
1003 :param corner_hp: lower corner frequency of the filter
1004 :param corner_lp: upper corner frequency of the filter
1006 Mean is removed before filtering.
1007 '''
1009 self.nyquist_check(corner_hp, 'Lower corner frequency of bandpass')
1010 self.nyquist_check(corner_lp, 'Higher corner frequency of bandpass')
1011 (b, a) = _get_cached_filter_coefs(
1012 order,
1013 [corner*2.0*self.deltat for corner in (corner_hp, corner_lp)],
1014 btype='band')
1015 data = self.ydata.astype(num.float64)
1016 if demean:
1017 data -= num.mean(data)
1018 self.drop_growbuffer()
1019 self.ydata = signal.lfilter(b, a, data)
1021 def bandstop(self, order, corner_hp, corner_lp, demean=True):
1022 '''
1023 Apply bandstop (attenuates frequencies in band) to the trace.
1025 :param order: order of the filter
1026 :param corner_hp: lower corner frequency of the filter
1027 :param corner_lp: upper corner frequency of the filter
1029 Mean is removed before filtering.
1030 '''
1032 self.nyquist_check(corner_hp, 'Lower corner frequency of bandstop')
1033 self.nyquist_check(corner_lp, 'Higher corner frequency of bandstop')
1034 (b, a) = _get_cached_filter_coefs(
1035 order,
1036 [corner*2.0*self.deltat for corner in (corner_hp, corner_lp)],
1037 btype='bandstop')
1038 data = self.ydata.astype(num.float64)
1039 if demean:
1040 data -= num.mean(data)
1041 self.drop_growbuffer()
1042 self.ydata = signal.lfilter(b, a, data)
1044 def envelope(self, inplace=True):
1045 '''
1046 Calculate the envelope of the trace.
1048 :param inplace: calculate envelope in place
1050 The calculation follows:
1052 .. math::
1054 Y' = \\sqrt{Y^2+H(Y)^2}
1056 where H is the Hilbert-Transform of the signal Y.
1057 '''
1059 y = self.ydata.astype(float)
1060 env = num.abs(hilbert(y))
1061 if inplace:
1062 self.drop_growbuffer()
1063 self.ydata = env
1064 else:
1065 tr = self.copy(data=False)
1066 tr.ydata = env
1067 return tr
1069 def taper(self, taperer, inplace=True, chop=False):
1070 '''
1071 Apply a :py:class:`Taper` to the trace.
1073 :param taperer: instance of :py:class:`Taper` subclass
1074 :param inplace: apply taper inplace
1075 :param chop: if ``True``: exclude tapered parts from the resulting
1076 trace
1077 '''
1079 if not inplace:
1080 tr = self.copy()
1081 else:
1082 tr = self
1084 if chop:
1085 i, n = taperer.span(tr.ydata, tr.tmin, tr.deltat)
1086 tr.shift(i*tr.deltat)
1087 tr.set_ydata(tr.ydata[i:i+n])
1089 taperer(tr.ydata, tr.tmin, tr.deltat)
1091 if not inplace:
1092 return tr
1094 def whiten(self, order=6):
1095 '''
1096 Whiten signal in time domain using autoregression and recursive filter.
1098 :param order: order of the autoregression process
1099 '''
1101 b, a = self.whitening_coefficients(order)
1102 self.drop_growbuffer()
1103 self.ydata = signal.lfilter(b, a, self.ydata)
1105 def whitening_coefficients(self, order=6):
1106 ar = yulewalker(self.ydata, order)
1107 b, a = [1.] + ar.tolist(), [1.]
1108 return b, a
1110 def ampspec_whiten(
1111 self,
1112 width,
1113 td_taper='auto',
1114 fd_taper='auto',
1115 pad_to_pow2=True,
1116 demean=True):
1118 '''
1119 Whiten signal via frequency domain using moving average on amplitude
1120 spectra.
1122 :param width: width of smoothing kernel [Hz]
1123 :param td_taper: time domain taper, object of type :py:class:`Taper` or
1124 ``None`` or ``'auto'``.
1125 :param fd_taper: frequency domain taper, object of type
1126 :py:class:`Taper` or ``None`` or ``'auto'``.
1127 :param pad_to_pow2: whether to pad the signal with zeros up to a length
1128 of 2^n
1129 :param demean: whether to demean the signal before tapering
1131 The signal is first demeaned and then tapered using ``td_taper``. Then,
1132 the spectrum is calculated and inversely weighted with a smoothed
1133 version of its amplitude spectrum. A moving average is used for the
1134 smoothing. The smoothed spectrum is then tapered using ``fd_taper``.
1135 Finally, the smoothed and tapered spectrum is back-transformed into the
1136 time domain.
1138 If ``td_taper`` is set to ``'auto'``, ``CosFader(1.0/width)`` is used.
1139 If ``fd_taper`` is set to ``'auto'``, ``CosFader(width)`` is used.
1140 '''
1142 ndata = self.data_len()
1144 if pad_to_pow2:
1145 ntrans = nextpow2(ndata)
1146 else:
1147 ntrans = ndata
1149 df = 1./(ntrans*self.deltat)
1150 nw = int(round(width/df))
1151 if ndata//2+1 <= nw:
1152 raise TraceTooShort(
1153 'Samples in trace: %s, samples needed: %s' % (ndata, nw))
1155 if td_taper == 'auto':
1156 td_taper = CosFader(1./width)
1158 if fd_taper == 'auto':
1159 fd_taper = CosFader(width)
1161 if td_taper:
1162 self.taper(td_taper)
1164 ydata = self.get_ydata().astype(float)
1165 if demean:
1166 ydata -= ydata.mean()
1168 spec = num.fft.rfft(ydata, ntrans)
1170 amp = num.abs(spec)
1171 nspec = amp.size
1172 csamp = num.cumsum(amp)
1173 amp_smoothed = num.empty(nspec, dtype=csamp.dtype)
1174 n1, n2 = nw//2, nw//2 + nspec - nw
1175 amp_smoothed[n1:n2] = (csamp[nw:] - csamp[:-nw]) / nw
1176 amp_smoothed[:n1] = amp_smoothed[n1]
1177 amp_smoothed[n2:] = amp_smoothed[n2-1]
1179 denom = amp_smoothed * amp
1180 numer = amp
1181 eps = num.mean(denom) * 1e-9
1182 if eps == 0.0:
1183 eps = 1e-9
1185 numer += eps
1186 denom += eps
1187 spec *= numer/denom
1189 if fd_taper:
1190 fd_taper(spec, 0., df)
1192 ydata = num.fft.irfft(spec)
1193 self.set_ydata(ydata[:ndata])
1195 def _get_cached_freqs(self, nf, deltaf):
1196 ck = (nf, deltaf)
1197 if ck not in Trace.cached_frequencies:
1198 Trace.cached_frequencies[ck] = deltaf * num.arange(
1199 nf, dtype=float)
1201 return Trace.cached_frequencies[ck]
1203 def bandpass_fft(self, corner_hp, corner_lp):
1204 '''
1205 Apply boxcar bandbpass to trace (in spectral domain).
1206 '''
1208 n = len(self.ydata)
1209 n2 = nextpow2(n)
1210 data = num.zeros(n2, dtype=num.float64)
1211 data[:n] = self.ydata
1212 fdata = num.fft.rfft(data)
1213 freqs = self._get_cached_freqs(len(fdata), 1./(self.deltat*n2))
1214 fdata[0] = 0.0
1215 fdata *= num.logical_and(corner_hp < freqs, freqs < corner_lp)
1216 data = num.fft.irfft(fdata)
1217 self.drop_growbuffer()
1218 self.ydata = data[:n]
1220 def shift(self, tshift):
1221 '''
1222 Time shift the trace.
1223 '''
1225 self.tmin += tshift
1226 self.tmax += tshift
1227 self._update_ids()
1229 def snap(self, inplace=True, interpolate=False):
1230 '''
1231 Shift trace samples to nearest even multiples of the sampling rate.
1233 :param inplace: (boolean) snap traces inplace
1235 If ``inplace`` is ``False`` and the difference of tmin and tmax of
1236 both, the snapped and the original trace is smaller than 0.01 x deltat,
1237 :py:func:`snap` returns the unsnapped instance of the original trace.
1238 '''
1240 tmin = round(self.tmin/self.deltat)*self.deltat
1241 tmax = tmin + (self.ydata.size-1)*self.deltat
1243 if inplace:
1244 xself = self
1245 else:
1246 if abs(self.tmin - tmin) < 1e-2*self.deltat and \
1247 abs(self.tmax - tmax) < 1e-2*self.deltat:
1248 return self
1250 xself = self.copy()
1252 if interpolate:
1253 from pyrocko import signal_ext
1254 n = xself.data_len()
1255 ydata_new = num.empty(n, dtype=float)
1256 i_control = num.array([0, n-1], dtype=num.int64)
1257 tref = tmin
1258 t_control = num.array(
1259 [float(xself.tmin-tref), float(xself.tmax-tref)],
1260 dtype=float)
1262 signal_ext.antidrift(i_control, t_control,
1263 xself.ydata.astype(float),
1264 float(tmin-tref), xself.deltat, ydata_new)
1266 xself.ydata = ydata_new
1268 xself.tmin = tmin
1269 xself.tmax = tmax
1270 xself._update_ids()
1272 return xself
1274 def fix_deltat_rounding_errors(self):
1275 '''
1276 Try to undo sampling rate rounding errors.
1278 See :py:func:`fix_deltat_rounding_errors`.
1279 '''
1281 self.deltat = fix_deltat_rounding_errors(self.deltat)
1282 self.tmax = self.tmin + (self.data_len() - 1) * self.deltat
1284 def sta_lta_centered(self, tshort, tlong, quad=True, scalingmethod=1):
1285 '''
1286 Run special STA/LTA filter where the short time window is centered on
1287 the long time window.
1289 :param tshort: length of short time window in [s]
1290 :param tlong: length of long time window in [s]
1291 :param quad: whether to square the data prior to applying the STA/LTA
1292 filter
1293 :param scalingmethod: integer key to select how output values are
1294 scaled / normalized (``1``, ``2``, or ``3``)
1296 ============= ====================================== ===========
1297 Scalingmethod Implementation Range
1298 ============= ====================================== ===========
1299 ``1`` As/Al* Ts/Tl [0,1]
1300 ``2`` (As/Al - 1) / (Tl/Ts - 1) [-Ts/Tl,1]
1301 ``3`` Like ``2`` but clipping range at zero [0,1]
1302 ============= ====================================== ===========
1304 '''
1306 nshort = int(round(tshort/self.deltat))
1307 nlong = int(round(tlong/self.deltat))
1309 assert nshort < nlong
1310 if nlong > len(self.ydata):
1311 raise TraceTooShort(
1312 'Samples in trace: %s, samples needed: %s'
1313 % (len(self.ydata), nlong))
1315 if quad:
1316 sqrdata = self.ydata**2
1317 else:
1318 sqrdata = self.ydata
1320 mavg_short = moving_avg(sqrdata, nshort)
1321 mavg_long = moving_avg(sqrdata, nlong)
1323 self.drop_growbuffer()
1325 if scalingmethod not in (1, 2, 3):
1326 raise Exception('Invalid argument to scalingrange argument.')
1328 if scalingmethod == 1:
1329 self.ydata = mavg_short/mavg_long * float(nshort)/float(nlong)
1330 elif scalingmethod in (2, 3):
1331 self.ydata = (mavg_short/mavg_long - 1.) \
1332 / ((float(nlong)/float(nshort)) - 1)
1334 if scalingmethod == 3:
1335 self.ydata = num.maximum(self.ydata, 0.)
1337 def sta_lta_right(self, tshort, tlong, quad=True, scalingmethod=1):
1338 '''
1339 Run special STA/LTA filter where the short time window is overlapping
1340 with the last part of the long time window.
1342 :param tshort: length of short time window in [s]
1343 :param tlong: length of long time window in [s]
1344 :param quad: whether to square the data prior to applying the STA/LTA
1345 filter
1346 :param scalingmethod: integer key to select how output values are
1347 scaled / normalized (``1``, ``2``, or ``3``)
1349 ============= ====================================== ===========
1350 Scalingmethod Implementation Range
1351 ============= ====================================== ===========
1352 ``1`` As/Al* Ts/Tl [0,1]
1353 ``2`` (As/Al - 1) / (Tl/Ts - 1) [-Ts/Tl,1]
1354 ``3`` Like ``2`` but clipping range at zero [0,1]
1355 ============= ====================================== ===========
1357 With ``scalingmethod=1``, the values produced by this variant of the
1358 STA/LTA are equivalent to
1360 .. math::
1361 s_i = \\frac{s}{l} \\frac{\\frac{1}{s}\\sum_{j=i}^{i+s-1} f_j}
1362 {\\frac{1}{l}\\sum_{j=i+s-l}^{i+s-1} f_j}
1364 where :math:`f_j` are the input samples, :math:`s` are the number of
1365 samples in the short time window and :math:`l` are the number of
1366 samples in the long time window.
1367 '''
1369 n = self.data_len()
1370 tmin = self.tmin
1372 nshort = max(1, int(round(tshort/self.deltat)))
1373 nlong = max(1, int(round(tlong/self.deltat)))
1375 assert nshort < nlong
1377 if nlong > len(self.ydata):
1378 raise TraceTooShort(
1379 'Samples in trace: %s, samples needed: %s'
1380 % (len(self.ydata), nlong))
1382 if quad:
1383 sqrdata = self.ydata**2
1384 else:
1385 sqrdata = self.ydata
1387 nshift = int(math.floor(0.5 * (nlong - nshort)))
1388 if nlong % 2 != 0 and nshort % 2 == 0:
1389 nshift += 1
1391 mavg_short = moving_avg(sqrdata, nshort)[nshift:]
1392 mavg_long = moving_avg(sqrdata, nlong)[:sqrdata.size-nshift]
1394 self.drop_growbuffer()
1396 if scalingmethod not in (1, 2, 3):
1397 raise Exception('Invalid argument to scalingrange argument.')
1399 if scalingmethod == 1:
1400 ydata = mavg_short/mavg_long * nshort/nlong
1401 elif scalingmethod in (2, 3):
1402 ydata = (mavg_short/mavg_long - 1.) \
1403 / ((float(nlong)/float(nshort)) - 1)
1405 if scalingmethod == 3:
1406 ydata = num.maximum(self.ydata, 0.)
1408 self.set_ydata(ydata)
1410 self.shift((math.ceil(0.5*nlong) - nshort + 1) * self.deltat)
1412 self.chop(
1413 tmin + (nlong - nshort) * self.deltat,
1414 tmin + (n - nshort) * self.deltat)
1416 def peaks(self, threshold, tsearch,
1417 deadtime=False,
1418 nblock_duration_detection=100):
1420 '''
1421 Detect peaks above a given threshold (method 1).
1423 From every instant, where the signal rises above ``threshold``, a time
1424 length of ``tsearch`` seconds is searched for a maximum. A list with
1425 tuples (time, value) for each detected peak is returned. The
1426 ``deadtime`` argument turns on a special deadtime duration detection
1427 algorithm useful in combination with recursive STA/LTA filters.
1428 '''
1430 y = self.ydata
1431 above = num.where(y > threshold, 1, 0)
1432 deriv = num.zeros(y.size, dtype=num.int8)
1433 deriv[1:] = above[1:]-above[:-1]
1434 itrig_positions = num.nonzero(deriv > 0)[0]
1435 tpeaks = []
1436 apeaks = []
1437 tzeros = []
1438 tzero = self.tmin
1440 for itrig_pos in itrig_positions:
1441 ibeg = itrig_pos
1442 iend = min(
1443 len(self.ydata),
1444 itrig_pos + int(math.ceil(tsearch/self.deltat)))
1445 ipeak = num.argmax(y[ibeg:iend])
1446 tpeak = self.tmin + (ipeak+ibeg)*self.deltat
1447 apeak = y[ibeg+ipeak]
1449 if tpeak < tzero:
1450 continue
1452 if deadtime:
1453 ibeg = itrig_pos
1454 iblock = 0
1455 nblock = nblock_duration_detection
1456 totalsum = 0.
1457 while True:
1458 if ibeg+iblock*nblock >= len(y):
1459 tzero = self.tmin + (len(y)-1) * self.deltat
1460 break
1462 logy = num.log(
1463 y[ibeg+iblock*nblock:ibeg+(iblock+1)*nblock])
1464 logy[0] += totalsum
1465 ysum = num.cumsum(logy)
1466 totalsum = ysum[-1]
1467 below = num.where(ysum <= 0., 1, 0)
1468 deriv = num.zeros(ysum.size, dtype=num.int8)
1469 deriv[1:] = below[1:]-below[:-1]
1470 izero_positions = num.nonzero(deriv > 0)[0] + iblock*nblock
1471 if len(izero_positions) > 0:
1472 tzero = self.tmin + self.deltat * (
1473 ibeg + izero_positions[0])
1474 break
1475 iblock += 1
1476 else:
1477 tzero = ibeg*self.deltat + self.tmin + tsearch
1479 tpeaks.append(tpeak)
1480 apeaks.append(apeak)
1481 tzeros.append(tzero)
1483 if deadtime:
1484 return tpeaks, apeaks, tzeros
1485 else:
1486 return tpeaks, apeaks
1488 def peaks2(self, threshold, tsearch):
1490 '''
1491 Detect peaks above a given threshold (method 2).
1493 This variant of peak detection is a bit more robust (and slower) than
1494 the one implemented in :py:meth:`Trace.peaks`. First all samples with
1495 ``a[i-1] < a[i] > a[i+1]`` are masked as potential peaks. From these,
1496 iteratively the one with the maximum amplitude ``a[j]`` and time
1497 ``t[j]`` is choosen and potential peaks within
1498 ``t[j] - tsearch, t[j] + tsearch``
1499 are discarded. The algorithm stops, when ``a[j] < threshold`` or when
1500 no more potential peaks are left.
1501 '''
1503 a = num.copy(self.ydata)
1505 amin = num.min(a)
1507 a[0] = amin
1508 a[1: -1][num.diff(a, 2) <= 0.] = amin
1509 a[-1] = amin
1511 data = []
1512 while True:
1513 imax = num.argmax(a)
1514 amax = a[imax]
1516 if amax < threshold or amax == amin:
1517 break
1519 data.append((self.tmin + imax * self.deltat, amax))
1521 ntsearch = int(round(tsearch / self.deltat))
1522 a[max(imax-ntsearch//2, 0):min(imax+ntsearch//2, a.size)] = amin
1524 if data:
1525 data.sort()
1526 tpeaks, apeaks = list(zip(*data))
1527 else:
1528 tpeaks, apeaks = [], []
1530 return tpeaks, apeaks
1532 def extend(self, tmin=None, tmax=None, fillmethod='zeros'):
1533 '''
1534 Extend trace to given span.
1536 :param tmin: begin time of new span
1537 :param tmax: end time of new span
1538 :param fillmethod: ``'zeros'``, ``'repeat'``, ``'mean'``, or
1539 ``'median'``
1540 '''
1542 nold = self.ydata.size
1544 if tmin is not None:
1545 nl = min(0, int(round((tmin-self.tmin)/self.deltat)))
1546 else:
1547 nl = 0
1549 if tmax is not None:
1550 nh = max(nold - 1, int(round((tmax-self.tmin)/self.deltat)))
1551 else:
1552 nh = nold - 1
1554 n = nh - nl + 1
1555 data = num.zeros(n, dtype=self.ydata.dtype)
1556 data[-nl:-nl + nold] = self.ydata
1557 if self.ydata.size >= 1:
1558 if fillmethod == 'repeat':
1559 data[:-nl] = self.ydata[0]
1560 data[-nl + nold:] = self.ydata[-1]
1561 elif fillmethod == 'median':
1562 v = num.median(self.ydata)
1563 data[:-nl] = v
1564 data[-nl + nold:] = v
1565 elif fillmethod == 'mean':
1566 v = num.mean(self.ydata)
1567 data[:-nl] = v
1568 data[-nl + nold:] = v
1570 self.drop_growbuffer()
1571 self.ydata = data
1573 self.tmin += nl * self.deltat
1574 self.tmax = self.tmin + (self.ydata.size - 1) * self.deltat
1576 self._update_ids()
1578 def transfer(self,
1579 tfade=0.,
1580 freqlimits=None,
1581 transfer_function=None,
1582 cut_off_fading=True,
1583 demean=True,
1584 invert=False):
1586 '''
1587 Return new trace with transfer function applied (convolution).
1589 :param tfade: rise/fall time in seconds of taper applied in timedomain
1590 at both ends of trace.
1591 :param freqlimits: 4-tuple with corner frequencies in Hz.
1592 :param transfer_function: FrequencyResponse object; must provide a
1593 method 'evaluate(freqs)', which returns the transfer function
1594 coefficients at the frequencies 'freqs'.
1595 :param cut_off_fading: whether to cut off rise/fall interval in output
1596 trace.
1597 :param demean: remove mean before applying transfer function
1598 :param invert: set to True to do a deconvolution
1599 '''
1601 if transfer_function is None:
1602 transfer_function = FrequencyResponse()
1604 if self.tmax - self.tmin <= tfade*2.:
1605 raise TraceTooShort(
1606 'Trace %s.%s.%s.%s too short for fading length setting. '
1607 'trace length = %g, fading length = %g'
1608 % (self.nslc_id + (self.tmax-self.tmin, tfade)))
1610 if freqlimits is None and (
1611 transfer_function is None or transfer_function.is_scalar()):
1613 # special case for flat responses
1615 output = self.copy()
1616 data = self.ydata
1617 ndata = data.size
1619 if transfer_function is not None:
1620 c = num.abs(transfer_function.evaluate(num.ones(1))[0])
1622 if invert:
1623 c = 1.0/c
1625 data *= c
1627 if tfade != 0.0:
1628 data *= costaper(
1629 0., tfade, self.deltat*(ndata-1)-tfade, self.deltat*ndata,
1630 ndata, self.deltat)
1632 output.ydata = data
1634 else:
1635 ndata = self.ydata.size
1636 ntrans = nextpow2(ndata*1.2)
1637 coefs = self._get_tapered_coefs(
1638 ntrans, freqlimits, transfer_function, invert=invert)
1640 data = self.ydata
1642 data_pad = num.zeros(ntrans, dtype=float)
1643 data_pad[:ndata] = data
1644 if demean:
1645 data_pad[:ndata] -= data.mean()
1647 if tfade != 0.0:
1648 data_pad[:ndata] *= costaper(
1649 0., tfade, self.deltat*(ndata-1)-tfade, self.deltat*ndata,
1650 ndata, self.deltat)
1652 fdata = num.fft.rfft(data_pad)
1653 fdata *= coefs
1654 ddata = num.fft.irfft(fdata)
1655 output = self.copy()
1656 output.ydata = ddata[:ndata]
1658 if cut_off_fading and tfade != 0.0:
1659 try:
1660 output.chop(output.tmin+tfade, output.tmax-tfade, inplace=True)
1661 except NoData:
1662 raise TraceTooShort(
1663 'Trace %s.%s.%s.%s too short for fading length setting. '
1664 'trace length = %g, fading length = %g'
1665 % (self.nslc_id + (self.tmax-self.tmin, tfade)))
1666 else:
1667 output.ydata = output.ydata.copy()
1669 return output
1671 def differentiate(self, n=1, order=4, inplace=True):
1672 '''
1673 Approximate first or second derivative of the trace.
1675 :param n: 1 for first derivative, 2 for second
1676 :param order: order of the approximation 2 and 4 are supported
1677 :param inplace: if ``True`` the trace is differentiated in place,
1678 otherwise a new trace object with the derivative is returned.
1680 Raises :py:exc:`ValueError` for unsupported `n` or `order`.
1682 See :py:func:`~pyrocko.util.diff_fd` for implementation details.
1683 '''
1685 ddata = util.diff_fd(n, order, self.deltat, self.ydata)
1687 if inplace:
1688 self.ydata = ddata
1689 else:
1690 output = self.copy(data=False)
1691 output.set_ydata(ddata)
1692 return output
1694 def drop_chain_cache(self):
1695 if self._pchain:
1696 self._pchain.clear()
1698 def init_chain(self):
1699 self._pchain = pchain.Chain(
1700 do_downsample,
1701 do_extend,
1702 do_pre_taper,
1703 do_fft,
1704 do_filter,
1705 do_ifft)
1707 def run_chain(self, tmin, tmax, deltat, setup, nocache):
1708 if setup.domain == 'frequency_domain':
1709 _, _, data = self._pchain(
1710 (self, deltat),
1711 (tmin, tmax),
1712 (setup.taper,),
1713 (setup.filter,),
1714 (setup.filter,),
1715 nocache=nocache)
1717 return num.abs(data), num.abs(data)
1719 else:
1720 processed = self._pchain(
1721 (self, deltat),
1722 (tmin, tmax),
1723 (setup.taper,),
1724 (setup.filter,),
1725 (setup.filter,),
1726 (),
1727 nocache=nocache)
1729 if setup.domain == 'time_domain':
1730 data = processed.get_ydata()
1732 elif setup.domain == 'envelope':
1733 processed = processed.envelope(inplace=False)
1735 elif setup.domain == 'absolute':
1736 processed.set_ydata(num.abs(processed.get_ydata()))
1738 return processed.get_ydata(), processed
1740 def misfit(self, candidate, setup, nocache=False, debug=False):
1741 '''
1742 Calculate misfit and normalization factor against candidate trace.
1744 :param candidate: :py:class:`Trace` object
1745 :param setup: :py:class:`MisfitSetup` object
1746 :returns: tuple ``(m, n)``, where m is the misfit value and n is the
1747 normalization divisor
1749 If the sampling rates of ``self`` and ``candidate`` differ, the trace
1750 with the higher sampling rate will be downsampled.
1751 '''
1753 a = self
1754 b = candidate
1756 for tr in (a, b):
1757 if not tr._pchain:
1758 tr.init_chain()
1760 deltat = max(a.deltat, b.deltat)
1761 tmin = min(a.tmin, b.tmin) - deltat
1762 tmax = max(a.tmax, b.tmax) + deltat
1764 adata, aproc = a.run_chain(tmin, tmax, deltat, setup, nocache)
1765 bdata, bproc = b.run_chain(tmin, tmax, deltat, setup, nocache)
1767 if setup.domain != 'cc_max_norm':
1768 m, n = Lx_norm(bdata, adata, norm=setup.norm)
1769 else:
1770 ctr = correlate(aproc, bproc, mode='full', normalization='normal')
1771 ccmax = ctr.max()[1]
1772 m = 0.5 - 0.5 * ccmax
1773 n = 0.5
1775 if debug:
1776 return m, n, aproc, bproc
1777 else:
1778 return m, n
1780 def spectrum(self, pad_to_pow2=False, tfade=None):
1781 '''
1782 Get FFT spectrum of trace.
1784 :param pad_to_pow2: whether to zero-pad the data to next larger
1785 power-of-two length
1786 :param tfade: ``None`` or a time length in seconds, to apply cosine
1787 shaped tapers to both
1789 :returns: a tuple with (frequencies, values)
1790 '''
1792 ndata = self.ydata.size
1794 if pad_to_pow2:
1795 ntrans = nextpow2(ndata)
1796 else:
1797 ntrans = ndata
1799 if tfade is None:
1800 ydata = self.ydata
1801 else:
1802 ydata = self.ydata * costaper(
1803 0., tfade, self.deltat*(ndata-1)-tfade, self.deltat*ndata,
1804 ndata, self.deltat)
1806 fydata = num.fft.rfft(ydata, ntrans)
1807 df = 1./(ntrans*self.deltat)
1808 fxdata = num.arange(len(fydata))*df
1809 return fxdata, fydata
1811 def multi_filter(self, filter_freqs, bandwidth):
1813 class Gauss(FrequencyResponse):
1814 f0 = Float.T()
1815 a = Float.T(default=1.0)
1817 def __init__(self, f0, a=1.0, **kwargs):
1818 FrequencyResponse.__init__(self, f0=f0, a=a, **kwargs)
1820 def evaluate(self, freqs):
1821 omega0 = 2.*math.pi*self.f0
1822 omega = 2.*math.pi*freqs
1823 return num.exp(-((omega-omega0)
1824 / (self.a*omega0))**2)
1826 freqs, coefs = self.spectrum()
1827 n = self.data_len()
1828 nfilt = len(filter_freqs)
1829 signal_tf = num.zeros((nfilt, n))
1830 centroid_freqs = num.zeros(nfilt)
1831 for ifilt, f0 in enumerate(filter_freqs):
1832 taper = Gauss(f0, a=bandwidth)
1833 weights = taper.evaluate(freqs)
1834 nhalf = freqs.size
1835 analytic_spec = num.zeros(n, dtype=complex)
1836 analytic_spec[:nhalf] = coefs*weights
1838 enorm = num.abs(analytic_spec[:nhalf])**2
1839 enorm /= num.sum(enorm)
1841 if n % 2 == 0:
1842 analytic_spec[1:nhalf-1] *= 2.
1843 else:
1844 analytic_spec[1:nhalf] *= 2.
1846 analytic = num.fft.ifft(analytic_spec)
1847 signal_tf[ifilt, :] = num.abs(analytic)
1849 enorm = num.abs(analytic_spec[:nhalf])**2
1850 enorm /= num.sum(enorm)
1851 centroid_freqs[ifilt] = num.sum(freqs*enorm)
1853 return centroid_freqs, signal_tf
1855 def _get_tapered_coefs(
1856 self, ntrans, freqlimits, transfer_function, invert=False):
1858 deltaf = 1./(self.deltat*ntrans)
1859 nfreqs = ntrans//2 + 1
1860 transfer = num.ones(nfreqs, dtype=complex)
1861 hi = snapper(nfreqs, deltaf)
1862 if freqlimits is not None:
1863 a, b, c, d = freqlimits
1864 freqs = num.arange(hi(d)-hi(a), dtype=float)*deltaf \
1865 + hi(a)*deltaf
1867 if invert:
1868 coeffs = transfer_function.evaluate(freqs)
1869 if num.any(coeffs == 0.0):
1870 raise InfiniteResponse('%s.%s.%s.%s' % self.nslc_id)
1872 transfer[hi(a):hi(d)] = 1.0 / transfer_function.evaluate(freqs)
1873 else:
1874 transfer[hi(a):hi(d)] = transfer_function.evaluate(freqs)
1876 tapered_transfer = costaper(a, b, c, d, nfreqs, deltaf)*transfer
1877 else:
1878 if invert:
1879 raise Exception(
1880 'transfer: `freqlimits` must be given when `invert` is '
1881 'set to `True`')
1883 freqs = num.arange(nfreqs) * deltaf
1884 tapered_transfer = transfer_function.evaluate(freqs)
1886 tapered_transfer[0] = 0.0 # don't introduce static offsets
1887 return tapered_transfer
1889 def fill_template(self, template, **additional):
1890 '''
1891 Fill string template with trace metadata.
1893 Uses normal python '%(placeholder)s' string templates. The following
1894 placeholders are considered: ``network``, ``station``, ``location``,
1895 ``channel``, ``tmin`` (time of first sample), ``tmax`` (time of last
1896 sample), ``tmin_ms``, ``tmax_ms``, ``tmin_us``, ``tmax_us``,
1897 ``tmin_year``, ``tmax_year``, ``tmin_month``, ``tmax_month``,
1898 ``tmin_day``, ``tmax_day``, ``julianday``. The variants ending with
1899 ``'_ms'`` include milliseconds, those with ``'_us'`` include
1900 microseconds, those with ``'_year'`` contain only the year.
1901 '''
1903 template = template.replace('%n', '%(network)s')\
1904 .replace('%s', '%(station)s')\
1905 .replace('%l', '%(location)s')\
1906 .replace('%c', '%(channel)s')\
1907 .replace('%b', '%(tmin)s')\
1908 .replace('%e', '%(tmax)s')\
1909 .replace('%j', '%(julianday)s')
1911 params = dict(
1912 zip(('network', 'station', 'location', 'channel'), self.nslc_id))
1913 params['tmin'] = util.time_to_str(
1914 self.tmin, format='%Y-%m-%d_%H-%M-%S')
1915 params['tmax'] = util.time_to_str(
1916 self.tmax, format='%Y-%m-%d_%H-%M-%S')
1917 params['tmin_ms'] = util.time_to_str(
1918 self.tmin, format='%Y-%m-%d_%H-%M-%S.3FRAC')
1919 params['tmax_ms'] = util.time_to_str(
1920 self.tmax, format='%Y-%m-%d_%H-%M-%S.3FRAC')
1921 params['tmin_us'] = util.time_to_str(
1922 self.tmin, format='%Y-%m-%d_%H-%M-%S.6FRAC')
1923 params['tmax_us'] = util.time_to_str(
1924 self.tmax, format='%Y-%m-%d_%H-%M-%S.6FRAC')
1925 params['tmin_year'], params['tmin_month'], params['tmin_day'] \
1926 = util.time_to_str(self.tmin, format='%Y-%m-%d').split('-')
1927 params['tmax_year'], params['tmax_month'], params['tmax_day'] \
1928 = util.time_to_str(self.tmax, format='%Y-%m-%d').split('-')
1929 params['julianday'] = util.julian_day_of_year(self.tmin)
1930 params.update(additional)
1931 return template % params
1933 def plot(self):
1934 '''
1935 Show trace with matplotlib.
1937 See also: :py:meth:`Trace.snuffle`.
1938 '''
1940 import pylab
1941 pylab.plot(self.get_xdata(), self.get_ydata())
1942 name = '%s %s %s - %s' % (
1943 self.channel,
1944 self.station,
1945 time.strftime('%d-%m-%y %H:%M:%S', time.gmtime(self.tmin)),
1946 time.strftime('%d-%m-%y %H:%M:%S', time.gmtime(self.tmax)))
1948 pylab.title(name)
1949 pylab.show()
1951 def snuffle(self, **kwargs):
1952 '''
1953 Show trace in a snuffler window.
1955 :param stations: list of `pyrocko.model.Station` objects or ``None``
1956 :param events: list of `pyrocko.model.Event` objects or ``None``
1957 :param markers: list of `pyrocko.gui.util.Marker` objects or ``None``
1958 :param ntracks: float, number of tracks to be shown initially (default:
1959 12)
1960 :param follow: time interval (in seconds) for real time follow mode or
1961 ``None``
1962 :param controls: bool, whether to show the main controls (default:
1963 ``True``)
1964 :param opengl: bool, whether to use opengl (default: ``False``)
1965 '''
1967 return snuffle([self], **kwargs)
1970def snuffle(traces, **kwargs):
1971 '''
1972 Show traces in a snuffler window.
1974 :param stations: list of `pyrocko.model.Station` objects or ``None``
1975 :param events: list of `pyrocko.model.Event` objects or ``None``
1976 :param markers: list of `pyrocko.gui.util.Marker` objects or ``None``
1977 :param ntracks: float, number of tracks to be shown initially (default: 12)
1978 :param follow: time interval (in seconds) for real time follow mode or
1979 ``None``
1980 :param controls: bool, whether to show the main controls (default:
1981 ``True``)
1982 :param opengl: bool, whether to use opengl (default: ``False``)
1983 '''
1985 from pyrocko import pile
1986 from pyrocko.gui import snuffler
1987 p = pile.Pile()
1988 if traces:
1989 trf = pile.MemTracesFile(None, traces)
1990 p.add_file(trf)
1991 return snuffler.snuffle(p, **kwargs)
1994class InfiniteResponse(Exception):
1995 '''
1996 This exception is raised by :py:class:`Trace` operations when deconvolution
1997 of a frequency response (instrument response transfer function) would
1998 result in a division by zero.
1999 '''
2002class MisalignedTraces(Exception):
2003 '''
2004 This exception is raised by some :py:class:`Trace` operations when tmin,
2005 tmax or number of samples do not match.
2006 '''
2008 pass
2011class NoData(Exception):
2012 '''
2013 This exception is raised by some :py:class:`Trace` operations when no or
2014 not enough data is available.
2015 '''
2017 pass
2020class AboveNyquist(Exception):
2021 '''
2022 This exception is raised by some :py:class:`Trace` operations when given
2023 frequencies are above the Nyquist frequency.
2024 '''
2026 pass
2029class TraceTooShort(Exception):
2030 '''
2031 This exception is raised by some :py:class:`Trace` operations when the
2032 trace is too short.
2033 '''
2035 pass
2038class ResamplingFailed(Exception):
2039 pass
2042def minmax(traces, key=None, mode='minmax', outer_mode='minmax'):
2044 '''
2045 Get data range given traces grouped by selected pattern.
2047 :param key: a callable which takes as single argument a trace and returns a
2048 key for the grouping of the results. If this is ``None``, the default,
2049 ``lambda tr: (tr.network, tr.station, tr.location, tr.channel)`` is
2050 used.
2051 :param mode: ``'minmax'`` or floating point number. If this is
2052 ``'minmax'``, minimum and maximum of the traces are used, if it is a
2053 number, mean +- standard deviation times ``mode`` is used.
2055 param outer_mode: ``'minmax'`` to use mininum and maximum of the
2056 single-trace ranges, or ``'robust'`` to use the interval to discard 10%
2057 extreme values on either end.
2059 :returns: a dict with the combined data ranges.
2061 Examples::
2063 ranges = minmax(traces, lambda tr: tr.channel)
2064 print ranges['N'] # print min & max of all traces with channel == 'N'
2065 print ranges['E'] # print min & max of all traces with channel == 'E'
2067 ranges = minmax(traces, lambda tr: (tr.network, tr.station))
2068 print ranges['GR', 'HAM3'] # print min & max of all traces with
2069 # network == 'GR' and station == 'HAM3'
2071 ranges = minmax(traces, lambda tr: None)
2072 print ranges[None] # prints min & max of all traces
2073 '''
2075 if key is None:
2076 key = _default_key
2078 ranges = defaultdict(list)
2079 for trace in traces:
2080 if isinstance(mode, str) and mode == 'minmax':
2081 mi, ma = num.nanmin(trace.ydata), num.nanmax(trace.ydata)
2082 else:
2083 mean = trace.ydata.mean()
2084 std = trace.ydata.std()
2085 mi, ma = mean-std*mode, mean+std*mode
2087 k = key(trace)
2088 ranges[k].append((mi, ma))
2090 for k in ranges:
2091 mins, maxs = num.array(ranges[k]).T
2092 if outer_mode == 'minmax':
2093 ranges[k] = num.nanmin(mins), num.nanmax(maxs)
2094 elif outer_mode == 'robust':
2095 ranges[k] = num.percentile(mins, 10.), num.percentile(maxs, 90.)
2097 return ranges
2100def minmaxtime(traces, key=None):
2102 '''
2103 Get time range given traces grouped by selected pattern.
2105 :param key: a callable which takes as single argument a trace and returns a
2106 key for the grouping of the results. If this is ``None``, the default,
2107 ``lambda tr: (tr.network, tr.station, tr.location, tr.channel)`` is
2108 used.
2110 :returns: a dict with the combined data ranges.
2111 '''
2113 if key is None:
2114 key = _default_key
2116 ranges = {}
2117 for trace in traces:
2118 mi, ma = trace.tmin, trace.tmax
2119 k = key(trace)
2120 if k not in ranges:
2121 ranges[k] = mi, ma
2122 else:
2123 tmi, tma = ranges[k]
2124 ranges[k] = min(tmi, mi), max(tma, ma)
2126 return ranges
2129def degapper(
2130 traces,
2131 maxgap=5,
2132 fillmethod='interpolate',
2133 deoverlap='use_second',
2134 maxlap=None):
2136 '''
2137 Try to connect traces and remove gaps.
2139 This method will combine adjacent traces, which match in their network,
2140 station, location and channel attributes. Overlapping parts are handled
2141 according to the ``deoverlap`` argument.
2143 :param traces: input traces, must be sorted by their full_id attribute.
2144 :param maxgap: maximum number of samples to interpolate.
2145 :param fillmethod: what to put into the gaps: 'interpolate' or 'zeros'.
2146 :param deoverlap: how to handle overlaps: 'use_second' to use data from
2147 second trace (default), 'use_first' to use data from first trace,
2148 'crossfade_cos' to crossfade with cosine taper, 'add' to add amplitude
2149 values.
2150 :param maxlap: maximum number of samples of overlap which are removed
2152 :returns: list of traces
2153 '''
2155 in_traces = traces
2156 out_traces = []
2157 if not in_traces:
2158 return out_traces
2159 out_traces.append(in_traces.pop(0))
2160 while in_traces:
2162 a = out_traces[-1]
2163 b = in_traces.pop(0)
2165 avirt, bvirt = a.ydata is None, b.ydata is None
2166 assert avirt == bvirt, \
2167 'traces given to degapper() must either all have data or have ' \
2168 'no data.'
2170 virtual = avirt and bvirt
2172 if (a.nslc_id == b.nslc_id and a.deltat == b.deltat
2173 and a.data_len() >= 1 and b.data_len() >= 1
2174 and (virtual or a.ydata.dtype == b.ydata.dtype)):
2176 dist = (b.tmin-(a.tmin+(a.data_len()-1)*a.deltat))/a.deltat
2177 idist = int(round(dist))
2178 if abs(dist - idist) > 0.05 and idist <= maxgap:
2179 # logger.warning('Cannot degap traces with displaced sampling '
2180 # '(%s, %s, %s, %s)' % a.nslc_id)
2181 pass
2182 else:
2183 if 1 < idist <= maxgap:
2184 if not virtual:
2185 if fillmethod == 'interpolate':
2186 filler = a.ydata[-1] + (
2187 ((1.0 + num.arange(idist-1, dtype=float))
2188 / idist) * (b.ydata[0]-a.ydata[-1])
2189 ).astype(a.ydata.dtype)
2190 elif fillmethod == 'zeros':
2191 filler = num.zeros(idist-1, dtype=a.ydata.dtype)
2192 a.ydata = num.concatenate((a.ydata, filler, b.ydata))
2193 a.tmax = b.tmax
2194 if a.mtime and b.mtime:
2195 a.mtime = max(a.mtime, b.mtime)
2196 continue
2198 elif idist == 1:
2199 if not virtual:
2200 a.ydata = num.concatenate((a.ydata, b.ydata))
2201 a.tmax = b.tmax
2202 if a.mtime and b.mtime:
2203 a.mtime = max(a.mtime, b.mtime)
2204 continue
2206 elif idist <= 0 and (maxlap is None or -maxlap < idist):
2207 if b.tmax > a.tmax:
2208 if not virtual:
2209 na = a.ydata.size
2210 n = -idist+1
2211 if deoverlap == 'use_second':
2212 a.ydata = num.concatenate(
2213 (a.ydata[:-n], b.ydata))
2214 elif deoverlap in ('use_first', 'crossfade_cos'):
2215 a.ydata = num.concatenate(
2216 (a.ydata, b.ydata[n:]))
2217 elif deoverlap == 'add':
2218 a.ydata[-n:] += b.ydata[:n]
2219 a.ydata = num.concatenate(
2220 (a.ydata, b.ydata[n:]))
2221 else:
2222 assert False, 'unknown deoverlap method'
2224 if deoverlap == 'crossfade_cos':
2225 n = -idist+1
2226 taper = 0.5-0.5*num.cos(
2227 (1.+num.arange(n))/(1.+n)*num.pi)
2228 a.ydata[na-n:na] *= 1.-taper
2229 a.ydata[na-n:na] += b.ydata[:n] * taper
2231 a.tmax = b.tmax
2232 if a.mtime and b.mtime:
2233 a.mtime = max(a.mtime, b.mtime)
2234 continue
2235 else:
2236 # make short second trace vanish
2237 continue
2239 if b.data_len() >= 1:
2240 out_traces.append(b)
2242 for tr in out_traces:
2243 tr._update_ids()
2245 return out_traces
2248def rotate(traces, azimuth, in_channels, out_channels):
2249 '''
2250 2D rotation of traces.
2252 :param traces: list of input traces
2253 :param azimuth: difference of the azimuths of the component directions
2254 (azimuth of out_channels[0]) - (azimuth of in_channels[0])
2255 :param in_channels: names of the input channels (e.g. 'N', 'E')
2256 :param out_channels: names of the output channels (e.g. 'R', 'T')
2257 :returns: list of rotated traces
2258 '''
2260 phi = azimuth/180.*math.pi
2261 cphi = math.cos(phi)
2262 sphi = math.sin(phi)
2263 rotated = []
2264 in_channels = tuple(_channels_to_names(in_channels))
2265 out_channels = tuple(_channels_to_names(out_channels))
2266 for a in traces:
2267 for b in traces:
2268 if ((a.channel, b.channel) == in_channels and
2269 a.nslc_id[:3] == b.nslc_id[:3] and
2270 abs(a.deltat-b.deltat) < a.deltat*0.001):
2271 tmin = max(a.tmin, b.tmin)
2272 tmax = min(a.tmax, b.tmax)
2274 if tmin < tmax:
2275 ac = a.chop(tmin, tmax, inplace=False, include_last=True)
2276 bc = b.chop(tmin, tmax, inplace=False, include_last=True)
2277 if abs(ac.tmin - bc.tmin) > ac.deltat*0.01:
2278 logger.warning(
2279 'Cannot rotate traces with displaced sampling '
2280 '(%s, %s, %s, %s)' % a.nslc_id)
2281 continue
2283 acydata = ac.get_ydata()*cphi+bc.get_ydata()*sphi
2284 bcydata = -ac.get_ydata()*sphi+bc.get_ydata()*cphi
2285 ac.set_ydata(acydata)
2286 bc.set_ydata(bcydata)
2288 ac.set_codes(channel=out_channels[0])
2289 bc.set_codes(channel=out_channels[1])
2290 rotated.append(ac)
2291 rotated.append(bc)
2293 return rotated
2296def rotate_to_rt(n, e, source, receiver, out_channels=('R', 'T')):
2297 '''
2298 Rotate traces from NE to RT system.
2300 :param n:
2301 North trace.
2302 :type n:
2303 :py:class:`~pyrocko.trace.Trace`
2305 :param e:
2306 East trace.
2307 :type e:
2308 :py:class:`~pyrocko.trace.Trace`
2310 :param source:
2311 Source of the recorded signal.
2312 :type source:
2313 :py:class:`pyrocko.gf.seismosizer.Source`
2315 :param receiver:
2316 Receiver of the recorded signal.
2317 :type receiver:
2318 :py:class:`pyrocko.model.location.Location`
2320 :param out_channels:
2321 Channel codes of the output channels (radial, transversal).
2322 Default is ('R', 'T').
2323 .
2324 :type out_channels
2325 optional, tuple[str, str]
2327 :returns:
2328 Rotated traces (radial, transversal).
2329 :rtype:
2330 tuple[
2331 :py:class:`~pyrocko.trace.Trace`,
2332 :py:class:`~pyrocko.trace.Trace`]
2333 '''
2334 azimuth = orthodrome.azimuth(receiver, source) + 180.
2335 in_channels = n.channel, e.channel
2336 out = rotate(
2337 [n, e], azimuth,
2338 in_channels=in_channels,
2339 out_channels=out_channels)
2341 assert len(out) == 2
2342 for tr in out:
2343 if tr.channel == out_channels[0]:
2344 r = tr
2345 elif tr.channel == out_channels[1]:
2346 t = tr
2347 else:
2348 assert False
2350 return r, t
2353def rotate_to_lqt(traces, backazimuth, incidence, in_channels,
2354 out_channels=('L', 'Q', 'T')):
2355 '''
2356 Rotate traces from ZNE to LQT system.
2358 :param traces: list of traces in arbitrary order
2359 :param backazimuth: backazimuth in degrees clockwise from north
2360 :param incidence: incidence angle in degrees from vertical
2361 :param in_channels: input channel names
2362 :param out_channels: output channel names (default: ('L', 'Q', 'T'))
2363 :returns: list of transformed traces
2364 '''
2365 i = incidence/180.*num.pi
2366 b = backazimuth/180.*num.pi
2368 ci = num.cos(i)
2369 cb = num.cos(b)
2370 si = num.sin(i)
2371 sb = num.sin(b)
2373 rotmat = num.array(
2374 [[ci, -cb*si, -sb*si], [si, cb*ci, sb*ci], [0., sb, -cb]])
2375 return project(traces, rotmat, in_channels, out_channels)
2378def _decompose(a):
2379 '''
2380 Decompose matrix into independent submatrices.
2381 '''
2383 def depends(iout, a):
2384 row = a[iout, :]
2385 return set(num.arange(row.size).compress(row != 0.0))
2387 def provides(iin, a):
2388 col = a[:, iin]
2389 return set(num.arange(col.size).compress(col != 0.0))
2391 a = num.asarray(a)
2392 outs = set(range(a.shape[0]))
2393 systems = []
2394 while outs:
2395 iout = outs.pop()
2397 gout = set()
2398 for iin in depends(iout, a):
2399 gout.update(provides(iin, a))
2401 if not gout:
2402 continue
2404 gin = set()
2405 for iout2 in gout:
2406 gin.update(depends(iout2, a))
2408 if not gin:
2409 continue
2411 for iout2 in gout:
2412 if iout2 in outs:
2413 outs.remove(iout2)
2415 gin = list(gin)
2416 gin.sort()
2417 gout = list(gout)
2418 gout.sort()
2420 systems.append((gin, gout, a[gout, :][:, gin]))
2422 return systems
2425def _channels_to_names(channels):
2426 from pyrocko import squirrel
2427 names = []
2428 for ch in channels:
2429 if isinstance(ch, model.Channel):
2430 names.append(ch.name)
2431 elif isinstance(ch, squirrel.Channel):
2432 names.append(ch.codes.channel)
2433 else:
2434 names.append(ch)
2436 return names
2439def project(traces, matrix, in_channels, out_channels):
2440 '''
2441 Affine transform of three-component traces.
2443 Compute matrix-vector product of three-component traces, to e.g. rotate
2444 traces into a different basis. The traces are distinguished and ordered by
2445 their channel attribute. The tranform is applied to overlapping parts of
2446 any appropriate combinations of the input traces. This should allow this
2447 function to be robust with data gaps. It also tries to apply the
2448 tranformation to subsets of the channels, if this is possible, so that, if
2449 for example a vertical compontent is missing, horizontal components can
2450 still be rotated.
2452 :param traces: list of traces in arbitrary order
2453 :param matrix: tranformation matrix
2454 :param in_channels: input channel names
2455 :param out_channels: output channel names
2456 :returns: list of transformed traces
2457 '''
2459 in_channels = tuple(_channels_to_names(in_channels))
2460 out_channels = tuple(_channels_to_names(out_channels))
2461 systems = _decompose(matrix)
2463 # fallback to full matrix if some are not quadratic
2464 for iins, iouts, submatrix in systems:
2465 if submatrix.shape[0] != submatrix.shape[1]:
2466 if len(in_channels) != 3 or len(out_channels) != 3:
2467 return []
2468 else:
2469 return _project3(traces, matrix, in_channels, out_channels)
2471 projected = []
2472 for iins, iouts, submatrix in systems:
2473 in_cha = tuple([in_channels[iin] for iin in iins])
2474 out_cha = tuple([out_channels[iout] for iout in iouts])
2475 if submatrix.shape[0] == 1:
2476 projected.extend(_project1(traces, submatrix, in_cha, out_cha))
2477 elif submatrix.shape[1] == 2:
2478 projected.extend(_project2(traces, submatrix, in_cha, out_cha))
2479 else:
2480 projected.extend(_project3(traces, submatrix, in_cha, out_cha))
2482 return projected
2485def project_dependencies(matrix, in_channels, out_channels):
2486 '''
2487 Figure out what dependencies project() would produce.
2488 '''
2490 in_channels = tuple(_channels_to_names(in_channels))
2491 out_channels = tuple(_channels_to_names(out_channels))
2492 systems = _decompose(matrix)
2494 subpro = []
2495 for iins, iouts, submatrix in systems:
2496 if submatrix.shape[0] != submatrix.shape[1]:
2497 subpro.append((matrix, in_channels, out_channels))
2499 if not subpro:
2500 for iins, iouts, submatrix in systems:
2501 in_cha = tuple([in_channels[iin] for iin in iins])
2502 out_cha = tuple([out_channels[iout] for iout in iouts])
2503 subpro.append((submatrix, in_cha, out_cha))
2505 deps = {}
2506 for mat, in_cha, out_cha in subpro:
2507 for oc in out_cha:
2508 if oc not in deps:
2509 deps[oc] = []
2511 for ic in in_cha:
2512 deps[oc].append(ic)
2514 return deps
2517def _project1(traces, matrix, in_channels, out_channels):
2518 assert len(in_channels) == 1
2519 assert len(out_channels) == 1
2520 assert matrix.shape == (1, 1)
2522 projected = []
2523 for a in traces:
2524 if not (a.channel,) == in_channels:
2525 continue
2527 ac = a.copy()
2528 ac.set_ydata(matrix[0, 0]*a.get_ydata())
2529 ac.set_codes(channel=out_channels[0])
2530 projected.append(ac)
2532 return projected
2535def _project2(traces, matrix, in_channels, out_channels):
2536 assert len(in_channels) == 2
2537 assert len(out_channels) == 2
2538 assert matrix.shape == (2, 2)
2539 projected = []
2540 for a in traces:
2541 for b in traces:
2542 if not ((a.channel, b.channel) == in_channels and
2543 a.nslc_id[:3] == b.nslc_id[:3] and
2544 abs(a.deltat-b.deltat) < a.deltat*0.001):
2545 continue
2547 tmin = max(a.tmin, b.tmin)
2548 tmax = min(a.tmax, b.tmax)
2550 if tmin > tmax:
2551 continue
2553 ac = a.chop(tmin, tmax, inplace=False, include_last=True)
2554 bc = b.chop(tmin, tmax, inplace=False, include_last=True)
2555 if abs(ac.tmin - bc.tmin) > ac.deltat*0.01:
2556 logger.warning(
2557 'Cannot project traces with displaced sampling '
2558 '(%s, %s, %s, %s)' % a.nslc_id)
2559 continue
2561 acydata = num.dot(matrix[0], (ac.get_ydata(), bc.get_ydata()))
2562 bcydata = num.dot(matrix[1], (ac.get_ydata(), bc.get_ydata()))
2564 ac.set_ydata(acydata)
2565 bc.set_ydata(bcydata)
2567 ac.set_codes(channel=out_channels[0])
2568 bc.set_codes(channel=out_channels[1])
2570 projected.append(ac)
2571 projected.append(bc)
2573 return projected
2576def _project3(traces, matrix, in_channels, out_channels):
2577 assert len(in_channels) == 3
2578 assert len(out_channels) == 3
2579 assert matrix.shape == (3, 3)
2580 projected = []
2581 for a in traces:
2582 for b in traces:
2583 for c in traces:
2584 if not ((a.channel, b.channel, c.channel) == in_channels
2585 and a.nslc_id[:3] == b.nslc_id[:3]
2586 and b.nslc_id[:3] == c.nslc_id[:3]
2587 and abs(a.deltat-b.deltat) < a.deltat*0.001
2588 and abs(b.deltat-c.deltat) < b.deltat*0.001):
2590 continue
2592 tmin = max(a.tmin, b.tmin, c.tmin)
2593 tmax = min(a.tmax, b.tmax, c.tmax)
2595 if tmin >= tmax:
2596 continue
2598 ac = a.chop(tmin, tmax, inplace=False, include_last=True)
2599 bc = b.chop(tmin, tmax, inplace=False, include_last=True)
2600 cc = c.chop(tmin, tmax, inplace=False, include_last=True)
2601 if (abs(ac.tmin - bc.tmin) > ac.deltat*0.01
2602 or abs(bc.tmin - cc.tmin) > bc.deltat*0.01):
2604 logger.warning(
2605 'Cannot project traces with displaced sampling '
2606 '(%s, %s, %s, %s)' % a.nslc_id)
2607 continue
2609 acydata = num.dot(
2610 matrix[0],
2611 (ac.get_ydata(), bc.get_ydata(), cc.get_ydata()))
2612 bcydata = num.dot(
2613 matrix[1],
2614 (ac.get_ydata(), bc.get_ydata(), cc.get_ydata()))
2615 ccydata = num.dot(
2616 matrix[2],
2617 (ac.get_ydata(), bc.get_ydata(), cc.get_ydata()))
2619 ac.set_ydata(acydata)
2620 bc.set_ydata(bcydata)
2621 cc.set_ydata(ccydata)
2623 ac.set_codes(channel=out_channels[0])
2624 bc.set_codes(channel=out_channels[1])
2625 cc.set_codes(channel=out_channels[2])
2627 projected.append(ac)
2628 projected.append(bc)
2629 projected.append(cc)
2631 return projected
2634def correlate(a, b, mode='valid', normalization=None, use_fft=False):
2635 '''
2636 Cross correlation of two traces.
2638 :param a,b: input traces
2639 :param mode: ``'valid'``, ``'full'``, or ``'same'``
2640 :param normalization: ``'normal'``, ``'gliding'``, or ``None``
2641 :param use_fft: bool, whether to do cross correlation in spectral domain
2643 :returns: trace containing cross correlation coefficients
2645 This function computes the cross correlation between two traces. It
2646 evaluates the discrete equivalent of
2648 .. math::
2650 c(t) = \\int_{-\\infty}^{\\infty} a^{\\ast}(\\tau) b(t+\\tau) d\\tau
2652 where the star denotes complex conjugate. Note, that the arguments here are
2653 swapped when compared with the :py:func:`numpy.correlate` function,
2654 which is internally called. This function should be safe even with older
2655 versions of NumPy, where the correlate function has some problems.
2657 A trace containing the cross correlation coefficients is returned. The time
2658 information of the output trace is set so that the returned cross
2659 correlation can be viewed directly as a function of time lag.
2661 Example::
2663 # align two traces a and b containing a time shifted similar signal:
2664 c = pyrocko.trace.correlate(a,b)
2665 t, coef = c.max() # get time and value of maximum
2666 b.shift(-t) # align b with a
2668 '''
2670 assert_same_sampling_rate(a, b)
2672 ya, yb = a.ydata, b.ydata
2674 # need reversed order here:
2675 yc = numpy_correlate_fixed(yb, ya, mode=mode, use_fft=use_fft)
2676 kmin, kmax = numpy_correlate_lag_range(yb, ya, mode=mode, use_fft=use_fft)
2678 if normalization == 'normal':
2679 normfac = num.sqrt(num.sum(ya**2))*num.sqrt(num.sum(yb**2))
2680 yc = yc/normfac
2682 elif normalization == 'gliding':
2683 if mode != 'valid':
2684 assert False, 'gliding normalization currently only available ' \
2685 'with "valid" mode.'
2687 if ya.size < yb.size:
2688 yshort, ylong = ya, yb
2689 else:
2690 yshort, ylong = yb, ya
2692 epsilon = 0.00001
2693 normfac_short = num.sqrt(num.sum(yshort**2))
2694 normfac = normfac_short * num.sqrt(
2695 moving_sum(ylong**2, yshort.size, mode='valid')) \
2696 + normfac_short*epsilon
2698 if yb.size <= ya.size:
2699 normfac = normfac[::-1]
2701 yc /= normfac
2703 c = a.copy()
2704 c.set_ydata(yc)
2705 c.set_codes(*merge_codes(a, b, '~'))
2706 c.shift(-c.tmin + b.tmin-a.tmin + kmin * c.deltat)
2708 return c
2711def deconvolve(
2712 a, b, waterlevel,
2713 tshift=0.,
2714 pad=0.5,
2715 fd_taper=None,
2716 pad_to_pow2=True):
2718 same_sampling_rate(a, b)
2719 assert abs(a.tmin - b.tmin) < a.deltat * 0.001
2720 deltat = a.deltat
2721 npad = int(round(a.data_len()*pad + tshift / deltat))
2723 ndata = max(a.data_len(), b.data_len())
2724 ndata_pad = ndata + npad
2726 if pad_to_pow2:
2727 ntrans = nextpow2(ndata_pad)
2728 else:
2729 ntrans = ndata
2731 aspec = num.fft.rfft(a.ydata, ntrans)
2732 bspec = num.fft.rfft(b.ydata, ntrans)
2734 out = aspec * num.conj(bspec)
2736 bautocorr = bspec*num.conj(bspec)
2737 denom = num.maximum(bautocorr, waterlevel * bautocorr.max())
2739 out /= denom
2740 df = 1/(ntrans*deltat)
2742 if fd_taper is not None:
2743 fd_taper(out, 0.0, df)
2745 ydata = num.roll(num.fft.irfft(out), int(round(tshift/deltat)))
2746 c = a.copy(data=False)
2747 c.set_ydata(ydata[:ndata])
2748 c.set_codes(*merge_codes(a, b, '/'))
2749 return c
2752def assert_same_sampling_rate(a, b, eps=1.0e-6):
2753 assert same_sampling_rate(a, b, eps), \
2754 'Sampling rates differ: %g != %g' % (a.deltat, b.deltat)
2757def same_sampling_rate(a, b, eps=1.0e-6):
2758 '''
2759 Check if two traces have the same sampling rate.
2761 :param a,b: input traces
2762 :param eps: relative tolerance
2763 '''
2765 return abs(a.deltat - b.deltat) < (a.deltat + b.deltat)*eps
2768def fix_deltat_rounding_errors(deltat):
2769 '''
2770 Try to undo sampling rate rounding errors.
2772 Fix rounding errors of sampling intervals when these are read from single
2773 precision floating point values.
2775 Assumes that the true sampling rate or sampling interval was an integer
2776 value. No correction will be applied if this would change the sampling
2777 rate by more than 0.001%.
2778 '''
2780 if deltat <= 1.0:
2781 deltat_new = 1.0 / round(1.0 / deltat)
2782 else:
2783 deltat_new = round(deltat)
2785 if abs(deltat_new - deltat) / deltat > 1e-5:
2786 deltat_new = deltat
2788 return deltat_new
2791def merge_codes(a, b, sep='-'):
2792 '''
2793 Merge network-station-location-channel codes of a pair of traces.
2794 '''
2796 o = []
2797 for xa, xb in zip(a.nslc_id, b.nslc_id):
2798 if xa == xb:
2799 o.append(xa)
2800 else:
2801 o.append(sep.join((xa, xb)))
2802 return o
2805class Taper(Object):
2806 '''
2807 Base class for tapers.
2809 Does nothing by default.
2810 '''
2812 def __call__(self, y, x0, dx):
2813 pass
2816class CosTaper(Taper):
2817 '''
2818 Cosine Taper.
2820 :param a: start of fading in
2821 :param b: end of fading in
2822 :param c: start of fading out
2823 :param d: end of fading out
2824 '''
2826 a = Float.T()
2827 b = Float.T()
2828 c = Float.T()
2829 d = Float.T()
2831 def __init__(self, a, b, c, d):
2832 Taper.__init__(self, a=a, b=b, c=c, d=d)
2834 def __call__(self, y, x0, dx):
2835 apply_costaper(self.a, self.b, self.c, self.d, y, x0, dx)
2837 def span(self, y, x0, dx):
2838 return span_costaper(self.a, self.b, self.c, self.d, y, x0, dx)
2840 def time_span(self):
2841 return self.a, self.d
2844class CosFader(Taper):
2845 '''
2846 Cosine Fader.
2848 :param xfade: fade in and fade out time in seconds (optional)
2849 :param xfrac: fade in and fade out as fraction between 0. and 1. (optional)
2851 Only one argument can be set. The other should to be ``None``.
2852 '''
2854 xfade = Float.T(optional=True)
2855 xfrac = Float.T(optional=True)
2857 def __init__(self, xfade=None, xfrac=None):
2858 Taper.__init__(self, xfade=xfade, xfrac=xfrac)
2859 assert (xfade is None) != (xfrac is None)
2860 self._xfade = xfade
2861 self._xfrac = xfrac
2863 def __call__(self, y, x0, dx):
2865 xfade = self._xfade
2867 xlen = (y.size - 1)*dx
2868 if xfade is None:
2869 xfade = xlen * self._xfrac
2871 a = x0
2872 b = x0 + xfade
2873 c = x0 + xlen - xfade
2874 d = x0 + xlen
2876 apply_costaper(a, b, c, d, y, x0, dx)
2878 def span(self, y, x0, dx):
2879 return 0, y.size
2881 def time_span(self):
2882 return None, None
2885def none_min(li):
2886 if None in li:
2887 return None
2888 else:
2889 return min(x for x in li if x is not None)
2892def none_max(li):
2893 if None in li:
2894 return None
2895 else:
2896 return max(x for x in li if x is not None)
2899class MultiplyTaper(Taper):
2900 '''
2901 Multiplication of several tapers.
2902 '''
2904 tapers = List.T(Taper.T())
2906 def __init__(self, tapers=None):
2907 if tapers is None:
2908 tapers = []
2910 Taper.__init__(self, tapers=tapers)
2912 def __call__(self, y, x0, dx):
2913 for taper in self.tapers:
2914 taper(y, x0, dx)
2916 def span(self, y, x0, dx):
2917 spans = []
2918 for taper in self.tapers:
2919 spans.append(taper.span(y, x0, dx))
2921 mins, maxs = list(zip(*spans))
2922 return min(mins), max(maxs)
2924 def time_span(self):
2925 spans = []
2926 for taper in self.tapers:
2927 spans.append(taper.time_span())
2929 mins, maxs = list(zip(*spans))
2930 return none_min(mins), none_max(maxs)
2933class GaussTaper(Taper):
2934 '''
2935 Frequency domain Gaussian filter.
2936 '''
2938 alpha = Float.T()
2940 def __init__(self, alpha):
2941 Taper.__init__(self, alpha=alpha)
2942 self._alpha = alpha
2944 def __call__(self, y, x0, dx):
2945 f = x0 + num.arange(y.size)*dx
2946 y *= num.exp(-num.pi**2 / (self._alpha**2) * f**2)
2949cached_coefficients = {}
2952def _get_cached_filter_coefs(order, corners, btype):
2953 ck = (order, tuple(corners), btype)
2954 if ck not in cached_coefficients:
2955 if len(corners) == 0:
2956 cached_coefficients[ck] = signal.butter(
2957 order, corners[0], btype=btype)
2958 else:
2959 cached_coefficients[ck] = signal.butter(
2960 order, corners, btype=btype)
2962 return cached_coefficients[ck]
2965class _globals(object):
2966 _numpy_has_correlate_flip_bug = None
2969def _default_key(tr):
2970 return (tr.network, tr.station, tr.location, tr.channel)
2973def numpy_has_correlate_flip_bug():
2974 '''
2975 Check if NumPy's correlate function reveals old behaviour.
2976 '''
2978 if _globals._numpy_has_correlate_flip_bug is None:
2979 a = num.array([0, 0, 1, 0, 0, 0, 0])
2980 b = num.array([0, 0, 0, 0, 1, 0, 0, 0])
2981 ab = num.correlate(a, b, mode='same')
2982 ba = num.correlate(b, a, mode='same')
2983 _globals._numpy_has_correlate_flip_bug = num.all(ab == ba)
2985 return _globals._numpy_has_correlate_flip_bug
2988def numpy_correlate_fixed(a, b, mode='valid', use_fft=False):
2989 '''
2990 Call :py:func:`numpy.correlate` with fixes.
2992 c[k] = sum_i a[i+k] * conj(b[i])
2994 Note that the result produced by newer numpy.correlate is always flipped
2995 with respect to the formula given in its documentation (if ascending k
2996 assumed for the output).
2997 '''
2999 if use_fft:
3000 if a.size < b.size:
3001 c = signal.fftconvolve(b[::-1], a, mode=mode)
3002 else:
3003 c = signal.fftconvolve(a, b[::-1], mode=mode)
3004 return c
3006 else:
3007 buggy = numpy_has_correlate_flip_bug()
3009 a = num.asarray(a)
3010 b = num.asarray(b)
3012 if buggy:
3013 b = num.conj(b)
3015 c = num.correlate(a, b, mode=mode)
3017 if buggy and a.size < b.size:
3018 return c[::-1]
3019 else:
3020 return c
3023def numpy_correlate_emulate(a, b, mode='valid'):
3024 '''
3025 Slow version of :py:func:`numpy.correlate` for comparison.
3026 '''
3028 a = num.asarray(a)
3029 b = num.asarray(b)
3030 kmin = -(b.size-1)
3031 klen = a.size-kmin
3032 kmin, kmax = numpy_correlate_lag_range(a, b, mode=mode)
3033 kmin = int(kmin)
3034 kmax = int(kmax)
3035 klen = kmax - kmin + 1
3036 c = num.zeros(klen, dtype=num.find_common_type((b.dtype, a.dtype), ()))
3037 for k in range(kmin, kmin+klen):
3038 imin = max(0, -k)
3039 ilen = min(b.size, a.size-k) - imin
3040 c[k-kmin] = num.sum(
3041 a[imin+k:imin+ilen+k] * num.conj(b[imin:imin+ilen]))
3043 return c
3046def numpy_correlate_lag_range(a, b, mode='valid', use_fft=False):
3047 '''
3048 Get range of lags for which :py:func:`numpy.correlate` produces values.
3049 '''
3051 a = num.asarray(a)
3052 b = num.asarray(b)
3054 kmin = -(b.size-1)
3055 if mode == 'full':
3056 klen = a.size-kmin
3057 elif mode == 'same':
3058 klen = max(a.size, b.size)
3059 kmin += (a.size+b.size-1 - max(a.size, b.size)) // 2 + \
3060 int(not use_fft and a.size % 2 == 0 and b.size > a.size)
3061 elif mode == 'valid':
3062 klen = abs(a.size - b.size) + 1
3063 kmin += min(a.size, b.size) - 1
3065 return kmin, kmin + klen - 1
3068def autocorr(x, nshifts):
3069 '''
3070 Compute biased estimate of the first autocorrelation coefficients.
3072 :param x: input array
3073 :param nshifts: number of coefficients to calculate
3074 '''
3076 mean = num.mean(x)
3077 std = num.std(x)
3078 n = x.size
3079 xdm = x - mean
3080 r = num.zeros(nshifts)
3081 for k in range(nshifts):
3082 r[k] = 1./((n-num.abs(k))*std) * num.sum(xdm[:n-k] * xdm[k:])
3084 return r
3087def yulewalker(x, order):
3088 '''
3089 Compute autoregression coefficients using Yule-Walker method.
3091 :param x: input array
3092 :param order: number of coefficients to produce
3094 A biased estimate of the autocorrelation is used. The Yule-Walker equations
3095 are solved by :py:func:`numpy.linalg.inv` instead of Levinson-Durbin
3096 recursion which is normally used.
3097 '''
3099 gamma = autocorr(x, order+1)
3100 d = gamma[1:1+order]
3101 a = num.zeros((order, order))
3102 gamma2 = num.concatenate((gamma[::-1], gamma[1:order]))
3103 for i in range(order):
3104 ioff = order-i
3105 a[i, :] = gamma2[ioff:ioff+order]
3107 return num.dot(num.linalg.inv(a), -d)
3110def moving_avg(x, n):
3111 n = int(n)
3112 cx = x.cumsum()
3113 nn = len(x)
3114 y = num.zeros(nn, dtype=cx.dtype)
3115 y[n//2:n//2+(nn-n)] = (cx[n:]-cx[:-n])/n
3116 y[:n//2] = y[n//2]
3117 y[n//2+(nn-n):] = y[n//2+(nn-n)-1]
3118 return y
3121def moving_sum(x, n, mode='valid'):
3122 n = int(n)
3123 cx = x.cumsum()
3124 nn = len(x)
3126 if mode == 'valid':
3127 if nn-n+1 <= 0:
3128 return num.zeros(0, dtype=cx.dtype)
3129 y = num.zeros(nn-n+1, dtype=cx.dtype)
3130 y[0] = cx[n-1]
3131 y[1:nn-n+1] = cx[n:nn]-cx[0:nn-n]
3133 if mode == 'full':
3134 y = num.zeros(nn+n-1, dtype=cx.dtype)
3135 if n <= nn:
3136 y[0:n] = cx[0:n]
3137 y[n:nn] = cx[n:nn]-cx[0:nn-n]
3138 y[nn:nn+n-1] = cx[-1]-cx[nn-n:nn-1]
3139 else:
3140 y[0:nn] = cx[0:nn]
3141 y[nn:n] = cx[nn-1]
3142 y[n:nn+n-1] = cx[nn-1] - cx[0:nn-1]
3144 if mode == 'same':
3145 n1 = (n-1)//2
3146 y = num.zeros(nn, dtype=cx.dtype)
3147 if n <= nn:
3148 y[0:n-n1] = cx[n1:n]
3149 y[n-n1:nn-n1] = cx[n:nn]-cx[0:nn-n]
3150 y[nn-n1:nn] = cx[nn-1] - cx[nn-n:nn-n+n1]
3151 else:
3152 y[0:max(0, nn-n1)] = cx[min(n1, nn):nn]
3153 y[max(nn-n1, 0):min(n-n1, nn)] = cx[nn-1]
3154 y[min(n-n1, nn):nn] = cx[nn-1] - cx[0:max(0, nn-(n-n1))]
3156 return y
3159def nextpow2(i):
3160 return 2**int(math.ceil(math.log(i)/math.log(2.)))
3163def snapper_w_offset(nmax, offset, delta, snapfun=math.ceil):
3164 def snap(x):
3165 return max(0, min(int(snapfun((x-offset)/delta)), nmax))
3166 return snap
3169def snapper(nmax, delta, snapfun=math.ceil):
3170 def snap(x):
3171 return max(0, min(int(snapfun(x/delta)), nmax))
3172 return snap
3175def apply_costaper(a, b, c, d, y, x0, dx):
3176 hi = snapper_w_offset(y.size, x0, dx)
3177 y[:hi(a)] = 0.
3178 y[hi(a):hi(b)] *= 0.5 \
3179 - 0.5*num.cos((dx*num.arange(hi(a), hi(b))-(a-x0))/(b-a)*num.pi)
3180 y[hi(c):hi(d)] *= 0.5 \
3181 + 0.5*num.cos((dx*num.arange(hi(c), hi(d))-(c-x0))/(d-c)*num.pi)
3182 y[hi(d):] = 0.
3185def span_costaper(a, b, c, d, y, x0, dx):
3186 hi = snapper_w_offset(y.size, x0, dx)
3187 return hi(a), hi(d) - hi(a)
3190def costaper(a, b, c, d, nfreqs, deltaf):
3191 hi = snapper(nfreqs, deltaf)
3192 tap = num.zeros(nfreqs)
3193 tap[hi(a):hi(b)] = 0.5 \
3194 - 0.5*num.cos((deltaf*num.arange(hi(a), hi(b))-a)/(b-a)*num.pi)
3195 tap[hi(b):hi(c)] = 1.
3196 tap[hi(c):hi(d)] = 0.5 \
3197 + 0.5*num.cos((deltaf*num.arange(hi(c), hi(d))-c)/(d-c)*num.pi)
3199 return tap
3202def t2ind(t, tdelta, snap=round):
3203 return int(snap(t/tdelta))
3206def hilbert(x, N=None):
3207 '''
3208 Return the hilbert transform of x of length N.
3210 (from scipy.signal, but changed to use fft and ifft from numpy.fft)
3211 '''
3213 x = num.asarray(x)
3214 if N is None:
3215 N = len(x)
3216 if N <= 0:
3217 raise ValueError('N must be positive.')
3218 if num.iscomplexobj(x):
3219 logger.warning('imaginary part of x ignored.')
3220 x = num.real(x)
3222 Xf = num.fft.fft(x, N, axis=0)
3223 h = num.zeros(N)
3224 if N % 2 == 0:
3225 h[0] = h[N//2] = 1
3226 h[1:N//2] = 2
3227 else:
3228 h[0] = 1
3229 h[1:(N+1)//2] = 2
3231 if len(x.shape) > 1:
3232 h = h[:, num.newaxis]
3233 x = num.fft.ifft(Xf*h)
3234 return x
3237def near(a, b, eps):
3238 return abs(a-b) < eps
3241def coroutine(func):
3242 def wrapper(*args, **kwargs):
3243 gen = func(*args, **kwargs)
3244 next(gen)
3245 return gen
3247 wrapper.__name__ = func.__name__
3248 wrapper.__dict__ = func.__dict__
3249 wrapper.__doc__ = func.__doc__
3250 return wrapper
3253class States(object):
3254 '''
3255 Utility to store channel-specific state in coroutines.
3256 '''
3258 def __init__(self):
3259 self._states = {}
3261 def get(self, tr):
3262 k = tr.nslc_id
3263 if k in self._states:
3264 tmin, deltat, dtype, value = self._states[k]
3265 if (near(tmin, tr.tmin, deltat/100.)
3266 and near(deltat, tr.deltat, deltat/10000.)
3267 and dtype == tr.ydata.dtype):
3269 return value
3271 return None
3273 def set(self, tr, value):
3274 k = tr.nslc_id
3275 if k in self._states and self._states[k][-1] is not value:
3276 self.free(self._states[k][-1])
3278 self._states[k] = (tr.tmax+tr.deltat, tr.deltat, tr.ydata.dtype, value)
3280 def free(self, value):
3281 pass
3284@coroutine
3285def co_list_append(list):
3286 while True:
3287 list.append((yield))
3290class ScipyBug(Exception):
3291 pass
3294@coroutine
3295def co_lfilter(target, b, a):
3296 '''
3297 Successively filter broken continuous trace data (coroutine).
3299 Create coroutine which takes :py:class:`Trace` objects, filters their data
3300 through :py:func:`scipy.signal.lfilter` and sends new :py:class:`Trace`
3301 objects containing the filtered data to target. This is useful, if one
3302 wants to filter a long continuous time series, which is split into many
3303 successive traces without producing filter artifacts at trace boundaries.
3305 Filter states are kept *per channel*, specifically, for each (network,
3306 station, location, channel) combination occuring in the input traces, a
3307 separate state is created and maintained. This makes it possible to filter
3308 multichannel or multistation data with only one :py:func:`co_lfilter`
3309 instance.
3311 Filter state is reset, when gaps occur.
3313 Use it like this::
3315 from pyrocko.trace import co_lfilter, co_list_append
3317 filtered_traces = []
3318 pipe = co_lfilter(co_list_append(filtered_traces), a, b)
3319 for trace in traces:
3320 pipe.send(trace)
3322 pipe.close()
3324 '''
3326 try:
3327 states = States()
3328 output = None
3329 while True:
3330 input = (yield)
3332 zi = states.get(input)
3333 if zi is None:
3334 zi = num.zeros(max(len(a), len(b))-1, dtype=float)
3336 output = input.copy(data=False)
3337 try:
3338 ydata, zf = signal.lfilter(b, a, input.get_ydata(), zi=zi)
3339 except ValueError:
3340 raise ScipyBug(
3341 'signal.lfilter failed: could be related to a bug '
3342 'in some older scipy versions, e.g. on opensuse42.1')
3344 output.set_ydata(ydata)
3345 states.set(input, zf)
3346 target.send(output)
3348 except GeneratorExit:
3349 target.close()
3352def co_antialias(target, q, n=None, ftype='fir'):
3353 b, a, n = util.decimate_coeffs(q, n, ftype)
3354 anti = co_lfilter(target, b, a)
3355 return anti
3358@coroutine
3359def co_dropsamples(target, q, nfir):
3360 try:
3361 states = States()
3362 while True:
3363 tr = (yield)
3364 newdeltat = q * tr.deltat
3365 ioffset = states.get(tr)
3366 if ioffset is None:
3367 # for fir filter, the first nfir samples are pulluted by
3368 # boundary effects; cut it off.
3369 # for iir this may be (much) more, we do not correct for that.
3370 # put sample instances to a time which is a multiple of the
3371 # new sampling interval.
3372 newtmin_want = math.ceil(
3373 (tr.tmin+(nfir+1)*tr.deltat) / newdeltat) * newdeltat \
3374 - (nfir/2*tr.deltat)
3375 ioffset = int(round((newtmin_want - tr.tmin)/tr.deltat))
3376 if ioffset < 0:
3377 ioffset = ioffset % q
3379 newtmin_have = tr.tmin + ioffset * tr.deltat
3380 newtr = tr.copy(data=False)
3381 newtr.deltat = newdeltat
3382 # because the fir kernel shifts data by nfir/2 samples:
3383 newtr.tmin = newtmin_have - (nfir/2*tr.deltat)
3384 newtr.set_ydata(tr.get_ydata()[ioffset::q].copy())
3385 states.set(tr, (ioffset % q - tr.data_len() % q) % q)
3386 target.send(newtr)
3388 except GeneratorExit:
3389 target.close()
3392def co_downsample(target, q, n=None, ftype='fir'):
3393 '''
3394 Successively downsample broken continuous trace data (coroutine).
3396 Create coroutine which takes :py:class:`Trace` objects, downsamples their
3397 data and sends new :py:class:`Trace` objects containing the downsampled
3398 data to target. This is useful, if one wants to downsample a long
3399 continuous time series, which is split into many successive traces without
3400 producing filter artifacts and gaps at trace boundaries.
3402 Filter states are kept *per channel*, specifically, for each (network,
3403 station, location, channel) combination occuring in the input traces, a
3404 separate state is created and maintained. This makes it possible to filter
3405 multichannel or multistation data with only one :py:func:`co_lfilter`
3406 instance.
3408 Filter state is reset, when gaps occur. The sampling instances are choosen
3409 so that they occur at (or as close as possible) to even multiples of the
3410 sampling interval of the downsampled trace (based on system time).
3411 '''
3413 b, a, n = util.decimate_coeffs(q, n, ftype)
3414 return co_antialias(co_dropsamples(target, q, n), q, n, ftype)
3417@coroutine
3418def co_downsample_to(target, deltat):
3420 decimators = {}
3421 try:
3422 while True:
3423 tr = (yield)
3424 ratio = deltat / tr.deltat
3425 rratio = round(ratio)
3426 if abs(rratio - ratio)/ratio > 0.0001:
3427 raise util.UnavailableDecimation('ratio = %g' % ratio)
3429 deci_seq = tuple(x for x in util.decitab(int(rratio)) if x != 1)
3430 if deci_seq not in decimators:
3431 pipe = target
3432 for q in deci_seq[::-1]:
3433 pipe = co_downsample(pipe, q)
3435 decimators[deci_seq] = pipe
3437 decimators[deci_seq].send(tr)
3439 except GeneratorExit:
3440 for g in decimators.values():
3441 g.close()
3444class DomainChoice(StringChoice):
3445 choices = [
3446 'time_domain',
3447 'frequency_domain',
3448 'envelope',
3449 'absolute',
3450 'cc_max_norm']
3453class MisfitSetup(Object):
3454 '''
3455 Contains misfit setup to be used in :py:func:`trace.misfit`
3457 :param description: Description of the setup
3458 :param norm: L-norm classifier
3459 :param taper: Object of :py:class:`Taper`
3460 :param filter: Object of :py:class:`FrequencyResponse`
3461 :param domain: ['time_domain', 'frequency_domain', 'envelope', 'absolute',
3462 'cc_max_norm']
3464 Can be dumped to a yaml file.
3465 '''
3467 xmltagname = 'misfitsetup'
3468 description = String.T(optional=True)
3469 norm = Int.T(optional=False)
3470 taper = Taper.T(optional=False)
3471 filter = FrequencyResponse.T(optional=True)
3472 domain = DomainChoice.T(default='time_domain')
3475def equalize_sampling_rates(trace_1, trace_2):
3476 '''
3477 Equalize sampling rates of two traces (reduce higher sampling rate to
3478 lower).
3480 :param trace_1: :py:class:`Trace` object
3481 :param trace_2: :py:class:`Trace` object
3483 Returns a copy of the resampled trace if resampling is needed.
3484 '''
3486 if same_sampling_rate(trace_1, trace_2):
3487 return trace_1, trace_2
3489 if trace_1.deltat < trace_2.deltat:
3490 t1_out = trace_1.copy()
3491 t1_out.downsample_to(deltat=trace_2.deltat, snap=True)
3492 logger.debug('Trace downsampled (return copy of trace): %s'
3493 % '.'.join(t1_out.nslc_id))
3494 return t1_out, trace_2
3496 elif trace_1.deltat > trace_2.deltat:
3497 t2_out = trace_2.copy()
3498 t2_out.downsample_to(deltat=trace_1.deltat, snap=True)
3499 logger.debug('Trace downsampled (return copy of trace): %s'
3500 % '.'.join(t2_out.nslc_id))
3501 return trace_1, t2_out
3504def Lx_norm(u, v, norm=2):
3505 '''
3506 Calculate the misfit denominator *m* and the normalization devisor *n*
3507 according to norm.
3509 The normalization divisor *n* is calculated from ``v``.
3511 :param u: :py:class:`numpy.array`
3512 :param v: :py:class:`numpy.array`
3513 :param norm: (default = 2)
3515 ``u`` and ``v`` must be of same size.
3516 '''
3518 if norm == 1:
3519 return (
3520 num.sum(num.abs(v-u)),
3521 num.sum(num.abs(v)))
3523 elif norm == 2:
3524 return (
3525 num.sqrt(num.sum((v-u)**2)),
3526 num.sqrt(num.sum(v**2)))
3528 else:
3529 return (
3530 num.power(num.sum(num.abs(num.power(v - u, norm))), 1./norm),
3531 num.power(num.sum(num.abs(num.power(v, norm))), 1./norm))
3534def do_downsample(tr, deltat):
3535 if abs(tr.deltat - deltat) / tr.deltat > 1e-6:
3536 tr = tr.copy()
3537 tr.downsample_to(deltat, snap=True, demean=False)
3538 else:
3539 if tr.tmin/tr.deltat > 1e-6 or tr.tmax/tr.deltat > 1e-6:
3540 tr = tr.copy()
3541 tr.snap()
3542 return tr
3545def do_extend(tr, tmin, tmax):
3546 if tmin < tr.tmin or tmax > tr.tmax:
3547 tr = tr.copy()
3548 tr.extend(tmin=tmin, tmax=tmax, fillmethod='repeat')
3550 return tr
3553def do_pre_taper(tr, taper):
3554 return tr.taper(taper, inplace=False, chop=True)
3557def do_fft(tr, filter):
3558 if filter is None:
3559 return tr
3560 else:
3561 ndata = tr.ydata.size
3562 nfft = nextpow2(ndata)
3563 padded = num.zeros(nfft, dtype=float)
3564 padded[:ndata] = tr.ydata
3565 spectrum = num.fft.rfft(padded)
3566 df = 1.0 / (tr.deltat * nfft)
3567 frequencies = num.arange(spectrum.size)*df
3568 return [tr, frequencies, spectrum]
3571def do_filter(inp, filter):
3572 if filter is None:
3573 return inp
3574 else:
3575 tr, frequencies, spectrum = inp
3576 spectrum *= filter.evaluate(frequencies)
3577 return [tr, frequencies, spectrum]
3580def do_ifft(inp):
3581 if isinstance(inp, Trace):
3582 return inp
3583 else:
3584 tr, _, spectrum = inp
3585 ndata = tr.ydata.size
3586 tr = tr.copy(data=False)
3587 tr.set_ydata(num.fft.irfft(spectrum)[:ndata])
3588 return tr
3591def check_alignment(t1, t2):
3592 if abs(t1.tmin-t2.tmin) > t1.deltat * 1e-4 or \
3593 abs(t1.tmax - t2.tmax) > t1.deltat * 1e-4 or \
3594 t1.ydata.shape != t2.ydata.shape:
3595 raise MisalignedTraces(
3596 'Cannot calculate misfit of %s and %s due to misaligned '
3597 'traces.' % ('.'.join(t1.nslc_id), '.'.join(t2.nslc_id)))