1# http://pyrocko.org - GPLv3
2#
3# The Pyrocko Developers, 21st Century
4# ---|P------/S----------~Lg----------
5'''
6This module provides basic signal processing for seismic traces.
7'''
9from __future__ import division, absolute_import
11import time
12import math
13import copy
14import logging
15import hashlib
16from collections import defaultdict
18import numpy as num
19from scipy import signal
21from pyrocko import util, orthodrome, pchain, model
22from pyrocko.util import reuse
23from pyrocko.guts import Object, Float, Int, String, List, \
24 StringChoice, Timestamp
25from pyrocko.guts_array import Array
26from pyrocko.model import squirrel_content
28# backward compatibility
29from pyrocko.util import UnavailableDecimation # noqa
30from pyrocko.response import ( # noqa
31 FrequencyResponse, Evalresp, InverseEvalresp, PoleZeroResponse,
32 ButterworthResponse, SampledResponse, IntegrationResponse,
33 DifferentiationResponse, MultiplyResponse)
36guts_prefix = 'pf'
38logger = logging.getLogger('pyrocko.trace')
41@squirrel_content
42class Trace(Object):
44 '''
45 Create new trace object.
47 A ``Trace`` object represents a single continuous strip of evenly sampled
48 time series data. It is built from a 1D NumPy array containing the data
49 samples and some attributes describing its beginning and ending time, its
50 sampling rate and four string identifiers (its network, station, location
51 and channel code).
53 :param network: network code
54 :param station: station code
55 :param location: location code
56 :param channel: channel code
57 :param extra: extra code
58 :param tmin: system time of first sample in [s]
59 :param tmax: system time of last sample in [s] (if set to ``None`` it is
60 computed from length of ``ydata``)
61 :param deltat: sampling interval in [s]
62 :param ydata: 1D numpy array with data samples (can be ``None`` when
63 ``tmax`` is not ``None``)
64 :param mtime: optional modification time
66 :param meta: additional meta information (not used, but maintained by the
67 library)
69 The length of the network, station, location and channel codes is not
70 resricted by this software, but data formats like SAC, Mini-SEED or GSE
71 have different limits on the lengths of these codes. The codes set here are
72 silently truncated when the trace is stored
73 '''
75 network = String.T(default='', help='Deployment/network code (1-8)')
76 station = String.T(default='STA', help='Station code (1-5)')
77 location = String.T(default='', help='Location code (0-2)')
78 channel = String.T(default='', help='Channel code (3)')
79 extra = String.T(default='', help='Extra/custom code')
81 tmin = Timestamp.T(default=Timestamp.D('1970-01-01 00:00:00'))
82 tmax = Timestamp.T()
84 deltat = Float.T(default=1.0)
85 ydata = Array.T(optional=True, shape=(None,), serialize_as='base64+meta')
87 mtime = Timestamp.T(optional=True)
89 cached_frequencies = {}
91 def __init__(self, network='', station='STA', location='', channel='',
92 tmin=0., tmax=None, deltat=1., ydata=None, mtime=None,
93 meta=None, extra=''):
95 Object.__init__(self, init_props=False)
97 time_float = util.get_time_float()
99 if not isinstance(tmin, time_float):
100 tmin = Trace.tmin.regularize_extra(tmin)
102 if tmax is not None and not isinstance(tmax, time_float):
103 tmax = Trace.tmax.regularize_extra(tmax)
105 if mtime is not None and not isinstance(mtime, time_float):
106 mtime = Trace.mtime.regularize_extra(mtime)
108 self._growbuffer = None
110 tmin = time_float(tmin)
111 if tmax is not None:
112 tmax = time_float(tmax)
114 if mtime is None:
115 mtime = time_float(time.time())
117 self.network, self.station, self.location, self.channel, \
118 self.extra = [
119 reuse(x) for x in (
120 network, station, location, channel, extra)]
122 self.tmin = tmin
123 self.deltat = deltat
125 if tmax is None:
126 if ydata is not None:
127 self.tmax = self.tmin + (ydata.size-1)*self.deltat
128 else:
129 raise Exception(
130 'fixme: trace must be created with tmax or ydata')
131 else:
132 n = int(round((tmax - self.tmin) / self.deltat)) + 1
133 self.tmax = self.tmin + (n - 1) * self.deltat
135 self.meta = meta
136 self.ydata = ydata
137 self.mtime = mtime
138 self._update_ids()
139 self.file = None
140 self._pchain = None
142 def __str__(self):
143 fmt = min(9, max(0, -int(math.floor(math.log10(self.deltat)))))
144 s = 'Trace (%s, %s, %s, %s)\n' % self.nslc_id
145 s += ' timerange: %s - %s\n' % (
146 util.time_to_str(self.tmin, format=fmt),
147 util.time_to_str(self.tmax, format=fmt))
149 s += ' delta t: %g\n' % self.deltat
150 if self.meta:
151 for k in sorted(self.meta.keys()):
152 s += ' %s: %s\n' % (k, self.meta[k])
153 return s
155 @property
156 def codes(self):
157 from pyrocko.squirrel import model
158 return model.CodesNSLCE(
159 self.network, self.station, self.location, self.channel,
160 self.extra)
162 @property
163 def time_span(self):
164 return self.tmin, self.tmax
166 @property
167 def summary(self):
168 return '%s %-16s %s %g' % (
169 self.__class__.__name__, self.str_codes, self.str_time_span,
170 self.deltat)
172 def __getstate__(self):
173 return (self.network, self.station, self.location, self.channel,
174 self.tmin, self.tmax, self.deltat, self.mtime,
175 self.ydata, self.meta, self.extra)
177 def __setstate__(self, state):
178 if len(state) == 11:
179 self.network, self.station, self.location, self.channel, \
180 self.tmin, self.tmax, self.deltat, self.mtime, \
181 self.ydata, self.meta, self.extra = state
183 elif len(state) == 12:
184 # backward compatibility 0
185 self.network, self.station, self.location, self.channel, \
186 self.tmin, self.tmax, self.deltat, self.mtime, \
187 self.ydata, self.meta, _, self.extra = state
189 elif len(state) == 10:
190 # backward compatibility 1
191 self.network, self.station, self.location, self.channel, \
192 self.tmin, self.tmax, self.deltat, self.mtime, \
193 self.ydata, self.meta = state
195 self.extra = ''
197 else:
198 # backward compatibility 2
199 self.network, self.station, self.location, self.channel, \
200 self.tmin, self.tmax, self.deltat, self.mtime = state
201 self.ydata = None
202 self.meta = None
203 self.extra = ''
205 self._growbuffer = None
206 self._update_ids()
208 def hash(self, unsafe=False):
209 sha1 = hashlib.sha1()
210 for code in self.nslc_id:
211 sha1.update(code.encode())
212 sha1.update(self.tmin.hex().encode())
213 sha1.update(self.tmax.hex().encode())
214 sha1.update(self.deltat.hex().encode())
216 if self.ydata is not None:
217 if not unsafe:
218 sha1.update(memoryview(self.ydata))
219 else:
220 sha1.update(memoryview(self.ydata[:32]))
221 sha1.update(memoryview(self.ydata[-32:]))
223 return sha1.hexdigest()
225 def name(self):
226 '''
227 Get a short string description.
228 '''
230 s = '%s.%s.%s.%s, %s, %s' % (self.nslc_id + (
231 util.time_to_str(self.tmin),
232 util.time_to_str(self.tmax)))
234 return s
236 def __eq__(self, other):
237 return (
238 isinstance(other, Trace)
239 and self.network == other.network
240 and self.station == other.station
241 and self.location == other.location
242 and self.channel == other.channel
243 and (abs(self.deltat - other.deltat)
244 < (self.deltat + other.deltat)*1e-6)
245 and abs(self.tmin-other.tmin) < self.deltat*0.01
246 and abs(self.tmax-other.tmax) < self.deltat*0.01
247 and num.all(self.ydata == other.ydata))
249 def almost_equal(self, other, rtol=1e-5, atol=0.0):
250 return (
251 self.network == other.network
252 and self.station == other.station
253 and self.location == other.location
254 and self.channel == other.channel
255 and (abs(self.deltat - other.deltat)
256 < (self.deltat + other.deltat)*1e-6)
257 and abs(self.tmin-other.tmin) < self.deltat*0.01
258 and abs(self.tmax-other.tmax) < self.deltat*0.01
259 and num.allclose(self.ydata, other.ydata, rtol=rtol, atol=atol))
261 def assert_almost_equal(self, other, rtol=1e-5, atol=0.0):
263 assert self.network == other.network, \
264 'network codes differ: %s, %s' % (self.network, other.network)
265 assert self.station == other.station, \
266 'station codes differ: %s, %s' % (self.station, other.station)
267 assert self.location == other.location, \
268 'location codes differ: %s, %s' % (self.location, other.location)
269 assert self.channel == other.channel, 'channel codes differ'
270 assert (abs(self.deltat - other.deltat)
271 < (self.deltat + other.deltat)*1e-6), \
272 'sampling intervals differ %g, %g' % (self.deltat, other.delta)
273 assert abs(self.tmin-other.tmin) < self.deltat*0.01, \
274 'start times differ: %s, %s' % (
275 util.time_to_str(self.tmin), util.time_to_str(other.tmin))
276 assert abs(self.tmax-other.tmax) < self.deltat*0.01, \
277 'end times differ: %s, %s' % (
278 util.time_to_str(self.tmax), util.time_to_str(other.tmax))
280 assert num.allclose(self.ydata, other.ydata, rtol=rtol, atol=atol), \
281 'trace values differ'
283 def __hash__(self):
284 return id(self)
286 def __call__(self, t, clip=False, snap=round):
287 it = int(snap((t-self.tmin)/self.deltat))
288 if clip:
289 it = max(0, min(it, self.ydata.size-1))
290 else:
291 if it < 0 or self.ydata.size <= it:
292 raise IndexError()
294 return self.tmin+it*self.deltat, self.ydata[it]
296 def interpolate(self, t, clip=False):
297 '''
298 Value of trace between supporting points through linear interpolation.
300 :param t: time instant
301 :param clip: whether to clip indices to trace ends
302 '''
304 t0, y0 = self(t, clip=clip, snap=math.floor)
305 t1, y1 = self(t, clip=clip, snap=math.ceil)
306 if t0 == t1:
307 return y0
308 else:
309 return y0+(t-t0)/(t1-t0)*(y1-y0)
311 def index_clip(self, i):
312 '''
313 Clip index to valid range.
314 '''
316 return min(max(0, i), self.ydata.size)
318 def add(self, other, interpolate=True, left=0., right=0.):
319 '''
320 Add values of other trace (self += other).
322 Add values of ``other`` trace to the values of ``self``, where it
323 intersects with ``other``. This method does not change the extent of
324 ``self``. If ``interpolate`` is ``True`` (the default), the values of
325 ``other`` to be added are interpolated at sampling instants of
326 ``self``. Linear interpolation is performed. In this case the sampling
327 rate of ``other`` must be equal to or lower than that of ``self``. If
328 ``interpolate`` is ``False``, the sampling rates of the two traces must
329 match.
330 '''
332 if interpolate:
333 assert self.deltat <= other.deltat \
334 or same_sampling_rate(self, other)
336 other_xdata = other.get_xdata()
337 xdata = self.get_xdata()
338 self.ydata += num.interp(
339 xdata, other_xdata, other.ydata, left=left, right=left)
340 else:
341 assert self.deltat == other.deltat
342 ioff = int(round((other.tmin-self.tmin)/self.deltat))
343 ibeg = max(0, ioff)
344 iend = min(self.data_len(), ioff+other.data_len())
345 self.ydata[ibeg:iend] += other.ydata[ibeg-ioff:iend-ioff]
347 def mult(self, other, interpolate=True):
348 '''
349 Muliply with values of other trace ``(self *= other)``.
351 Multiply values of ``other`` trace to the values of ``self``, where it
352 intersects with ``other``. This method does not change the extent of
353 ``self``. If ``interpolate`` is ``True`` (the default), the values of
354 ``other`` to be multiplied are interpolated at sampling instants of
355 ``self``. Linear interpolation is performed. In this case the sampling
356 rate of ``other`` must be equal to or lower than that of ``self``. If
357 ``interpolate`` is ``False``, the sampling rates of the two traces must
358 match.
359 '''
361 if interpolate:
362 assert self.deltat <= other.deltat or \
363 same_sampling_rate(self, other)
365 other_xdata = other.get_xdata()
366 xdata = self.get_xdata()
367 self.ydata *= num.interp(
368 xdata, other_xdata, other.ydata, left=0., right=0.)
369 else:
370 assert self.deltat == other.deltat
371 ibeg1 = int(round((other.tmin-self.tmin)/self.deltat))
372 ibeg2 = int(round((self.tmin-other.tmin)/self.deltat))
373 iend1 = int(round((other.tmax-self.tmin)/self.deltat))+1
374 iend2 = int(round((self.tmax-other.tmin)/self.deltat))+1
376 ibeg1 = self.index_clip(ibeg1)
377 iend1 = self.index_clip(iend1)
378 ibeg2 = self.index_clip(ibeg2)
379 iend2 = self.index_clip(iend2)
381 self.ydata[ibeg1:iend1] *= other.ydata[ibeg2:iend2]
383 def max(self):
384 '''
385 Get time and value of data maximum.
386 '''
388 i = num.argmax(self.ydata)
389 return self.tmin + i*self.deltat, self.ydata[i]
391 def min(self):
392 '''
393 Get time and value of data minimum.
394 '''
396 i = num.argmin(self.ydata)
397 return self.tmin + i*self.deltat, self.ydata[i]
399 def absmax(self):
400 '''
401 Get time and value of maximum of the absolute of data.
402 '''
404 tmi, mi = self.min()
405 tma, ma = self.max()
406 if abs(mi) > abs(ma):
407 return tmi, abs(mi)
408 else:
409 return tma, abs(ma)
411 def set_codes(
412 self, network=None, station=None, location=None, channel=None,
413 extra=None):
415 '''
416 Set network, station, location, and channel codes.
417 '''
419 if network is not None:
420 self.network = network
421 if station is not None:
422 self.station = station
423 if location is not None:
424 self.location = location
425 if channel is not None:
426 self.channel = channel
427 if extra is not None:
428 self.extra = extra
430 self._update_ids()
432 def set_network(self, network):
433 self.network = network
434 self._update_ids()
436 def set_station(self, station):
437 self.station = station
438 self._update_ids()
440 def set_location(self, location):
441 self.location = location
442 self._update_ids()
444 def set_channel(self, channel):
445 self.channel = channel
446 self._update_ids()
448 def overlaps(self, tmin, tmax):
449 '''
450 Check if trace has overlap with a given time span.
451 '''
452 return not (tmax < self.tmin or self.tmax < tmin)
454 def is_relevant(self, tmin, tmax, selector=None):
455 '''
456 Check if trace has overlap with a given time span and matches a
457 condition callback. (internal use)
458 '''
460 return not (tmax <= self.tmin or self.tmax < tmin) \
461 and (selector is None or selector(self))
463 def _update_ids(self):
464 '''
465 Update dependent ids.
466 '''
468 self.full_id = (
469 self.network, self.station, self.location, self.channel, self.tmin)
470 self.nslc_id = reuse(
471 (self.network, self.station, self.location, self.channel))
473 def prune_from_reuse_cache(self):
474 util.deuse(self.nslc_id)
475 util.deuse(self.network)
476 util.deuse(self.station)
477 util.deuse(self.location)
478 util.deuse(self.channel)
480 def set_mtime(self, mtime):
481 '''
482 Set modification time of the trace.
483 '''
485 self.mtime = mtime
487 def get_xdata(self):
488 '''
489 Create array for time axis.
490 '''
492 if self.ydata is None:
493 raise NoData()
495 return self.tmin \
496 + num.arange(len(self.ydata), dtype=num.float64) * self.deltat
498 def get_ydata(self):
499 '''
500 Get data array.
501 '''
503 if self.ydata is None:
504 raise NoData()
506 return self.ydata
508 def set_ydata(self, new_ydata):
509 '''
510 Replace data array.
511 '''
513 self.drop_growbuffer()
514 self.ydata = new_ydata
515 self.tmax = self.tmin+(len(self.ydata)-1)*self.deltat
517 def data_len(self):
518 if self.ydata is not None:
519 return self.ydata.size
520 else:
521 return int(round((self.tmax-self.tmin)/self.deltat)) + 1
523 def drop_data(self):
524 '''
525 Forget data, make dataless trace.
526 '''
528 self.drop_growbuffer()
529 self.ydata = None
531 def drop_growbuffer(self):
532 '''
533 Detach the traces grow buffer.
534 '''
536 self._growbuffer = None
537 self._pchain = None
539 def copy(self, data=True):
540 '''
541 Make a deep copy of the trace.
542 '''
544 tracecopy = copy.copy(self)
545 tracecopy.drop_growbuffer()
546 if data:
547 tracecopy.ydata = self.ydata.copy()
548 tracecopy.meta = copy.deepcopy(self.meta)
549 return tracecopy
551 def crop_zeros(self):
552 '''
553 Remove any zeros at beginning and end.
554 '''
556 indices = num.where(self.ydata != 0.0)[0]
557 if indices.size == 0:
558 raise NoData()
560 ibeg = indices[0]
561 iend = indices[-1]+1
562 if ibeg == 0 and iend == self.ydata.size-1:
563 return
565 self.drop_growbuffer()
566 self.ydata = self.ydata[ibeg:iend].copy()
567 self.tmin = self.tmin+ibeg*self.deltat
568 self.tmax = self.tmin+(len(self.ydata)-1)*self.deltat
569 self._update_ids()
571 def append(self, data):
572 '''
573 Append data to the end of the trace.
575 To make this method efficient when successively very few or even single
576 samples are appended, a larger grow buffer is allocated upon first
577 invocation. The traces data is then changed to be a view into the
578 currently filled portion of the grow buffer array.
579 '''
581 assert self.ydata.dtype == data.dtype
582 newlen = data.size + self.ydata.size
583 if self._growbuffer is None or self._growbuffer.size < newlen:
584 self._growbuffer = num.empty(newlen*2, dtype=self.ydata.dtype)
585 self._growbuffer[:self.ydata.size] = self.ydata
586 self._growbuffer[self.ydata.size:newlen] = data
587 self.ydata = self._growbuffer[:newlen]
588 self.tmax = self.tmin + (newlen-1)*self.deltat
590 def chop(
591 self, tmin, tmax, inplace=True, include_last=False,
592 snap=(round, round), want_incomplete=True):
594 '''
595 Cut the trace to given time span.
597 If the ``inplace`` argument is True (the default) the trace is cut in
598 place, otherwise a new trace with the cut part is returned. By
599 default, the indices where to start and end the trace data array are
600 determined by rounding of ``tmin`` and ``tmax`` to sampling instances
601 using Python's :py:func:`round` function. This behaviour can be changed
602 with the ``snap`` argument, which takes a tuple of two functions (one
603 for the lower and one for the upper end) to be used instead of
604 :py:func:`round`. The last sample is by default not included unless
605 ``include_last`` is set to True. If the given time span exceeds the
606 available time span of the trace, the available part is returned,
607 unless ``want_incomplete`` is set to False - in that case, a
608 :py:exc:`NoData` exception is raised. This exception is always raised,
609 when the requested time span does dot overlap with the trace's time
610 span.
611 '''
613 if want_incomplete:
614 if tmax <= self.tmin-self.deltat or self.tmax+self.deltat < tmin:
615 raise NoData()
616 else:
617 if tmin < self.tmin or self.tmax < tmax:
618 raise NoData()
620 ibeg = max(0, t2ind(tmin-self.tmin, self.deltat, snap[0]))
621 iplus = 0
622 if include_last:
623 iplus = 1
625 iend = min(
626 self.data_len(),
627 t2ind(tmax-self.tmin, self.deltat, snap[1])+iplus)
629 if ibeg >= iend:
630 raise NoData()
632 obj = self
633 if not inplace:
634 obj = self.copy(data=False)
636 self.drop_growbuffer()
637 if self.ydata is not None:
638 obj.ydata = self.ydata[ibeg:iend].copy()
639 else:
640 obj.ydata = None
642 obj.tmin = obj.tmin+ibeg*obj.deltat
643 obj.tmax = obj.tmin+((iend-ibeg)-1)*obj.deltat
645 obj._update_ids()
647 return obj
649 def downsample(self, ndecimate, snap=False, initials=None, demean=False,
650 ftype='fir-remez'):
651 '''
652 Downsample trace by a given integer factor.
654 Antialiasing filter details:
656 * ``iir``: A Chebyshev type I digital filter of order 8 with maximum
657 ripple of 0.05 dB in the passband.
658 * ``fir``: A FIR filter using a Hamming window and 31 taps and a
659 well-behaved phase response.
660 * ``fir-remez``: A FIR filter based on the Remez exchange algorithm
661 with ``45*ndecimate`` taps and a cutoff at 75% Nyquist frequency.
663 Comparison of the digital filters:
665 .. figure :: ../../static/downsampling-filter-comparison.png
666 :width: 60%
667 :alt: Comparison of the downsampling filters.
669 :param ndecimate: decimation factor, avoid values larger than 8
670 :param snap: whether to put the new sampling instances closest to
671 multiples of the sampling rate.
672 :param initials: ``None``, ``True``, or initial conditions for the
673 anti-aliasing filter, obtained from a previous run. In the latter
674 two cases the final state of the filter is returned instead of
675 ``None``.
676 :param demean: whether to demean the signal before filtering.
677 :param ftype: which FIR filter to use, choose from
678 ``'iir'``, ``'fir'``, ``'fir-remez'``. Default is ``'fir-remez'``.
679 '''
680 newdeltat = self.deltat*ndecimate
681 if snap:
682 ilag = int(round(
683 (math.ceil(self.tmin / newdeltat) * newdeltat - self.tmin)
684 / self.deltat))
685 else:
686 ilag = 0
688 if snap and ilag > 0 and ilag < self.ydata.size:
689 data = self.ydata.astype(num.float64)
690 self.tmin += ilag*self.deltat
691 else:
692 data = self.ydata.astype(num.float64)
694 if demean:
695 data -= num.mean(data)
697 if data.size != 0:
698 result = util.decimate(
699 data, ndecimate, ftype=ftype, zi=initials, ioff=ilag)
700 else:
701 result = data
703 if initials is None:
704 self.ydata, finals = result, None
705 else:
706 self.ydata, finals = result
708 self.deltat = reuse(self.deltat*ndecimate)
709 self.tmax = self.tmin+(len(self.ydata)-1)*self.deltat
710 self._update_ids()
712 return finals
714 def downsample_to(self, deltat, snap=False, allow_upsample_max=1,
715 initials=None, demean=False, ftype='fir-remez'):
717 '''
718 Downsample to given sampling rate.
720 Tries to downsample the trace to a target sampling interval of
721 ``deltat``. This runs the :py:meth:`Trace.downsample` one or several
722 times. If ``allow_upsample_max`` is set to a value larger than 1,
723 intermediate upsampling steps are allowed, in order to increase the
724 number of possible downsampling ratios.
726 If the requested ratio is not supported, an exception of type
727 :py:exc:`pyrocko.util.UnavailableDecimation` is raised.
729 :param deltat: desired sampling interval in [s]
730 :param allow_upsample_max: maximum upsampling of the signal to achieve
731 the desired deltat. Default is ``1``.
732 :param snap: whether to put the new sampling instances closest to
733 multiples of the sampling rate.
734 :param initials: ``None``, ``True``, or initial conditions for the
735 anti-aliasing filter, obtained from a previous run. In the latter
736 two cases the final state of the filter is returned instead of
737 ``None``.
738 :param demean: whether to demean the signal before filtering.
739 :param ftype: which FIR filter to use, choose from
740 ``'iir'``, ``'fir'``, ``'fir-remez'``. Default is ``'fir-remez'``.
741 See also: :meth:`Trace.downample`
742 '''
744 ratio = deltat/self.deltat
745 rratio = round(ratio)
747 ok = False
748 for upsratio in range(1, allow_upsample_max+1):
749 dratio = (upsratio/self.deltat) / (1./deltat)
750 if abs(dratio - round(dratio)) / dratio < 0.0001 and \
751 util.decitab(int(round(dratio))):
753 ok = True
754 break
756 if not ok:
757 raise util.UnavailableDecimation('ratio = %g' % ratio)
759 if upsratio > 1:
760 self.drop_growbuffer()
761 ydata = self.ydata
762 self.ydata = num.zeros(
763 ydata.size*upsratio-(upsratio-1), ydata.dtype)
764 self.ydata[::upsratio] = ydata
765 for i in range(1, upsratio):
766 self.ydata[i::upsratio] = \
767 float(i)/upsratio * ydata[:-1] \
768 + float(upsratio-i)/upsratio * ydata[1:]
769 self.deltat = self.deltat/upsratio
771 ratio = deltat/self.deltat
772 rratio = round(ratio)
774 deci_seq = util.decitab(int(rratio))
775 finals = []
776 for i, ndecimate in enumerate(deci_seq):
777 if ndecimate != 1:
778 xinitials = None
779 if initials is not None:
780 xinitials = initials[i]
781 finals.append(self.downsample(
782 ndecimate, snap=snap, initials=xinitials, demean=demean,
783 ftype=ftype))
785 if initials is not None:
786 return finals
788 def resample(self, deltat):
789 '''
790 Resample to given sampling rate ``deltat``.
792 Resampling is performed in the frequency domain.
793 '''
795 ndata = self.ydata.size
796 ntrans = nextpow2(ndata)
797 fntrans2 = ntrans * self.deltat/deltat
798 ntrans2 = int(round(fntrans2))
799 deltat2 = self.deltat * float(ntrans)/float(ntrans2)
800 ndata2 = int(round(ndata*self.deltat/deltat2))
801 if abs(fntrans2 - ntrans2) > 1e-7:
802 logger.warning(
803 'resample: requested deltat %g could not be matched exactly: '
804 '%g' % (deltat, deltat2))
806 data = self.ydata
807 data_pad = num.zeros(ntrans, dtype=float)
808 data_pad[:ndata] = data
809 fdata = num.fft.rfft(data_pad)
810 fdata2 = num.zeros((ntrans2+1)//2, dtype=fdata.dtype)
811 n = min(fdata.size, fdata2.size)
812 fdata2[:n] = fdata[:n]
813 data2 = num.fft.irfft(fdata2)
814 data2 = data2[:ndata2]
815 data2 *= float(ntrans2) / float(ntrans)
816 self.deltat = deltat2
817 self.set_ydata(data2)
819 def resample_simple(self, deltat):
820 tyear = 3600*24*365.
822 if deltat == self.deltat:
823 return
825 if abs(self.deltat - deltat) * tyear/deltat < deltat:
826 logger.warning(
827 'resample_simple: less than one sample would have to be '
828 'inserted/deleted per year. Doing nothing.')
829 return
831 ninterval = int(round(deltat / (self.deltat - deltat)))
832 if abs(ninterval) < 20:
833 logger.error(
834 'resample_simple: sample insertion/deletion interval less '
835 'than 20. results would be erroneous.')
836 raise ResamplingFailed()
838 delete = False
839 if ninterval < 0:
840 ninterval = - ninterval
841 delete = True
843 tyearbegin = util.year_start(self.tmin)
845 nmin = int(round((self.tmin - tyearbegin)/deltat))
847 ibegin = (((nmin-1)//ninterval)+1) * ninterval - nmin
848 nindices = (len(self.ydata) - ibegin - 1) / ninterval + 1
849 if nindices > 0:
850 indices = ibegin + num.arange(nindices) * ninterval
851 data_split = num.split(self.ydata, indices)
852 data = []
853 for ln, h in zip(data_split[:-1], data_split[1:]):
854 if delete:
855 ln = ln[:-1]
857 data.append(ln)
858 if not delete:
859 if ln.size == 0:
860 v = h[0]
861 else:
862 v = 0.5*(ln[-1] + h[0])
863 data.append(num.array([v], dtype=ln.dtype))
865 data.append(data_split[-1])
867 ydata_new = num.concatenate(data)
869 self.tmin = tyearbegin + nmin * deltat
870 self.deltat = deltat
871 self.set_ydata(ydata_new)
873 def stretch(self, tmin_new, tmax_new):
874 '''
875 Stretch signal while preserving sample rate using sinc interpolation.
877 :param tmin_new: new time of first sample
878 :param tmax_new: new time of last sample
880 This method can be used to correct for a small linear time drift or to
881 introduce sub-sample time shifts. The amount of stretching is limited
882 to 10% by the implementation and is expected to be much smaller than
883 that by the approximations used.
884 '''
886 from pyrocko import signal_ext
888 i_control = num.array([0, self.ydata.size-1], dtype=num.int64)
889 t_control = num.array([tmin_new, tmax_new], dtype=float)
891 r = (tmax_new - tmin_new) / self.deltat + 1.0
892 n_new = int(round(r))
893 if abs(n_new - r) > 0.001:
894 n_new = int(math.floor(r))
896 assert n_new >= 2
898 tmax_new = tmin_new + (n_new-1) * self.deltat
900 ydata_new = num.empty(n_new, dtype=float)
901 signal_ext.antidrift(i_control, t_control,
902 self.ydata.astype(float),
903 tmin_new, self.deltat, ydata_new)
905 self.tmin = tmin_new
906 self.set_ydata(ydata_new)
907 self._update_ids()
909 def nyquist_check(self, frequency, intro='Corner frequency', warn=True,
910 raise_exception=False):
912 '''
913 Check if a given frequency is above the Nyquist frequency of the trace.
915 :param intro: string used to introduce the warning/error message
916 :param warn: whether to emit a warning
917 :param raise_exception: whether to raise an :py:exc:`AboveNyquist`
918 exception.
919 '''
921 if frequency >= 0.5/self.deltat:
922 message = '%s (%g Hz) is equal to or higher than nyquist ' \
923 'frequency (%g Hz). (Trace %s)' \
924 % (intro, frequency, 0.5/self.deltat, self.name())
925 if warn:
926 logger.warning(message)
927 if raise_exception:
928 raise AboveNyquist(message)
930 def lowpass(self, order, corner, nyquist_warn=True,
931 nyquist_exception=False, demean=True):
933 '''
934 Apply Butterworth lowpass to the trace.
936 :param order: order of the filter
937 :param corner: corner frequency of the filter
939 Mean is removed before filtering.
940 '''
942 self.nyquist_check(
943 corner, 'Corner frequency of lowpass', nyquist_warn,
944 nyquist_exception)
946 (b, a) = _get_cached_filter_coefs(
947 order, [corner*2.0*self.deltat], btype='low')
949 if len(a) != order+1 or len(b) != order+1:
950 logger.warning(
951 'Erroneous filter coefficients returned by '
952 'scipy.signal.butter(). You may need to downsample the '
953 'signal before filtering.')
955 data = self.ydata.astype(num.float64)
956 if demean:
957 data -= num.mean(data)
958 self.drop_growbuffer()
959 self.ydata = signal.lfilter(b, a, data)
961 def highpass(self, order, corner, nyquist_warn=True,
962 nyquist_exception=False, demean=True):
964 '''
965 Apply butterworth highpass to the trace.
967 :param order: order of the filter
968 :param corner: corner frequency of the filter
970 Mean is removed before filtering.
971 '''
973 self.nyquist_check(
974 corner, 'Corner frequency of highpass', nyquist_warn,
975 nyquist_exception)
977 (b, a) = _get_cached_filter_coefs(
978 order, [corner*2.0*self.deltat], btype='high')
980 data = self.ydata.astype(num.float64)
981 if len(a) != order+1 or len(b) != order+1:
982 logger.warning(
983 'Erroneous filter coefficients returned by '
984 'scipy.signal.butter(). You may need to downsample the '
985 'signal before filtering.')
986 if demean:
987 data -= num.mean(data)
988 self.drop_growbuffer()
989 self.ydata = signal.lfilter(b, a, data)
991 def bandpass(self, order, corner_hp, corner_lp, demean=True):
992 '''
993 Apply butterworth bandpass to the trace.
995 :param order: order of the filter
996 :param corner_hp: lower corner frequency of the filter
997 :param corner_lp: upper corner frequency of the filter
999 Mean is removed before filtering.
1000 '''
1002 self.nyquist_check(corner_hp, 'Lower corner frequency of bandpass')
1003 self.nyquist_check(corner_lp, 'Higher corner frequency of bandpass')
1004 (b, a) = _get_cached_filter_coefs(
1005 order,
1006 [corner*2.0*self.deltat for corner in (corner_hp, corner_lp)],
1007 btype='band')
1008 data = self.ydata.astype(num.float64)
1009 if demean:
1010 data -= num.mean(data)
1011 self.drop_growbuffer()
1012 self.ydata = signal.lfilter(b, a, data)
1014 def bandstop(self, order, corner_hp, corner_lp, demean=True):
1015 '''
1016 Apply bandstop (attenuates frequencies in band) to the trace.
1018 :param order: order of the filter
1019 :param corner_hp: lower corner frequency of the filter
1020 :param corner_lp: upper corner frequency of the filter
1022 Mean is removed before filtering.
1023 '''
1025 self.nyquist_check(corner_hp, 'Lower corner frequency of bandstop')
1026 self.nyquist_check(corner_lp, 'Higher corner frequency of bandstop')
1027 (b, a) = _get_cached_filter_coefs(
1028 order,
1029 [corner*2.0*self.deltat for corner in (corner_hp, corner_lp)],
1030 btype='bandstop')
1031 data = self.ydata.astype(num.float64)
1032 if demean:
1033 data -= num.mean(data)
1034 self.drop_growbuffer()
1035 self.ydata = signal.lfilter(b, a, data)
1037 def envelope(self, inplace=True):
1038 '''
1039 Calculate the envelope of the trace.
1041 :param inplace: calculate envelope in place
1043 The calculation follows:
1045 .. math::
1047 Y' = \\sqrt{Y^2+H(Y)^2}
1049 where H is the Hilbert-Transform of the signal Y.
1050 '''
1052 y = self.ydata.astype(float)
1053 env = num.abs(hilbert(y))
1054 if inplace:
1055 self.drop_growbuffer()
1056 self.ydata = env
1057 else:
1058 tr = self.copy(data=False)
1059 tr.ydata = env
1060 return tr
1062 def taper(self, taperer, inplace=True, chop=False):
1063 '''
1064 Apply a :py:class:`Taper` to the trace.
1066 :param taperer: instance of :py:class:`Taper` subclass
1067 :param inplace: apply taper inplace
1068 :param chop: if ``True``: exclude tapered parts from the resulting
1069 trace
1070 '''
1072 if not inplace:
1073 tr = self.copy()
1074 else:
1075 tr = self
1077 if chop:
1078 i, n = taperer.span(tr.ydata, tr.tmin, tr.deltat)
1079 tr.shift(i*tr.deltat)
1080 tr.set_ydata(tr.ydata[i:i+n])
1082 taperer(tr.ydata, tr.tmin, tr.deltat)
1084 if not inplace:
1085 return tr
1087 def whiten(self, order=6):
1088 '''
1089 Whiten signal in time domain using autoregression and recursive filter.
1091 :param order: order of the autoregression process
1092 '''
1094 b, a = self.whitening_coefficients(order)
1095 self.drop_growbuffer()
1096 self.ydata = signal.lfilter(b, a, self.ydata)
1098 def whitening_coefficients(self, order=6):
1099 ar = yulewalker(self.ydata, order)
1100 b, a = [1.] + ar.tolist(), [1.]
1101 return b, a
1103 def ampspec_whiten(
1104 self,
1105 width,
1106 td_taper='auto',
1107 fd_taper='auto',
1108 pad_to_pow2=True,
1109 demean=True):
1111 '''
1112 Whiten signal via frequency domain using moving average on amplitude
1113 spectra.
1115 :param width: width of smoothing kernel [Hz]
1116 :param td_taper: time domain taper, object of type :py:class:`Taper` or
1117 ``None`` or ``'auto'``.
1118 :param fd_taper: frequency domain taper, object of type
1119 :py:class:`Taper` or ``None`` or ``'auto'``.
1120 :param pad_to_pow2: whether to pad the signal with zeros up to a length
1121 of 2^n
1122 :param demean: whether to demean the signal before tapering
1124 The signal is first demeaned and then tapered using ``td_taper``. Then,
1125 the spectrum is calculated and inversely weighted with a smoothed
1126 version of its amplitude spectrum. A moving average is used for the
1127 smoothing. The smoothed spectrum is then tapered using ``fd_taper``.
1128 Finally, the smoothed and tapered spectrum is back-transformed into the
1129 time domain.
1131 If ``td_taper`` is set to ``'auto'``, ``CosFader(1.0/width)`` is used.
1132 If ``fd_taper`` is set to ``'auto'``, ``CosFader(width)`` is used.
1133 '''
1135 ndata = self.data_len()
1137 if pad_to_pow2:
1138 ntrans = nextpow2(ndata)
1139 else:
1140 ntrans = ndata
1142 df = 1./(ntrans*self.deltat)
1143 nw = int(round(width/df))
1144 if ndata//2+1 <= nw:
1145 raise TraceTooShort(
1146 'Samples in trace: %s, samples needed: %s' % (ndata, nw))
1148 if td_taper == 'auto':
1149 td_taper = CosFader(1./width)
1151 if fd_taper == 'auto':
1152 fd_taper = CosFader(width)
1154 if td_taper:
1155 self.taper(td_taper)
1157 ydata = self.get_ydata().astype(float)
1158 if demean:
1159 ydata -= ydata.mean()
1161 spec = num.fft.rfft(ydata, ntrans)
1163 amp = num.abs(spec)
1164 nspec = amp.size
1165 csamp = num.cumsum(amp)
1166 amp_smoothed = num.empty(nspec, dtype=csamp.dtype)
1167 n1, n2 = nw//2, nw//2 + nspec - nw
1168 amp_smoothed[n1:n2] = (csamp[nw:] - csamp[:-nw]) / nw
1169 amp_smoothed[:n1] = amp_smoothed[n1]
1170 amp_smoothed[n2:] = amp_smoothed[n2-1]
1172 denom = amp_smoothed * amp
1173 numer = amp
1174 eps = num.mean(denom) * 1e-9
1175 if eps == 0.0:
1176 eps = 1e-9
1178 numer += eps
1179 denom += eps
1180 spec *= numer/denom
1182 if fd_taper:
1183 fd_taper(spec, 0., df)
1185 ydata = num.fft.irfft(spec)
1186 self.set_ydata(ydata[:ndata])
1188 def _get_cached_freqs(self, nf, deltaf):
1189 ck = (nf, deltaf)
1190 if ck not in Trace.cached_frequencies:
1191 Trace.cached_frequencies[ck] = deltaf * num.arange(
1192 nf, dtype=float)
1194 return Trace.cached_frequencies[ck]
1196 def bandpass_fft(self, corner_hp, corner_lp):
1197 '''
1198 Apply boxcar bandbpass to trace (in spectral domain).
1199 '''
1201 n = len(self.ydata)
1202 n2 = nextpow2(n)
1203 data = num.zeros(n2, dtype=num.float64)
1204 data[:n] = self.ydata
1205 fdata = num.fft.rfft(data)
1206 freqs = self._get_cached_freqs(len(fdata), 1./(self.deltat*n2))
1207 fdata[0] = 0.0
1208 fdata *= num.logical_and(corner_hp < freqs, freqs < corner_lp)
1209 data = num.fft.irfft(fdata)
1210 self.drop_growbuffer()
1211 self.ydata = data[:n]
1213 def shift(self, tshift):
1214 '''
1215 Time shift the trace.
1216 '''
1218 self.tmin += tshift
1219 self.tmax += tshift
1220 self._update_ids()
1222 def snap(self, inplace=True, interpolate=False):
1223 '''
1224 Shift trace samples to nearest even multiples of the sampling rate.
1226 :param inplace: (boolean) snap traces inplace
1228 If ``inplace`` is ``False`` and the difference of tmin and tmax of
1229 both, the snapped and the original trace is smaller than 0.01 x deltat,
1230 :py:func:`snap` returns the unsnapped instance of the original trace.
1231 '''
1233 tmin = round(self.tmin/self.deltat)*self.deltat
1234 tmax = tmin + (self.ydata.size-1)*self.deltat
1236 if inplace:
1237 xself = self
1238 else:
1239 if abs(self.tmin - tmin) < 1e-2*self.deltat and \
1240 abs(self.tmax - tmax) < 1e-2*self.deltat:
1241 return self
1243 xself = self.copy()
1245 if interpolate:
1246 from pyrocko import signal_ext
1247 n = xself.data_len()
1248 ydata_new = num.empty(n, dtype=float)
1249 i_control = num.array([0, n-1], dtype=num.int64)
1250 tref = tmin
1251 t_control = num.array(
1252 [float(xself.tmin-tref), float(xself.tmax-tref)],
1253 dtype=float)
1255 signal_ext.antidrift(i_control, t_control,
1256 xself.ydata.astype(float),
1257 float(tmin-tref), xself.deltat, ydata_new)
1259 xself.ydata = ydata_new
1261 xself.tmin = tmin
1262 xself.tmax = tmax
1263 xself._update_ids()
1265 return xself
1267 def fix_deltat_rounding_errors(self):
1268 '''
1269 Try to undo sampling rate rounding errors.
1271 See :py:func:`fix_deltat_rounding_errors`.
1272 '''
1274 self.deltat = fix_deltat_rounding_errors(self.deltat)
1275 self.tmax = self.tmin + (self.data_len() - 1) * self.deltat
1277 def sta_lta_centered(self, tshort, tlong, quad=True, scalingmethod=1):
1278 '''
1279 Run special STA/LTA filter where the short time window is centered on
1280 the long time window.
1282 :param tshort: length of short time window in [s]
1283 :param tlong: length of long time window in [s]
1284 :param quad: whether to square the data prior to applying the STA/LTA
1285 filter
1286 :param scalingmethod: integer key to select how output values are
1287 scaled / normalized (``1``, ``2``, or ``3``)
1289 ============= ====================================== ===========
1290 Scalingmethod Implementation Range
1291 ============= ====================================== ===========
1292 ``1`` As/Al* Ts/Tl [0,1]
1293 ``2`` (As/Al - 1) / (Tl/Ts - 1) [-Ts/Tl,1]
1294 ``3`` Like ``2`` but clipping range at zero [0,1]
1295 ============= ====================================== ===========
1297 '''
1299 nshort = int(round(tshort/self.deltat))
1300 nlong = int(round(tlong/self.deltat))
1302 assert nshort < nlong
1303 if nlong > len(self.ydata):
1304 raise TraceTooShort(
1305 'Samples in trace: %s, samples needed: %s'
1306 % (len(self.ydata), nlong))
1308 if quad:
1309 sqrdata = self.ydata**2
1310 else:
1311 sqrdata = self.ydata
1313 mavg_short = moving_avg(sqrdata, nshort)
1314 mavg_long = moving_avg(sqrdata, nlong)
1316 self.drop_growbuffer()
1318 if scalingmethod not in (1, 2, 3):
1319 raise Exception('Invalid argument to scalingrange argument.')
1321 if scalingmethod == 1:
1322 self.ydata = mavg_short/mavg_long * float(nshort)/float(nlong)
1323 elif scalingmethod in (2, 3):
1324 self.ydata = (mavg_short/mavg_long - 1.) \
1325 / ((float(nlong)/float(nshort)) - 1)
1327 if scalingmethod == 3:
1328 self.ydata = num.maximum(self.ydata, 0.)
1330 def sta_lta_right(self, tshort, tlong, quad=True, scalingmethod=1):
1331 '''
1332 Run special STA/LTA filter where the short time window is overlapping
1333 with the last part of the long time window.
1335 :param tshort: length of short time window in [s]
1336 :param tlong: length of long time window in [s]
1337 :param quad: whether to square the data prior to applying the STA/LTA
1338 filter
1339 :param scalingmethod: integer key to select how output values are
1340 scaled / normalized (``1``, ``2``, or ``3``)
1342 ============= ====================================== ===========
1343 Scalingmethod Implementation Range
1344 ============= ====================================== ===========
1345 ``1`` As/Al* Ts/Tl [0,1]
1346 ``2`` (As/Al - 1) / (Tl/Ts - 1) [-Ts/Tl,1]
1347 ``3`` Like ``2`` but clipping range at zero [0,1]
1348 ============= ====================================== ===========
1350 With ``scalingmethod=1``, the values produced by this variant of the
1351 STA/LTA are equivalent to
1353 .. math::
1354 s_i = \\frac{s}{l} \\frac{\\frac{1}{s}\\sum_{j=i}^{i+s-1} f_j}
1355 {\\frac{1}{l}\\sum_{j=i+s-l}^{i+s-1} f_j}
1357 where :math:`f_j` are the input samples, :math:`s` are the number of
1358 samples in the short time window and :math:`l` are the number of
1359 samples in the long time window.
1360 '''
1362 n = self.data_len()
1363 tmin = self.tmin
1365 nshort = max(1, int(round(tshort/self.deltat)))
1366 nlong = max(1, int(round(tlong/self.deltat)))
1368 assert nshort < nlong
1370 if nlong > len(self.ydata):
1371 raise TraceTooShort(
1372 'Samples in trace: %s, samples needed: %s'
1373 % (len(self.ydata), nlong))
1375 if quad:
1376 sqrdata = self.ydata**2
1377 else:
1378 sqrdata = self.ydata
1380 nshift = int(math.floor(0.5 * (nlong - nshort)))
1381 if nlong % 2 != 0 and nshort % 2 == 0:
1382 nshift += 1
1384 mavg_short = moving_avg(sqrdata, nshort)[nshift:]
1385 mavg_long = moving_avg(sqrdata, nlong)[:sqrdata.size-nshift]
1387 self.drop_growbuffer()
1389 if scalingmethod not in (1, 2, 3):
1390 raise Exception('Invalid argument to scalingrange argument.')
1392 if scalingmethod == 1:
1393 ydata = mavg_short/mavg_long * nshort/nlong
1394 elif scalingmethod in (2, 3):
1395 ydata = (mavg_short/mavg_long - 1.) \
1396 / ((float(nlong)/float(nshort)) - 1)
1398 if scalingmethod == 3:
1399 ydata = num.maximum(self.ydata, 0.)
1401 self.set_ydata(ydata)
1403 self.shift((math.ceil(0.5*nlong) - nshort + 1) * self.deltat)
1405 self.chop(
1406 tmin + (nlong - nshort) * self.deltat,
1407 tmin + (n - nshort) * self.deltat)
1409 def peaks(self, threshold, tsearch,
1410 deadtime=False,
1411 nblock_duration_detection=100):
1413 '''
1414 Detect peaks above a given threshold (method 1).
1416 From every instant, where the signal rises above ``threshold``, a time
1417 length of ``tsearch`` seconds is searched for a maximum. A list with
1418 tuples (time, value) for each detected peak is returned. The
1419 ``deadtime`` argument turns on a special deadtime duration detection
1420 algorithm useful in combination with recursive STA/LTA filters.
1421 '''
1423 y = self.ydata
1424 above = num.where(y > threshold, 1, 0)
1425 deriv = num.zeros(y.size, dtype=num.int8)
1426 deriv[1:] = above[1:]-above[:-1]
1427 itrig_positions = num.nonzero(deriv > 0)[0]
1428 tpeaks = []
1429 apeaks = []
1430 tzeros = []
1431 tzero = self.tmin
1433 for itrig_pos in itrig_positions:
1434 ibeg = itrig_pos
1435 iend = min(
1436 len(self.ydata),
1437 itrig_pos + int(math.ceil(tsearch/self.deltat)))
1438 ipeak = num.argmax(y[ibeg:iend])
1439 tpeak = self.tmin + (ipeak+ibeg)*self.deltat
1440 apeak = y[ibeg+ipeak]
1442 if tpeak < tzero:
1443 continue
1445 if deadtime:
1446 ibeg = itrig_pos
1447 iblock = 0
1448 nblock = nblock_duration_detection
1449 totalsum = 0.
1450 while True:
1451 if ibeg+iblock*nblock >= len(y):
1452 tzero = self.tmin + (len(y)-1) * self.deltat
1453 break
1455 logy = num.log(
1456 y[ibeg+iblock*nblock:ibeg+(iblock+1)*nblock])
1457 logy[0] += totalsum
1458 ysum = num.cumsum(logy)
1459 totalsum = ysum[-1]
1460 below = num.where(ysum <= 0., 1, 0)
1461 deriv = num.zeros(ysum.size, dtype=num.int8)
1462 deriv[1:] = below[1:]-below[:-1]
1463 izero_positions = num.nonzero(deriv > 0)[0] + iblock*nblock
1464 if len(izero_positions) > 0:
1465 tzero = self.tmin + self.deltat * (
1466 ibeg + izero_positions[0])
1467 break
1468 iblock += 1
1469 else:
1470 tzero = ibeg*self.deltat + self.tmin + tsearch
1472 tpeaks.append(tpeak)
1473 apeaks.append(apeak)
1474 tzeros.append(tzero)
1476 if deadtime:
1477 return tpeaks, apeaks, tzeros
1478 else:
1479 return tpeaks, apeaks
1481 def peaks2(self, threshold, tsearch):
1483 '''
1484 Detect peaks above a given threshold (method 2).
1486 This variant of peak detection is a bit more robust (and slower) than
1487 the one implemented in :py:meth:`Trace.peaks`. First all samples with
1488 ``a[i-1] < a[i] > a[i+1]`` are masked as potential peaks. From these,
1489 iteratively the one with the maximum amplitude ``a[j]`` and time
1490 ``t[j]`` is choosen and potential peaks within
1491 ``t[j] - tsearch, t[j] + tsearch``
1492 are discarded. The algorithm stops, when ``a[j] < threshold`` or when
1493 no more potential peaks are left.
1494 '''
1496 a = num.copy(self.ydata)
1498 amin = num.min(a)
1500 a[0] = amin
1501 a[1: -1][num.diff(a, 2) <= 0.] = amin
1502 a[-1] = amin
1504 data = []
1505 while True:
1506 imax = num.argmax(a)
1507 amax = a[imax]
1509 if amax < threshold or amax == amin:
1510 break
1512 data.append((self.tmin + imax * self.deltat, amax))
1514 ntsearch = int(round(tsearch / self.deltat))
1515 a[max(imax-ntsearch//2, 0):min(imax+ntsearch//2, a.size)] = amin
1517 if data:
1518 data.sort()
1519 tpeaks, apeaks = list(zip(*data))
1520 else:
1521 tpeaks, apeaks = [], []
1523 return tpeaks, apeaks
1525 def extend(self, tmin=None, tmax=None, fillmethod='zeros'):
1526 '''
1527 Extend trace to given span.
1529 :param tmin: begin time of new span
1530 :param tmax: end time of new span
1531 :param fillmethod: ``'zeros'``, ``'repeat'``, ``'mean'``, or
1532 ``'median'``
1533 '''
1535 nold = self.ydata.size
1537 if tmin is not None:
1538 nl = min(0, int(round((tmin-self.tmin)/self.deltat)))
1539 else:
1540 nl = 0
1542 if tmax is not None:
1543 nh = max(nold - 1, int(round((tmax-self.tmin)/self.deltat)))
1544 else:
1545 nh = nold - 1
1547 n = nh - nl + 1
1548 data = num.zeros(n, dtype=self.ydata.dtype)
1549 data[-nl:-nl + nold] = self.ydata
1550 if self.ydata.size >= 1:
1551 if fillmethod == 'repeat':
1552 data[:-nl] = self.ydata[0]
1553 data[-nl + nold:] = self.ydata[-1]
1554 elif fillmethod == 'median':
1555 v = num.median(self.ydata)
1556 data[:-nl] = v
1557 data[-nl + nold:] = v
1558 elif fillmethod == 'mean':
1559 v = num.mean(self.ydata)
1560 data[:-nl] = v
1561 data[-nl + nold:] = v
1563 self.drop_growbuffer()
1564 self.ydata = data
1566 self.tmin += nl * self.deltat
1567 self.tmax = self.tmin + (self.ydata.size - 1) * self.deltat
1569 self._update_ids()
1571 def transfer(self,
1572 tfade=0.,
1573 freqlimits=None,
1574 transfer_function=None,
1575 cut_off_fading=True,
1576 demean=True,
1577 invert=False):
1579 '''
1580 Return new trace with transfer function applied (convolution).
1582 :param tfade: rise/fall time in seconds of taper applied in timedomain
1583 at both ends of trace.
1584 :param freqlimits: 4-tuple with corner frequencies in Hz.
1585 :param transfer_function: FrequencyResponse object; must provide a
1586 method 'evaluate(freqs)', which returns the transfer function
1587 coefficients at the frequencies 'freqs'.
1588 :param cut_off_fading: whether to cut off rise/fall interval in output
1589 trace.
1590 :param demean: remove mean before applying transfer function
1591 :param invert: set to True to do a deconvolution
1592 '''
1594 if transfer_function is None:
1595 transfer_function = FrequencyResponse()
1597 if self.tmax - self.tmin <= tfade*2.:
1598 raise TraceTooShort(
1599 'Trace %s.%s.%s.%s too short for fading length setting. '
1600 'trace length = %g, fading length = %g'
1601 % (self.nslc_id + (self.tmax-self.tmin, tfade)))
1603 if freqlimits is None and (
1604 transfer_function is None or transfer_function.is_scalar()):
1606 # special case for flat responses
1608 output = self.copy()
1609 data = self.ydata
1610 ndata = data.size
1612 if transfer_function is not None:
1613 c = num.abs(transfer_function.evaluate(num.ones(1))[0])
1615 if invert:
1616 c = 1.0/c
1618 data *= c
1620 if tfade != 0.0:
1621 data *= costaper(
1622 0., tfade, self.deltat*(ndata-1)-tfade, self.deltat*ndata,
1623 ndata, self.deltat)
1625 output.ydata = data
1627 else:
1628 ndata = self.ydata.size
1629 ntrans = nextpow2(ndata*1.2)
1630 coefs = self._get_tapered_coefs(
1631 ntrans, freqlimits, transfer_function, invert=invert)
1633 data = self.ydata
1635 data_pad = num.zeros(ntrans, dtype=float)
1636 data_pad[:ndata] = data
1637 if demean:
1638 data_pad[:ndata] -= data.mean()
1640 if tfade != 0.0:
1641 data_pad[:ndata] *= costaper(
1642 0., tfade, self.deltat*(ndata-1)-tfade, self.deltat*ndata,
1643 ndata, self.deltat)
1645 fdata = num.fft.rfft(data_pad)
1646 fdata *= coefs
1647 ddata = num.fft.irfft(fdata)
1648 output = self.copy()
1649 output.ydata = ddata[:ndata]
1651 if cut_off_fading and tfade != 0.0:
1652 try:
1653 output.chop(output.tmin+tfade, output.tmax-tfade, inplace=True)
1654 except NoData:
1655 raise TraceTooShort(
1656 'Trace %s.%s.%s.%s too short for fading length setting. '
1657 'trace length = %g, fading length = %g'
1658 % (self.nslc_id + (self.tmax-self.tmin, tfade)))
1659 else:
1660 output.ydata = output.ydata.copy()
1662 return output
1664 def differentiate(self, n=1, order=4, inplace=True):
1665 '''
1666 Approximate first or second derivative of the trace.
1668 :param n: 1 for first derivative, 2 for second
1669 :param order: order of the approximation 2 and 4 are supported
1670 :param inplace: if ``True`` the trace is differentiated in place,
1671 otherwise a new trace object with the derivative is returned.
1673 Raises :py:exc:`ValueError` for unsupported `n` or `order`.
1675 See :py:func:`~pyrocko.util.diff_fd` for implementation details.
1676 '''
1678 ddata = util.diff_fd(n, order, self.deltat, self.ydata)
1680 if inplace:
1681 self.ydata = ddata
1682 else:
1683 output = self.copy(data=False)
1684 output.set_ydata(ddata)
1685 return output
1687 def drop_chain_cache(self):
1688 if self._pchain:
1689 self._pchain.clear()
1691 def init_chain(self):
1692 self._pchain = pchain.Chain(
1693 do_downsample,
1694 do_extend,
1695 do_pre_taper,
1696 do_fft,
1697 do_filter,
1698 do_ifft)
1700 def run_chain(self, tmin, tmax, deltat, setup, nocache):
1701 if setup.domain == 'frequency_domain':
1702 _, _, data = self._pchain(
1703 (self, deltat),
1704 (tmin, tmax),
1705 (setup.taper,),
1706 (setup.filter,),
1707 (setup.filter,),
1708 nocache=nocache)
1710 return num.abs(data), num.abs(data)
1712 else:
1713 processed = self._pchain(
1714 (self, deltat),
1715 (tmin, tmax),
1716 (setup.taper,),
1717 (setup.filter,),
1718 (setup.filter,),
1719 (),
1720 nocache=nocache)
1722 if setup.domain == 'time_domain':
1723 data = processed.get_ydata()
1725 elif setup.domain == 'envelope':
1726 processed = processed.envelope(inplace=False)
1728 elif setup.domain == 'absolute':
1729 processed.set_ydata(num.abs(processed.get_ydata()))
1731 return processed.get_ydata(), processed
1733 def misfit(self, candidate, setup, nocache=False, debug=False):
1734 '''
1735 Calculate misfit and normalization factor against candidate trace.
1737 :param candidate: :py:class:`Trace` object
1738 :param setup: :py:class:`MisfitSetup` object
1739 :returns: tuple ``(m, n)``, where m is the misfit value and n is the
1740 normalization divisor
1742 If the sampling rates of ``self`` and ``candidate`` differ, the trace
1743 with the higher sampling rate will be downsampled.
1744 '''
1746 a = self
1747 b = candidate
1749 for tr in (a, b):
1750 if not tr._pchain:
1751 tr.init_chain()
1753 deltat = max(a.deltat, b.deltat)
1754 tmin = min(a.tmin, b.tmin) - deltat
1755 tmax = max(a.tmax, b.tmax) + deltat
1757 adata, aproc = a.run_chain(tmin, tmax, deltat, setup, nocache)
1758 bdata, bproc = b.run_chain(tmin, tmax, deltat, setup, nocache)
1760 if setup.domain != 'cc_max_norm':
1761 m, n = Lx_norm(bdata, adata, norm=setup.norm)
1762 else:
1763 ctr = correlate(aproc, bproc, mode='full', normalization='normal')
1764 ccmax = ctr.max()[1]
1765 m = 0.5 - 0.5 * ccmax
1766 n = 0.5
1768 if debug:
1769 return m, n, aproc, bproc
1770 else:
1771 return m, n
1773 def spectrum(self, pad_to_pow2=False, tfade=None):
1774 '''
1775 Get FFT spectrum of trace.
1777 :param pad_to_pow2: whether to zero-pad the data to next larger
1778 power-of-two length
1779 :param tfade: ``None`` or a time length in seconds, to apply cosine
1780 shaped tapers to both
1782 :returns: a tuple with (frequencies, values)
1783 '''
1785 ndata = self.ydata.size
1787 if pad_to_pow2:
1788 ntrans = nextpow2(ndata)
1789 else:
1790 ntrans = ndata
1792 if tfade is None:
1793 ydata = self.ydata
1794 else:
1795 ydata = self.ydata * costaper(
1796 0., tfade, self.deltat*(ndata-1)-tfade, self.deltat*ndata,
1797 ndata, self.deltat)
1799 fydata = num.fft.rfft(ydata, ntrans)
1800 df = 1./(ntrans*self.deltat)
1801 fxdata = num.arange(len(fydata))*df
1802 return fxdata, fydata
1804 def multi_filter(self, filter_freqs, bandwidth):
1806 class Gauss(FrequencyResponse):
1807 f0 = Float.T()
1808 a = Float.T(default=1.0)
1810 def __init__(self, f0, a=1.0, **kwargs):
1811 FrequencyResponse.__init__(self, f0=f0, a=a, **kwargs)
1813 def evaluate(self, freqs):
1814 omega0 = 2.*math.pi*self.f0
1815 omega = 2.*math.pi*freqs
1816 return num.exp(-((omega-omega0)
1817 / (self.a*omega0))**2)
1819 freqs, coefs = self.spectrum()
1820 n = self.data_len()
1821 nfilt = len(filter_freqs)
1822 signal_tf = num.zeros((nfilt, n))
1823 centroid_freqs = num.zeros(nfilt)
1824 for ifilt, f0 in enumerate(filter_freqs):
1825 taper = Gauss(f0, a=bandwidth)
1826 weights = taper.evaluate(freqs)
1827 nhalf = freqs.size
1828 analytic_spec = num.zeros(n, dtype=complex)
1829 analytic_spec[:nhalf] = coefs*weights
1831 enorm = num.abs(analytic_spec[:nhalf])**2
1832 enorm /= num.sum(enorm)
1834 if n % 2 == 0:
1835 analytic_spec[1:nhalf-1] *= 2.
1836 else:
1837 analytic_spec[1:nhalf] *= 2.
1839 analytic = num.fft.ifft(analytic_spec)
1840 signal_tf[ifilt, :] = num.abs(analytic)
1842 enorm = num.abs(analytic_spec[:nhalf])**2
1843 enorm /= num.sum(enorm)
1844 centroid_freqs[ifilt] = num.sum(freqs*enorm)
1846 return centroid_freqs, signal_tf
1848 def _get_tapered_coefs(
1849 self, ntrans, freqlimits, transfer_function, invert=False):
1851 deltaf = 1./(self.deltat*ntrans)
1852 nfreqs = ntrans//2 + 1
1853 transfer = num.ones(nfreqs, dtype=complex)
1854 hi = snapper(nfreqs, deltaf)
1855 if freqlimits is not None:
1856 a, b, c, d = freqlimits
1857 freqs = num.arange(hi(d)-hi(a), dtype=float)*deltaf \
1858 + hi(a)*deltaf
1860 if invert:
1861 coeffs = transfer_function.evaluate(freqs)
1862 if num.any(coeffs == 0.0):
1863 raise InfiniteResponse('%s.%s.%s.%s' % self.nslc_id)
1865 transfer[hi(a):hi(d)] = 1.0 / transfer_function.evaluate(freqs)
1866 else:
1867 transfer[hi(a):hi(d)] = transfer_function.evaluate(freqs)
1869 tapered_transfer = costaper(a, b, c, d, nfreqs, deltaf)*transfer
1870 else:
1871 if invert:
1872 raise Exception(
1873 'transfer: `freqlimits` must be given when `invert` is '
1874 'set to `True`')
1876 freqs = num.arange(nfreqs) * deltaf
1877 tapered_transfer = transfer_function.evaluate(freqs)
1879 tapered_transfer[0] = 0.0 # don't introduce static offsets
1880 return tapered_transfer
1882 def fill_template(self, template, **additional):
1883 '''
1884 Fill string template with trace metadata.
1886 Uses normal python '%(placeholder)s' string templates. The following
1887 placeholders are considered: ``network``, ``station``, ``location``,
1888 ``channel``, ``tmin`` (time of first sample), ``tmax`` (time of last
1889 sample), ``tmin_ms``, ``tmax_ms``, ``tmin_us``, ``tmax_us``,
1890 ``tmin_year``, ``tmax_year``, ``tmin_month``, ``tmax_month``,
1891 ``tmin_day``, ``tmax_day``, ``julianday``. The variants ending with
1892 ``'_ms'`` include milliseconds, those with ``'_us'`` include
1893 microseconds, those with ``'_year'`` contain only the year.
1894 '''
1896 template = template.replace('%n', '%(network)s')\
1897 .replace('%s', '%(station)s')\
1898 .replace('%l', '%(location)s')\
1899 .replace('%c', '%(channel)s')\
1900 .replace('%b', '%(tmin)s')\
1901 .replace('%e', '%(tmax)s')\
1902 .replace('%j', '%(julianday)s')
1904 params = dict(
1905 zip(('network', 'station', 'location', 'channel'), self.nslc_id))
1906 params['tmin'] = util.time_to_str(
1907 self.tmin, format='%Y-%m-%d_%H-%M-%S')
1908 params['tmax'] = util.time_to_str(
1909 self.tmax, format='%Y-%m-%d_%H-%M-%S')
1910 params['tmin_ms'] = util.time_to_str(
1911 self.tmin, format='%Y-%m-%d_%H-%M-%S.3FRAC')
1912 params['tmax_ms'] = util.time_to_str(
1913 self.tmax, format='%Y-%m-%d_%H-%M-%S.3FRAC')
1914 params['tmin_us'] = util.time_to_str(
1915 self.tmin, format='%Y-%m-%d_%H-%M-%S.6FRAC')
1916 params['tmax_us'] = util.time_to_str(
1917 self.tmax, format='%Y-%m-%d_%H-%M-%S.6FRAC')
1918 params['tmin_year'], params['tmin_month'], params['tmin_day'] \
1919 = util.time_to_str(self.tmin, format='%Y-%m-%d').split('-')
1920 params['tmax_year'], params['tmax_month'], params['tmax_day'] \
1921 = util.time_to_str(self.tmax, format='%Y-%m-%d').split('-')
1922 params['julianday'] = util.julian_day_of_year(self.tmin)
1923 params.update(additional)
1924 return template % params
1926 def plot(self):
1927 '''
1928 Show trace with matplotlib.
1930 See also: :py:meth:`Trace.snuffle`.
1931 '''
1933 import pylab
1934 pylab.plot(self.get_xdata(), self.get_ydata())
1935 name = '%s %s %s - %s' % (
1936 self.channel,
1937 self.station,
1938 time.strftime("%d-%m-%y %H:%M:%S", time.gmtime(self.tmin)),
1939 time.strftime("%d-%m-%y %H:%M:%S", time.gmtime(self.tmax)))
1941 pylab.title(name)
1942 pylab.show()
1944 def snuffle(self, **kwargs):
1945 '''
1946 Show trace in a snuffler window.
1948 :param stations: list of `pyrocko.model.Station` objects or ``None``
1949 :param events: list of `pyrocko.model.Event` objects or ``None``
1950 :param markers: list of `pyrocko.gui.util.Marker` objects or ``None``
1951 :param ntracks: float, number of tracks to be shown initially (default:
1952 12)
1953 :param follow: time interval (in seconds) for real time follow mode or
1954 ``None``
1955 :param controls: bool, whether to show the main controls (default:
1956 ``True``)
1957 :param opengl: bool, whether to use opengl (default: ``False``)
1958 '''
1960 return snuffle([self], **kwargs)
1963def snuffle(traces, **kwargs):
1964 '''
1965 Show traces in a snuffler window.
1967 :param stations: list of `pyrocko.model.Station` objects or ``None``
1968 :param events: list of `pyrocko.model.Event` objects or ``None``
1969 :param markers: list of `pyrocko.gui.util.Marker` objects or ``None``
1970 :param ntracks: float, number of tracks to be shown initially (default: 12)
1971 :param follow: time interval (in seconds) for real time follow mode or
1972 ``None``
1973 :param controls: bool, whether to show the main controls (default:
1974 ``True``)
1975 :param opengl: bool, whether to use opengl (default: ``False``)
1976 '''
1978 from pyrocko import pile
1979 from pyrocko.gui import snuffler
1980 p = pile.Pile()
1981 if traces:
1982 trf = pile.MemTracesFile(None, traces)
1983 p.add_file(trf)
1984 return snuffler.snuffle(p, **kwargs)
1987class InfiniteResponse(Exception):
1988 '''
1989 This exception is raised by :py:class:`Trace` operations when deconvolution
1990 of a frequency response (instrument response transfer function) would
1991 result in a division by zero.
1992 '''
1995class MisalignedTraces(Exception):
1996 '''
1997 This exception is raised by some :py:class:`Trace` operations when tmin,
1998 tmax or number of samples do not match.
1999 '''
2001 pass
2004class NoData(Exception):
2005 '''
2006 This exception is raised by some :py:class:`Trace` operations when no or
2007 not enough data is available.
2008 '''
2010 pass
2013class AboveNyquist(Exception):
2014 '''
2015 This exception is raised by some :py:class:`Trace` operations when given
2016 frequencies are above the Nyquist frequency.
2017 '''
2019 pass
2022class TraceTooShort(Exception):
2023 '''
2024 This exception is raised by some :py:class:`Trace` operations when the
2025 trace is too short.
2026 '''
2028 pass
2031class ResamplingFailed(Exception):
2032 pass
2035def minmax(traces, key=None, mode='minmax', outer_mode='minmax'):
2037 '''
2038 Get data range given traces grouped by selected pattern.
2040 :param key: a callable which takes as single argument a trace and returns a
2041 key for the grouping of the results. If this is ``None``, the default,
2042 ``lambda tr: (tr.network, tr.station, tr.location, tr.channel)`` is
2043 used.
2044 :param mode: ``'minmax'`` or floating point number. If this is
2045 ``'minmax'``, minimum and maximum of the traces are used, if it is a
2046 number, mean +- standard deviation times ``mode`` is used.
2048 param outer_mode: ``'minmax'`` to use mininum and maximum of the
2049 single-trace ranges, or ``'robust'`` to use the interval to discard 10%
2050 extreme values on either end.
2052 :returns: a dict with the combined data ranges.
2054 Examples::
2056 ranges = minmax(traces, lambda tr: tr.channel)
2057 print ranges['N'] # print min & max of all traces with channel == 'N'
2058 print ranges['E'] # print min & max of all traces with channel == 'E'
2060 ranges = minmax(traces, lambda tr: (tr.network, tr.station))
2061 print ranges['GR', 'HAM3'] # print min & max of all traces with
2062 # network == 'GR' and station == 'HAM3'
2064 ranges = minmax(traces, lambda tr: None)
2065 print ranges[None] # prints min & max of all traces
2066 '''
2068 if key is None:
2069 key = _default_key
2071 ranges = defaultdict(list)
2072 for trace in traces:
2073 if isinstance(mode, str) and mode == 'minmax':
2074 mi, ma = num.nanmin(trace.ydata), num.nanmax(trace.ydata)
2075 else:
2076 mean = trace.ydata.mean()
2077 std = trace.ydata.std()
2078 mi, ma = mean-std*mode, mean+std*mode
2080 k = key(trace)
2081 ranges[k].append((mi, ma))
2083 for k in ranges:
2084 mins, maxs = num.array(ranges[k]).T
2085 if outer_mode == 'minmax':
2086 ranges[k] = num.nanmin(mins), num.nanmax(maxs)
2087 elif outer_mode == 'robust':
2088 ranges[k] = num.percentile(mins, 10.), num.percentile(maxs, 90.)
2090 return ranges
2093def minmaxtime(traces, key=None):
2095 '''
2096 Get time range given traces grouped by selected pattern.
2098 :param key: a callable which takes as single argument a trace and returns a
2099 key for the grouping of the results. If this is ``None``, the default,
2100 ``lambda tr: (tr.network, tr.station, tr.location, tr.channel)`` is
2101 used.
2103 :returns: a dict with the combined data ranges.
2104 '''
2106 if key is None:
2107 key = _default_key
2109 ranges = {}
2110 for trace in traces:
2111 mi, ma = trace.tmin, trace.tmax
2112 k = key(trace)
2113 if k not in ranges:
2114 ranges[k] = mi, ma
2115 else:
2116 tmi, tma = ranges[k]
2117 ranges[k] = min(tmi, mi), max(tma, ma)
2119 return ranges
2122def degapper(
2123 traces,
2124 maxgap=5,
2125 fillmethod='interpolate',
2126 deoverlap='use_second',
2127 maxlap=None):
2129 '''
2130 Try to connect traces and remove gaps.
2132 This method will combine adjacent traces, which match in their network,
2133 station, location and channel attributes. Overlapping parts are handled
2134 according to the ``deoverlap`` argument.
2136 :param traces: input traces, must be sorted by their full_id attribute.
2137 :param maxgap: maximum number of samples to interpolate.
2138 :param fillmethod: what to put into the gaps: 'interpolate' or 'zeros'.
2139 :param deoverlap: how to handle overlaps: 'use_second' to use data from
2140 second trace (default), 'use_first' to use data from first trace,
2141 'crossfade_cos' to crossfade with cosine taper, 'add' to add amplitude
2142 values.
2143 :param maxlap: maximum number of samples of overlap which are removed
2145 :returns: list of traces
2146 '''
2148 in_traces = traces
2149 out_traces = []
2150 if not in_traces:
2151 return out_traces
2152 out_traces.append(in_traces.pop(0))
2153 while in_traces:
2155 a = out_traces[-1]
2156 b = in_traces.pop(0)
2158 avirt, bvirt = a.ydata is None, b.ydata is None
2159 assert avirt == bvirt, \
2160 'traces given to degapper() must either all have data or have ' \
2161 'no data.'
2163 virtual = avirt and bvirt
2165 if (a.nslc_id == b.nslc_id and a.deltat == b.deltat
2166 and a.data_len() >= 1 and b.data_len() >= 1
2167 and (virtual or a.ydata.dtype == b.ydata.dtype)):
2169 dist = (b.tmin-(a.tmin+(a.data_len()-1)*a.deltat))/a.deltat
2170 idist = int(round(dist))
2171 if abs(dist - idist) > 0.05 and idist <= maxgap:
2172 # logger.warning('Cannot degap traces with displaced sampling '
2173 # '(%s, %s, %s, %s)' % a.nslc_id)
2174 pass
2175 else:
2176 if 1 < idist <= maxgap:
2177 if not virtual:
2178 if fillmethod == 'interpolate':
2179 filler = a.ydata[-1] + (
2180 ((1.0 + num.arange(idist-1, dtype=float))
2181 / idist) * (b.ydata[0]-a.ydata[-1])
2182 ).astype(a.ydata.dtype)
2183 elif fillmethod == 'zeros':
2184 filler = num.zeros(idist-1, dtype=a.ydata.dtype)
2185 a.ydata = num.concatenate((a.ydata, filler, b.ydata))
2186 a.tmax = b.tmax
2187 if a.mtime and b.mtime:
2188 a.mtime = max(a.mtime, b.mtime)
2189 continue
2191 elif idist == 1:
2192 if not virtual:
2193 a.ydata = num.concatenate((a.ydata, b.ydata))
2194 a.tmax = b.tmax
2195 if a.mtime and b.mtime:
2196 a.mtime = max(a.mtime, b.mtime)
2197 continue
2199 elif idist <= 0 and (maxlap is None or -maxlap < idist):
2200 if b.tmax > a.tmax:
2201 if not virtual:
2202 na = a.ydata.size
2203 n = -idist+1
2204 if deoverlap == 'use_second':
2205 a.ydata = num.concatenate(
2206 (a.ydata[:-n], b.ydata))
2207 elif deoverlap in ('use_first', 'crossfade_cos'):
2208 a.ydata = num.concatenate(
2209 (a.ydata, b.ydata[n:]))
2210 elif deoverlap == 'add':
2211 a.ydata[-n:] += b.ydata[:n]
2212 a.ydata = num.concatenate(
2213 (a.ydata, b.ydata[n:]))
2214 else:
2215 assert False, 'unknown deoverlap method'
2217 if deoverlap == 'crossfade_cos':
2218 n = -idist+1
2219 taper = 0.5-0.5*num.cos(
2220 (1.+num.arange(n))/(1.+n)*num.pi)
2221 a.ydata[na-n:na] *= 1.-taper
2222 a.ydata[na-n:na] += b.ydata[:n] * taper
2224 a.tmax = b.tmax
2225 if a.mtime and b.mtime:
2226 a.mtime = max(a.mtime, b.mtime)
2227 continue
2228 else:
2229 # make short second trace vanish
2230 continue
2232 if b.data_len() >= 1:
2233 out_traces.append(b)
2235 for tr in out_traces:
2236 tr._update_ids()
2238 return out_traces
2241def rotate(traces, azimuth, in_channels, out_channels):
2242 '''
2243 2D rotation of traces.
2245 :param traces: list of input traces
2246 :param azimuth: difference of the azimuths of the component directions
2247 (azimuth of out_channels[0]) - (azimuth of in_channels[0])
2248 :param in_channels: names of the input channels (e.g. 'N', 'E')
2249 :param out_channels: names of the output channels (e.g. 'R', 'T')
2250 :returns: list of rotated traces
2251 '''
2253 phi = azimuth/180.*math.pi
2254 cphi = math.cos(phi)
2255 sphi = math.sin(phi)
2256 rotated = []
2257 in_channels = tuple(_channels_to_names(in_channels))
2258 out_channels = tuple(_channels_to_names(out_channels))
2259 for a in traces:
2260 for b in traces:
2261 if ((a.channel, b.channel) == in_channels and
2262 a.nslc_id[:3] == b.nslc_id[:3] and
2263 abs(a.deltat-b.deltat) < a.deltat*0.001):
2264 tmin = max(a.tmin, b.tmin)
2265 tmax = min(a.tmax, b.tmax)
2267 if tmin < tmax:
2268 ac = a.chop(tmin, tmax, inplace=False, include_last=True)
2269 bc = b.chop(tmin, tmax, inplace=False, include_last=True)
2270 if abs(ac.tmin - bc.tmin) > ac.deltat*0.01:
2271 logger.warning(
2272 'Cannot rotate traces with displaced sampling '
2273 '(%s, %s, %s, %s)' % a.nslc_id)
2274 continue
2276 acydata = ac.get_ydata()*cphi+bc.get_ydata()*sphi
2277 bcydata = -ac.get_ydata()*sphi+bc.get_ydata()*cphi
2278 ac.set_ydata(acydata)
2279 bc.set_ydata(bcydata)
2281 ac.set_codes(channel=out_channels[0])
2282 bc.set_codes(channel=out_channels[1])
2283 rotated.append(ac)
2284 rotated.append(bc)
2286 return rotated
2289def rotate_to_rt(n, e, source, receiver, out_channels=('R', 'T')):
2290 '''
2291 Rotate traces from NE to RT system.
2293 :param n:
2294 North trace.
2295 :type n:
2296 :py:class:`~pyrocko.trace.Trace`
2298 :param e:
2299 East trace.
2300 :type e:
2301 :py:class:`~pyrocko.trace.Trace`
2303 :param source:
2304 Source of the recorded signal.
2305 :type source:
2306 :py:class:`pyrocko.gf.seismosizer.Source`
2308 :param receiver:
2309 Receiver of the recorded signal.
2310 :type receiver:
2311 :py:class:`pyrocko.model.location.Location`
2313 :param out_channels:
2314 Channel codes of the output channels (radial, transversal).
2315 Default is ('R', 'T').
2316 .
2317 :type out_channels
2318 optional, tuple[str, str]
2320 :returns:
2321 Rotated traces (radial, transversal).
2322 :rtype:
2323 tuple[
2324 :py:class:`~pyrocko.trace.Trace`,
2325 :py:class:`~pyrocko.trace.Trace`]
2326 '''
2327 azimuth = orthodrome.azimuth(receiver, source) + 180.
2328 in_channels = n.channel, e.channel
2329 out = rotate(
2330 [n, e], azimuth,
2331 in_channels=in_channels,
2332 out_channels=out_channels)
2334 assert len(out) == 2
2335 for tr in out:
2336 if tr.channel == out_channels[0]:
2337 r = tr
2338 elif tr.channel == out_channels[1]:
2339 t = tr
2340 else:
2341 assert False
2343 return r, t
2346def rotate_to_lqt(traces, backazimuth, incidence, in_channels,
2347 out_channels=('L', 'Q', 'T')):
2348 '''
2349 Rotate traces from ZNE to LQT system.
2351 :param traces: list of traces in arbitrary order
2352 :param backazimuth: backazimuth in degrees clockwise from north
2353 :param incidence: incidence angle in degrees from vertical
2354 :param in_channels: input channel names
2355 :param out_channels: output channel names (default: ('L', 'Q', 'T'))
2356 :returns: list of transformed traces
2357 '''
2358 i = incidence/180.*num.pi
2359 b = backazimuth/180.*num.pi
2361 ci = num.cos(i)
2362 cb = num.cos(b)
2363 si = num.sin(i)
2364 sb = num.sin(b)
2366 rotmat = num.array(
2367 [[ci, -cb*si, -sb*si], [si, cb*ci, sb*ci], [0., sb, -cb]])
2368 return project(traces, rotmat, in_channels, out_channels)
2371def _decompose(a):
2372 '''
2373 Decompose matrix into independent submatrices.
2374 '''
2376 def depends(iout, a):
2377 row = a[iout, :]
2378 return set(num.arange(row.size).compress(row != 0.0))
2380 def provides(iin, a):
2381 col = a[:, iin]
2382 return set(num.arange(col.size).compress(col != 0.0))
2384 a = num.asarray(a)
2385 outs = set(range(a.shape[0]))
2386 systems = []
2387 while outs:
2388 iout = outs.pop()
2390 gout = set()
2391 for iin in depends(iout, a):
2392 gout.update(provides(iin, a))
2394 if not gout:
2395 continue
2397 gin = set()
2398 for iout2 in gout:
2399 gin.update(depends(iout2, a))
2401 if not gin:
2402 continue
2404 for iout2 in gout:
2405 if iout2 in outs:
2406 outs.remove(iout2)
2408 gin = list(gin)
2409 gin.sort()
2410 gout = list(gout)
2411 gout.sort()
2413 systems.append((gin, gout, a[gout, :][:, gin]))
2415 return systems
2418def _channels_to_names(channels):
2419 from pyrocko import squirrel
2420 names = []
2421 for ch in channels:
2422 if isinstance(ch, model.Channel):
2423 names.append(ch.name)
2424 elif isinstance(ch, squirrel.Channel):
2425 names.append(ch.codes.channel)
2426 else:
2427 names.append(ch)
2429 return names
2432def project(traces, matrix, in_channels, out_channels):
2433 '''
2434 Affine transform of three-component traces.
2436 Compute matrix-vector product of three-component traces, to e.g. rotate
2437 traces into a different basis. The traces are distinguished and ordered by
2438 their channel attribute. The tranform is applied to overlapping parts of
2439 any appropriate combinations of the input traces. This should allow this
2440 function to be robust with data gaps. It also tries to apply the
2441 tranformation to subsets of the channels, if this is possible, so that, if
2442 for example a vertical compontent is missing, horizontal components can
2443 still be rotated.
2445 :param traces: list of traces in arbitrary order
2446 :param matrix: tranformation matrix
2447 :param in_channels: input channel names
2448 :param out_channels: output channel names
2449 :returns: list of transformed traces
2450 '''
2452 in_channels = tuple(_channels_to_names(in_channels))
2453 out_channels = tuple(_channels_to_names(out_channels))
2454 systems = _decompose(matrix)
2456 # fallback to full matrix if some are not quadratic
2457 for iins, iouts, submatrix in systems:
2458 if submatrix.shape[0] != submatrix.shape[1]:
2459 if len(in_channels) != 3 or len(out_channels) != 3:
2460 return []
2461 else:
2462 return _project3(traces, matrix, in_channels, out_channels)
2464 projected = []
2465 for iins, iouts, submatrix in systems:
2466 in_cha = tuple([in_channels[iin] for iin in iins])
2467 out_cha = tuple([out_channels[iout] for iout in iouts])
2468 if submatrix.shape[0] == 1:
2469 projected.extend(_project1(traces, submatrix, in_cha, out_cha))
2470 elif submatrix.shape[1] == 2:
2471 projected.extend(_project2(traces, submatrix, in_cha, out_cha))
2472 else:
2473 projected.extend(_project3(traces, submatrix, in_cha, out_cha))
2475 return projected
2478def project_dependencies(matrix, in_channels, out_channels):
2479 '''
2480 Figure out what dependencies project() would produce.
2481 '''
2483 in_channels = tuple(_channels_to_names(in_channels))
2484 out_channels = tuple(_channels_to_names(out_channels))
2485 systems = _decompose(matrix)
2487 subpro = []
2488 for iins, iouts, submatrix in systems:
2489 if submatrix.shape[0] != submatrix.shape[1]:
2490 subpro.append((matrix, in_channels, out_channels))
2492 if not subpro:
2493 for iins, iouts, submatrix in systems:
2494 in_cha = tuple([in_channels[iin] for iin in iins])
2495 out_cha = tuple([out_channels[iout] for iout in iouts])
2496 subpro.append((submatrix, in_cha, out_cha))
2498 deps = {}
2499 for mat, in_cha, out_cha in subpro:
2500 for oc in out_cha:
2501 if oc not in deps:
2502 deps[oc] = []
2504 for ic in in_cha:
2505 deps[oc].append(ic)
2507 return deps
2510def _project1(traces, matrix, in_channels, out_channels):
2511 assert len(in_channels) == 1
2512 assert len(out_channels) == 1
2513 assert matrix.shape == (1, 1)
2515 projected = []
2516 for a in traces:
2517 if not (a.channel,) == in_channels:
2518 continue
2520 ac = a.copy()
2521 ac.set_ydata(matrix[0, 0]*a.get_ydata())
2522 ac.set_codes(channel=out_channels[0])
2523 projected.append(ac)
2525 return projected
2528def _project2(traces, matrix, in_channels, out_channels):
2529 assert len(in_channels) == 2
2530 assert len(out_channels) == 2
2531 assert matrix.shape == (2, 2)
2532 projected = []
2533 for a in traces:
2534 for b in traces:
2535 if not ((a.channel, b.channel) == in_channels and
2536 a.nslc_id[:3] == b.nslc_id[:3] and
2537 abs(a.deltat-b.deltat) < a.deltat*0.001):
2538 continue
2540 tmin = max(a.tmin, b.tmin)
2541 tmax = min(a.tmax, b.tmax)
2543 if tmin > tmax:
2544 continue
2546 ac = a.chop(tmin, tmax, inplace=False, include_last=True)
2547 bc = b.chop(tmin, tmax, inplace=False, include_last=True)
2548 if abs(ac.tmin - bc.tmin) > ac.deltat*0.01:
2549 logger.warning(
2550 'Cannot project traces with displaced sampling '
2551 '(%s, %s, %s, %s)' % a.nslc_id)
2552 continue
2554 acydata = num.dot(matrix[0], (ac.get_ydata(), bc.get_ydata()))
2555 bcydata = num.dot(matrix[1], (ac.get_ydata(), bc.get_ydata()))
2557 ac.set_ydata(acydata)
2558 bc.set_ydata(bcydata)
2560 ac.set_codes(channel=out_channels[0])
2561 bc.set_codes(channel=out_channels[1])
2563 projected.append(ac)
2564 projected.append(bc)
2566 return projected
2569def _project3(traces, matrix, in_channels, out_channels):
2570 assert len(in_channels) == 3
2571 assert len(out_channels) == 3
2572 assert matrix.shape == (3, 3)
2573 projected = []
2574 for a in traces:
2575 for b in traces:
2576 for c in traces:
2577 if not ((a.channel, b.channel, c.channel) == in_channels
2578 and a.nslc_id[:3] == b.nslc_id[:3]
2579 and b.nslc_id[:3] == c.nslc_id[:3]
2580 and abs(a.deltat-b.deltat) < a.deltat*0.001
2581 and abs(b.deltat-c.deltat) < b.deltat*0.001):
2583 continue
2585 tmin = max(a.tmin, b.tmin, c.tmin)
2586 tmax = min(a.tmax, b.tmax, c.tmax)
2588 if tmin >= tmax:
2589 continue
2591 ac = a.chop(tmin, tmax, inplace=False, include_last=True)
2592 bc = b.chop(tmin, tmax, inplace=False, include_last=True)
2593 cc = c.chop(tmin, tmax, inplace=False, include_last=True)
2594 if (abs(ac.tmin - bc.tmin) > ac.deltat*0.01
2595 or abs(bc.tmin - cc.tmin) > bc.deltat*0.01):
2597 logger.warning(
2598 'Cannot project traces with displaced sampling '
2599 '(%s, %s, %s, %s)' % a.nslc_id)
2600 continue
2602 acydata = num.dot(
2603 matrix[0],
2604 (ac.get_ydata(), bc.get_ydata(), cc.get_ydata()))
2605 bcydata = num.dot(
2606 matrix[1],
2607 (ac.get_ydata(), bc.get_ydata(), cc.get_ydata()))
2608 ccydata = num.dot(
2609 matrix[2],
2610 (ac.get_ydata(), bc.get_ydata(), cc.get_ydata()))
2612 ac.set_ydata(acydata)
2613 bc.set_ydata(bcydata)
2614 cc.set_ydata(ccydata)
2616 ac.set_codes(channel=out_channels[0])
2617 bc.set_codes(channel=out_channels[1])
2618 cc.set_codes(channel=out_channels[2])
2620 projected.append(ac)
2621 projected.append(bc)
2622 projected.append(cc)
2624 return projected
2627def correlate(a, b, mode='valid', normalization=None, use_fft=False):
2628 '''
2629 Cross correlation of two traces.
2631 :param a,b: input traces
2632 :param mode: ``'valid'``, ``'full'``, or ``'same'``
2633 :param normalization: ``'normal'``, ``'gliding'``, or ``None``
2634 :param use_fft: bool, whether to do cross correlation in spectral domain
2636 :returns: trace containing cross correlation coefficients
2638 This function computes the cross correlation between two traces. It
2639 evaluates the discrete equivalent of
2641 .. math::
2643 c(t) = \\int_{-\\infty}^{\\infty} a^{\\ast}(\\tau) b(t+\\tau) d\\tau
2645 where the star denotes complex conjugate. Note, that the arguments here are
2646 swapped when compared with the :py:func:`numpy.correlate` function,
2647 which is internally called. This function should be safe even with older
2648 versions of NumPy, where the correlate function has some problems.
2650 A trace containing the cross correlation coefficients is returned. The time
2651 information of the output trace is set so that the returned cross
2652 correlation can be viewed directly as a function of time lag.
2654 Example::
2656 # align two traces a and b containing a time shifted similar signal:
2657 c = pyrocko.trace.correlate(a,b)
2658 t, coef = c.max() # get time and value of maximum
2659 b.shift(-t) # align b with a
2661 '''
2663 assert_same_sampling_rate(a, b)
2665 ya, yb = a.ydata, b.ydata
2667 # need reversed order here:
2668 yc = numpy_correlate_fixed(yb, ya, mode=mode, use_fft=use_fft)
2669 kmin, kmax = numpy_correlate_lag_range(yb, ya, mode=mode, use_fft=use_fft)
2671 if normalization == 'normal':
2672 normfac = num.sqrt(num.sum(ya**2))*num.sqrt(num.sum(yb**2))
2673 yc = yc/normfac
2675 elif normalization == 'gliding':
2676 if mode != 'valid':
2677 assert False, 'gliding normalization currently only available ' \
2678 'with "valid" mode.'
2680 if ya.size < yb.size:
2681 yshort, ylong = ya, yb
2682 else:
2683 yshort, ylong = yb, ya
2685 epsilon = 0.00001
2686 normfac_short = num.sqrt(num.sum(yshort**2))
2687 normfac = normfac_short * num.sqrt(
2688 moving_sum(ylong**2, yshort.size, mode='valid')) \
2689 + normfac_short*epsilon
2691 if yb.size <= ya.size:
2692 normfac = normfac[::-1]
2694 yc /= normfac
2696 c = a.copy()
2697 c.set_ydata(yc)
2698 c.set_codes(*merge_codes(a, b, '~'))
2699 c.shift(-c.tmin + b.tmin-a.tmin + kmin * c.deltat)
2701 return c
2704def deconvolve(
2705 a, b, waterlevel,
2706 tshift=0.,
2707 pad=0.5,
2708 fd_taper=None,
2709 pad_to_pow2=True):
2711 same_sampling_rate(a, b)
2712 assert abs(a.tmin - b.tmin) < a.deltat * 0.001
2713 deltat = a.deltat
2714 npad = int(round(a.data_len()*pad + tshift / deltat))
2716 ndata = max(a.data_len(), b.data_len())
2717 ndata_pad = ndata + npad
2719 if pad_to_pow2:
2720 ntrans = nextpow2(ndata_pad)
2721 else:
2722 ntrans = ndata
2724 aspec = num.fft.rfft(a.ydata, ntrans)
2725 bspec = num.fft.rfft(b.ydata, ntrans)
2727 out = aspec * num.conj(bspec)
2729 bautocorr = bspec*num.conj(bspec)
2730 denom = num.maximum(bautocorr, waterlevel * bautocorr.max())
2732 out /= denom
2733 df = 1/(ntrans*deltat)
2735 if fd_taper is not None:
2736 fd_taper(out, 0.0, df)
2738 ydata = num.roll(num.fft.irfft(out), int(round(tshift/deltat)))
2739 c = a.copy(data=False)
2740 c.set_ydata(ydata[:ndata])
2741 c.set_codes(*merge_codes(a, b, '/'))
2742 return c
2745def assert_same_sampling_rate(a, b, eps=1.0e-6):
2746 assert same_sampling_rate(a, b, eps), \
2747 'Sampling rates differ: %g != %g' % (a.deltat, b.deltat)
2750def same_sampling_rate(a, b, eps=1.0e-6):
2751 '''
2752 Check if two traces have the same sampling rate.
2754 :param a,b: input traces
2755 :param eps: relative tolerance
2756 '''
2758 return abs(a.deltat - b.deltat) < (a.deltat + b.deltat)*eps
2761def fix_deltat_rounding_errors(deltat):
2762 '''
2763 Try to undo sampling rate rounding errors.
2765 Fix rounding errors of sampling intervals when these are read from single
2766 precision floating point values.
2768 Assumes that the true sampling rate or sampling interval was an integer
2769 value. No correction will be applied if this would change the sampling
2770 rate by more than 0.001%.
2771 '''
2773 if deltat <= 1.0:
2774 deltat_new = 1.0 / round(1.0 / deltat)
2775 else:
2776 deltat_new = round(deltat)
2778 if abs(deltat_new - deltat) / deltat > 1e-5:
2779 deltat_new = deltat
2781 return deltat_new
2784def merge_codes(a, b, sep='-'):
2785 '''
2786 Merge network-station-location-channel codes of a pair of traces.
2787 '''
2789 o = []
2790 for xa, xb in zip(a.nslc_id, b.nslc_id):
2791 if xa == xb:
2792 o.append(xa)
2793 else:
2794 o.append(sep.join((xa, xb)))
2795 return o
2798class Taper(Object):
2799 '''
2800 Base class for tapers.
2802 Does nothing by default.
2803 '''
2805 def __call__(self, y, x0, dx):
2806 pass
2809class CosTaper(Taper):
2810 '''
2811 Cosine Taper.
2813 :param a: start of fading in
2814 :param b: end of fading in
2815 :param c: start of fading out
2816 :param d: end of fading out
2817 '''
2819 a = Float.T()
2820 b = Float.T()
2821 c = Float.T()
2822 d = Float.T()
2824 def __init__(self, a, b, c, d):
2825 Taper.__init__(self, a=a, b=b, c=c, d=d)
2827 def __call__(self, y, x0, dx):
2828 apply_costaper(self.a, self.b, self.c, self.d, y, x0, dx)
2830 def span(self, y, x0, dx):
2831 return span_costaper(self.a, self.b, self.c, self.d, y, x0, dx)
2833 def time_span(self):
2834 return self.a, self.d
2837class CosFader(Taper):
2838 '''
2839 Cosine Fader.
2841 :param xfade: fade in and fade out time in seconds (optional)
2842 :param xfrac: fade in and fade out as fraction between 0. and 1. (optional)
2844 Only one argument can be set. The other should to be ``None``.
2845 '''
2847 xfade = Float.T(optional=True)
2848 xfrac = Float.T(optional=True)
2850 def __init__(self, xfade=None, xfrac=None):
2851 Taper.__init__(self, xfade=xfade, xfrac=xfrac)
2852 assert (xfade is None) != (xfrac is None)
2853 self._xfade = xfade
2854 self._xfrac = xfrac
2856 def __call__(self, y, x0, dx):
2858 xfade = self._xfade
2860 xlen = (y.size - 1)*dx
2861 if xfade is None:
2862 xfade = xlen * self._xfrac
2864 a = x0
2865 b = x0 + xfade
2866 c = x0 + xlen - xfade
2867 d = x0 + xlen
2869 apply_costaper(a, b, c, d, y, x0, dx)
2871 def span(self, y, x0, dx):
2872 return 0, y.size
2874 def time_span(self):
2875 return None, None
2878def none_min(li):
2879 if None in li:
2880 return None
2881 else:
2882 return min(x for x in li if x is not None)
2885def none_max(li):
2886 if None in li:
2887 return None
2888 else:
2889 return max(x for x in li if x is not None)
2892class MultiplyTaper(Taper):
2893 '''
2894 Multiplication of several tapers.
2895 '''
2897 tapers = List.T(Taper.T())
2899 def __init__(self, tapers=None):
2900 if tapers is None:
2901 tapers = []
2903 Taper.__init__(self, tapers=tapers)
2905 def __call__(self, y, x0, dx):
2906 for taper in self.tapers:
2907 taper(y, x0, dx)
2909 def span(self, y, x0, dx):
2910 spans = []
2911 for taper in self.tapers:
2912 spans.append(taper.span(y, x0, dx))
2914 mins, maxs = list(zip(*spans))
2915 return min(mins), max(maxs)
2917 def time_span(self):
2918 spans = []
2919 for taper in self.tapers:
2920 spans.append(taper.time_span())
2922 mins, maxs = list(zip(*spans))
2923 return none_min(mins), none_max(maxs)
2926class GaussTaper(Taper):
2927 '''
2928 Frequency domain Gaussian filter.
2929 '''
2931 alpha = Float.T()
2933 def __init__(self, alpha):
2934 Taper.__init__(self, alpha=alpha)
2935 self._alpha = alpha
2937 def __call__(self, y, x0, dx):
2938 f = x0 + num.arange(y.size)*dx
2939 y *= num.exp(-num.pi**2 / (self._alpha**2) * f**2)
2942cached_coefficients = {}
2945def _get_cached_filter_coefs(order, corners, btype):
2946 ck = (order, tuple(corners), btype)
2947 if ck not in cached_coefficients:
2948 if len(corners) == 0:
2949 cached_coefficients[ck] = signal.butter(
2950 order, corners[0], btype=btype)
2951 else:
2952 cached_coefficients[ck] = signal.butter(
2953 order, corners, btype=btype)
2955 return cached_coefficients[ck]
2958class _globals(object):
2959 _numpy_has_correlate_flip_bug = None
2962def _default_key(tr):
2963 return (tr.network, tr.station, tr.location, tr.channel)
2966def numpy_has_correlate_flip_bug():
2967 '''
2968 Check if NumPy's correlate function reveals old behaviour.
2969 '''
2971 if _globals._numpy_has_correlate_flip_bug is None:
2972 a = num.array([0, 0, 1, 0, 0, 0, 0])
2973 b = num.array([0, 0, 0, 0, 1, 0, 0, 0])
2974 ab = num.correlate(a, b, mode='same')
2975 ba = num.correlate(b, a, mode='same')
2976 _globals._numpy_has_correlate_flip_bug = num.all(ab == ba)
2978 return _globals._numpy_has_correlate_flip_bug
2981def numpy_correlate_fixed(a, b, mode='valid', use_fft=False):
2982 '''
2983 Call :py:func:`numpy.correlate` with fixes.
2985 c[k] = sum_i a[i+k] * conj(b[i])
2987 Note that the result produced by newer numpy.correlate is always flipped
2988 with respect to the formula given in its documentation (if ascending k
2989 assumed for the output).
2990 '''
2992 if use_fft:
2993 if a.size < b.size:
2994 c = signal.fftconvolve(b[::-1], a, mode=mode)
2995 else:
2996 c = signal.fftconvolve(a, b[::-1], mode=mode)
2997 return c
2999 else:
3000 buggy = numpy_has_correlate_flip_bug()
3002 a = num.asarray(a)
3003 b = num.asarray(b)
3005 if buggy:
3006 b = num.conj(b)
3008 c = num.correlate(a, b, mode=mode)
3010 if buggy and a.size < b.size:
3011 return c[::-1]
3012 else:
3013 return c
3016def numpy_correlate_emulate(a, b, mode='valid'):
3017 '''
3018 Slow version of :py:func:`numpy.correlate` for comparison.
3019 '''
3021 a = num.asarray(a)
3022 b = num.asarray(b)
3023 kmin = -(b.size-1)
3024 klen = a.size-kmin
3025 kmin, kmax = numpy_correlate_lag_range(a, b, mode=mode)
3026 kmin = int(kmin)
3027 kmax = int(kmax)
3028 klen = kmax - kmin + 1
3029 c = num.zeros(klen, dtype=num.find_common_type((b.dtype, a.dtype), ()))
3030 for k in range(kmin, kmin+klen):
3031 imin = max(0, -k)
3032 ilen = min(b.size, a.size-k) - imin
3033 c[k-kmin] = num.sum(
3034 a[imin+k:imin+ilen+k] * num.conj(b[imin:imin+ilen]))
3036 return c
3039def numpy_correlate_lag_range(a, b, mode='valid', use_fft=False):
3040 '''
3041 Get range of lags for which :py:func:`numpy.correlate` produces values.
3042 '''
3044 a = num.asarray(a)
3045 b = num.asarray(b)
3047 kmin = -(b.size-1)
3048 if mode == 'full':
3049 klen = a.size-kmin
3050 elif mode == 'same':
3051 klen = max(a.size, b.size)
3052 kmin += (a.size+b.size-1 - max(a.size, b.size)) // 2 + \
3053 int(not use_fft and a.size % 2 == 0 and b.size > a.size)
3054 elif mode == 'valid':
3055 klen = abs(a.size - b.size) + 1
3056 kmin += min(a.size, b.size) - 1
3058 return kmin, kmin + klen - 1
3061def autocorr(x, nshifts):
3062 '''
3063 Compute biased estimate of the first autocorrelation coefficients.
3065 :param x: input array
3066 :param nshifts: number of coefficients to calculate
3067 '''
3069 mean = num.mean(x)
3070 std = num.std(x)
3071 n = x.size
3072 xdm = x - mean
3073 r = num.zeros(nshifts)
3074 for k in range(nshifts):
3075 r[k] = 1./((n-num.abs(k))*std) * num.sum(xdm[:n-k] * xdm[k:])
3077 return r
3080def yulewalker(x, order):
3081 '''
3082 Compute autoregression coefficients using Yule-Walker method.
3084 :param x: input array
3085 :param order: number of coefficients to produce
3087 A biased estimate of the autocorrelation is used. The Yule-Walker equations
3088 are solved by :py:func:`numpy.linalg.inv` instead of Levinson-Durbin
3089 recursion which is normally used.
3090 '''
3092 gamma = autocorr(x, order+1)
3093 d = gamma[1:1+order]
3094 a = num.zeros((order, order))
3095 gamma2 = num.concatenate((gamma[::-1], gamma[1:order]))
3096 for i in range(order):
3097 ioff = order-i
3098 a[i, :] = gamma2[ioff:ioff+order]
3100 return num.dot(num.linalg.inv(a), -d)
3103def moving_avg(x, n):
3104 n = int(n)
3105 cx = x.cumsum()
3106 nn = len(x)
3107 y = num.zeros(nn, dtype=cx.dtype)
3108 y[n//2:n//2+(nn-n)] = (cx[n:]-cx[:-n])/n
3109 y[:n//2] = y[n//2]
3110 y[n//2+(nn-n):] = y[n//2+(nn-n)-1]
3111 return y
3114def moving_sum(x, n, mode='valid'):
3115 n = int(n)
3116 cx = x.cumsum()
3117 nn = len(x)
3119 if mode == 'valid':
3120 if nn-n+1 <= 0:
3121 return num.zeros(0, dtype=cx.dtype)
3122 y = num.zeros(nn-n+1, dtype=cx.dtype)
3123 y[0] = cx[n-1]
3124 y[1:nn-n+1] = cx[n:nn]-cx[0:nn-n]
3126 if mode == 'full':
3127 y = num.zeros(nn+n-1, dtype=cx.dtype)
3128 if n <= nn:
3129 y[0:n] = cx[0:n]
3130 y[n:nn] = cx[n:nn]-cx[0:nn-n]
3131 y[nn:nn+n-1] = cx[-1]-cx[nn-n:nn-1]
3132 else:
3133 y[0:nn] = cx[0:nn]
3134 y[nn:n] = cx[nn-1]
3135 y[n:nn+n-1] = cx[nn-1] - cx[0:nn-1]
3137 if mode == 'same':
3138 n1 = (n-1)//2
3139 y = num.zeros(nn, dtype=cx.dtype)
3140 if n <= nn:
3141 y[0:n-n1] = cx[n1:n]
3142 y[n-n1:nn-n1] = cx[n:nn]-cx[0:nn-n]
3143 y[nn-n1:nn] = cx[nn-1] - cx[nn-n:nn-n+n1]
3144 else:
3145 y[0:max(0, nn-n1)] = cx[min(n1, nn):nn]
3146 y[max(nn-n1, 0):min(n-n1, nn)] = cx[nn-1]
3147 y[min(n-n1, nn):nn] = cx[nn-1] - cx[0:max(0, nn-(n-n1))]
3149 return y
3152def nextpow2(i):
3153 return 2**int(math.ceil(math.log(i)/math.log(2.)))
3156def snapper_w_offset(nmax, offset, delta, snapfun=math.ceil):
3157 def snap(x):
3158 return max(0, min(int(snapfun((x-offset)/delta)), nmax))
3159 return snap
3162def snapper(nmax, delta, snapfun=math.ceil):
3163 def snap(x):
3164 return max(0, min(int(snapfun(x/delta)), nmax))
3165 return snap
3168def apply_costaper(a, b, c, d, y, x0, dx):
3169 hi = snapper_w_offset(y.size, x0, dx)
3170 y[:hi(a)] = 0.
3171 y[hi(a):hi(b)] *= 0.5 \
3172 - 0.5*num.cos((dx*num.arange(hi(a), hi(b))-(a-x0))/(b-a)*num.pi)
3173 y[hi(c):hi(d)] *= 0.5 \
3174 + 0.5*num.cos((dx*num.arange(hi(c), hi(d))-(c-x0))/(d-c)*num.pi)
3175 y[hi(d):] = 0.
3178def span_costaper(a, b, c, d, y, x0, dx):
3179 hi = snapper_w_offset(y.size, x0, dx)
3180 return hi(a), hi(d) - hi(a)
3183def costaper(a, b, c, d, nfreqs, deltaf):
3184 hi = snapper(nfreqs, deltaf)
3185 tap = num.zeros(nfreqs)
3186 tap[hi(a):hi(b)] = 0.5 \
3187 - 0.5*num.cos((deltaf*num.arange(hi(a), hi(b))-a)/(b-a)*num.pi)
3188 tap[hi(b):hi(c)] = 1.
3189 tap[hi(c):hi(d)] = 0.5 \
3190 + 0.5*num.cos((deltaf*num.arange(hi(c), hi(d))-c)/(d-c)*num.pi)
3192 return tap
3195def t2ind(t, tdelta, snap=round):
3196 return int(snap(t/tdelta))
3199def hilbert(x, N=None):
3200 '''
3201 Return the hilbert transform of x of length N.
3203 (from scipy.signal, but changed to use fft and ifft from numpy.fft)
3204 '''
3206 x = num.asarray(x)
3207 if N is None:
3208 N = len(x)
3209 if N <= 0:
3210 raise ValueError("N must be positive.")
3211 if num.iscomplexobj(x):
3212 logger.warning('imaginary part of x ignored.')
3213 x = num.real(x)
3215 Xf = num.fft.fft(x, N, axis=0)
3216 h = num.zeros(N)
3217 if N % 2 == 0:
3218 h[0] = h[N//2] = 1
3219 h[1:N//2] = 2
3220 else:
3221 h[0] = 1
3222 h[1:(N+1)//2] = 2
3224 if len(x.shape) > 1:
3225 h = h[:, num.newaxis]
3226 x = num.fft.ifft(Xf*h)
3227 return x
3230def near(a, b, eps):
3231 return abs(a-b) < eps
3234def coroutine(func):
3235 def wrapper(*args, **kwargs):
3236 gen = func(*args, **kwargs)
3237 next(gen)
3238 return gen
3240 wrapper.__name__ = func.__name__
3241 wrapper.__dict__ = func.__dict__
3242 wrapper.__doc__ = func.__doc__
3243 return wrapper
3246class States(object):
3247 '''
3248 Utility to store channel-specific state in coroutines.
3249 '''
3251 def __init__(self):
3252 self._states = {}
3254 def get(self, tr):
3255 k = tr.nslc_id
3256 if k in self._states:
3257 tmin, deltat, dtype, value = self._states[k]
3258 if (near(tmin, tr.tmin, deltat/100.)
3259 and near(deltat, tr.deltat, deltat/10000.)
3260 and dtype == tr.ydata.dtype):
3262 return value
3264 return None
3266 def set(self, tr, value):
3267 k = tr.nslc_id
3268 if k in self._states and self._states[k][-1] is not value:
3269 self.free(self._states[k][-1])
3271 self._states[k] = (tr.tmax+tr.deltat, tr.deltat, tr.ydata.dtype, value)
3273 def free(self, value):
3274 pass
3277@coroutine
3278def co_list_append(list):
3279 while True:
3280 list.append((yield))
3283class ScipyBug(Exception):
3284 pass
3287@coroutine
3288def co_lfilter(target, b, a):
3289 '''
3290 Successively filter broken continuous trace data (coroutine).
3292 Create coroutine which takes :py:class:`Trace` objects, filters their data
3293 through :py:func:`scipy.signal.lfilter` and sends new :py:class:`Trace`
3294 objects containing the filtered data to target. This is useful, if one
3295 wants to filter a long continuous time series, which is split into many
3296 successive traces without producing filter artifacts at trace boundaries.
3298 Filter states are kept *per channel*, specifically, for each (network,
3299 station, location, channel) combination occuring in the input traces, a
3300 separate state is created and maintained. This makes it possible to filter
3301 multichannel or multistation data with only one :py:func:`co_lfilter`
3302 instance.
3304 Filter state is reset, when gaps occur.
3306 Use it like this::
3308 from pyrocko.trace import co_lfilter, co_list_append
3310 filtered_traces = []
3311 pipe = co_lfilter(co_list_append(filtered_traces), a, b)
3312 for trace in traces:
3313 pipe.send(trace)
3315 pipe.close()
3317 '''
3319 try:
3320 states = States()
3321 output = None
3322 while True:
3323 input = (yield)
3325 zi = states.get(input)
3326 if zi is None:
3327 zi = num.zeros(max(len(a), len(b))-1, dtype=float)
3329 output = input.copy(data=False)
3330 try:
3331 ydata, zf = signal.lfilter(b, a, input.get_ydata(), zi=zi)
3332 except ValueError:
3333 raise ScipyBug(
3334 'signal.lfilter failed: could be related to a bug '
3335 'in some older scipy versions, e.g. on opensuse42.1')
3337 output.set_ydata(ydata)
3338 states.set(input, zf)
3339 target.send(output)
3341 except GeneratorExit:
3342 target.close()
3345def co_antialias(target, q, n=None, ftype='fir'):
3346 b, a, n = util.decimate_coeffs(q, n, ftype)
3347 anti = co_lfilter(target, b, a)
3348 return anti
3351@coroutine
3352def co_dropsamples(target, q, nfir):
3353 try:
3354 states = States()
3355 while True:
3356 tr = (yield)
3357 newdeltat = q * tr.deltat
3358 ioffset = states.get(tr)
3359 if ioffset is None:
3360 # for fir filter, the first nfir samples are pulluted by
3361 # boundary effects; cut it off.
3362 # for iir this may be (much) more, we do not correct for that.
3363 # put sample instances to a time which is a multiple of the
3364 # new sampling interval.
3365 newtmin_want = math.ceil(
3366 (tr.tmin+(nfir+1)*tr.deltat) / newdeltat) * newdeltat \
3367 - (nfir/2*tr.deltat)
3368 ioffset = int(round((newtmin_want - tr.tmin)/tr.deltat))
3369 if ioffset < 0:
3370 ioffset = ioffset % q
3372 newtmin_have = tr.tmin + ioffset * tr.deltat
3373 newtr = tr.copy(data=False)
3374 newtr.deltat = newdeltat
3375 # because the fir kernel shifts data by nfir/2 samples:
3376 newtr.tmin = newtmin_have - (nfir/2*tr.deltat)
3377 newtr.set_ydata(tr.get_ydata()[ioffset::q].copy())
3378 states.set(tr, (ioffset % q - tr.data_len() % q) % q)
3379 target.send(newtr)
3381 except GeneratorExit:
3382 target.close()
3385def co_downsample(target, q, n=None, ftype='fir'):
3386 '''
3387 Successively downsample broken continuous trace data (coroutine).
3389 Create coroutine which takes :py:class:`Trace` objects, downsamples their
3390 data and sends new :py:class:`Trace` objects containing the downsampled
3391 data to target. This is useful, if one wants to downsample a long
3392 continuous time series, which is split into many successive traces without
3393 producing filter artifacts and gaps at trace boundaries.
3395 Filter states are kept *per channel*, specifically, for each (network,
3396 station, location, channel) combination occuring in the input traces, a
3397 separate state is created and maintained. This makes it possible to filter
3398 multichannel or multistation data with only one :py:func:`co_lfilter`
3399 instance.
3401 Filter state is reset, when gaps occur. The sampling instances are choosen
3402 so that they occur at (or as close as possible) to even multiples of the
3403 sampling interval of the downsampled trace (based on system time).
3404 '''
3406 b, a, n = util.decimate_coeffs(q, n, ftype)
3407 return co_antialias(co_dropsamples(target, q, n), q, n, ftype)
3410@coroutine
3411def co_downsample_to(target, deltat):
3413 decimators = {}
3414 try:
3415 while True:
3416 tr = (yield)
3417 ratio = deltat / tr.deltat
3418 rratio = round(ratio)
3419 if abs(rratio - ratio)/ratio > 0.0001:
3420 raise util.UnavailableDecimation('ratio = %g' % ratio)
3422 deci_seq = tuple(x for x in util.decitab(int(rratio)) if x != 1)
3423 if deci_seq not in decimators:
3424 pipe = target
3425 for q in deci_seq[::-1]:
3426 pipe = co_downsample(pipe, q)
3428 decimators[deci_seq] = pipe
3430 decimators[deci_seq].send(tr)
3432 except GeneratorExit:
3433 for g in decimators.values():
3434 g.close()
3437class DomainChoice(StringChoice):
3438 choices = [
3439 'time_domain',
3440 'frequency_domain',
3441 'envelope',
3442 'absolute',
3443 'cc_max_norm']
3446class MisfitSetup(Object):
3447 '''
3448 Contains misfit setup to be used in :py:func:`trace.misfit`
3450 :param description: Description of the setup
3451 :param norm: L-norm classifier
3452 :param taper: Object of :py:class:`Taper`
3453 :param filter: Object of :py:class:`FrequencyResponse`
3454 :param domain: ['time_domain', 'frequency_domain', 'envelope', 'absolute',
3455 'cc_max_norm']
3457 Can be dumped to a yaml file.
3458 '''
3460 xmltagname = 'misfitsetup'
3461 description = String.T(optional=True)
3462 norm = Int.T(optional=False)
3463 taper = Taper.T(optional=False)
3464 filter = FrequencyResponse.T(optional=True)
3465 domain = DomainChoice.T(default='time_domain')
3468def equalize_sampling_rates(trace_1, trace_2):
3469 '''
3470 Equalize sampling rates of two traces (reduce higher sampling rate to
3471 lower).
3473 :param trace_1: :py:class:`Trace` object
3474 :param trace_2: :py:class:`Trace` object
3476 Returns a copy of the resampled trace if resampling is needed.
3477 '''
3479 if same_sampling_rate(trace_1, trace_2):
3480 return trace_1, trace_2
3482 if trace_1.deltat < trace_2.deltat:
3483 t1_out = trace_1.copy()
3484 t1_out.downsample_to(deltat=trace_2.deltat, snap=True)
3485 logger.debug('Trace downsampled (return copy of trace): %s'
3486 % '.'.join(t1_out.nslc_id))
3487 return t1_out, trace_2
3489 elif trace_1.deltat > trace_2.deltat:
3490 t2_out = trace_2.copy()
3491 t2_out.downsample_to(deltat=trace_1.deltat, snap=True)
3492 logger.debug('Trace downsampled (return copy of trace): %s'
3493 % '.'.join(t2_out.nslc_id))
3494 return trace_1, t2_out
3497def Lx_norm(u, v, norm=2):
3498 '''
3499 Calculate the misfit denominator *m* and the normalization devisor *n*
3500 according to norm.
3502 The normalization divisor *n* is calculated from ``v``.
3504 :param u: :py:class:`numpy.array`
3505 :param v: :py:class:`numpy.array`
3506 :param norm: (default = 2)
3508 ``u`` and ``v`` must be of same size.
3509 '''
3511 if norm == 1:
3512 return (
3513 num.sum(num.abs(v-u)),
3514 num.sum(num.abs(v)))
3516 elif norm == 2:
3517 return (
3518 num.sqrt(num.sum((v-u)**2)),
3519 num.sqrt(num.sum(v**2)))
3521 else:
3522 return (
3523 num.power(num.sum(num.abs(num.power(v - u, norm))), 1./norm),
3524 num.power(num.sum(num.abs(num.power(v, norm))), 1./norm))
3527def do_downsample(tr, deltat):
3528 if abs(tr.deltat - deltat) / tr.deltat > 1e-6:
3529 tr = tr.copy()
3530 tr.downsample_to(deltat, snap=True, demean=False)
3531 else:
3532 if tr.tmin/tr.deltat > 1e-6 or tr.tmax/tr.deltat > 1e-6:
3533 tr = tr.copy()
3534 tr.snap()
3535 return tr
3538def do_extend(tr, tmin, tmax):
3539 if tmin < tr.tmin or tmax > tr.tmax:
3540 tr = tr.copy()
3541 tr.extend(tmin=tmin, tmax=tmax, fillmethod='repeat')
3543 return tr
3546def do_pre_taper(tr, taper):
3547 return tr.taper(taper, inplace=False, chop=True)
3550def do_fft(tr, filter):
3551 if filter is None:
3552 return tr
3553 else:
3554 ndata = tr.ydata.size
3555 nfft = nextpow2(ndata)
3556 padded = num.zeros(nfft, dtype=float)
3557 padded[:ndata] = tr.ydata
3558 spectrum = num.fft.rfft(padded)
3559 df = 1.0 / (tr.deltat * nfft)
3560 frequencies = num.arange(spectrum.size)*df
3561 return [tr, frequencies, spectrum]
3564def do_filter(inp, filter):
3565 if filter is None:
3566 return inp
3567 else:
3568 tr, frequencies, spectrum = inp
3569 spectrum *= filter.evaluate(frequencies)
3570 return [tr, frequencies, spectrum]
3573def do_ifft(inp):
3574 if isinstance(inp, Trace):
3575 return inp
3576 else:
3577 tr, _, spectrum = inp
3578 ndata = tr.ydata.size
3579 tr = tr.copy(data=False)
3580 tr.set_ydata(num.fft.irfft(spectrum)[:ndata])
3581 return tr
3584def check_alignment(t1, t2):
3585 if abs(t1.tmin-t2.tmin) > t1.deltat * 1e-4 or \
3586 abs(t1.tmax - t2.tmax) > t1.deltat * 1e-4 or \
3587 t1.ydata.shape != t2.ydata.shape:
3588 raise MisalignedTraces(
3589 'Cannot calculate misfit of %s and %s due to misaligned '
3590 'traces.' % ('.'.join(t1.nslc_id), '.'.join(t2.nslc_id)))