# http://pyrocko.org - GPLv3
#
# The Pyrocko Developers, 21st Century
# ---|P------/S----------~Lg----------
'''
This module provides basic signal processing for seismic traces.
'''

from __future__ import division, absolute_import

import time
import math
import copy
import logging
import hashlib

import numpy as num
from scipy import signal

from pyrocko import util, orthodrome, pchain, model
from pyrocko.util import reuse
from pyrocko.guts import Object, Float, Int, String, List, \
    StringChoice, Timestamp
from pyrocko.guts_array import Array
from pyrocko.model import Content

# backward compatibility
from pyrocko.util import UnavailableDecimation  # noqa
from pyrocko.response import (  # noqa
    FrequencyResponse, Evalresp, InverseEvalresp, PoleZeroResponse,
    ButterworthResponse, SampledResponse, IntegrationResponse,
    DifferentiationResponse, MultiplyResponse)

guts_prefix = 'pf'

logger = logging.getLogger('pyrocko.trace')


class Trace(Content):

    '''
    Create new trace object.

    A ``Trace`` object represents a single continuous strip of evenly sampled
    time series data. It is built from a 1D NumPy array containing the data
    samples and some attributes describing its beginning and ending time, its
    sampling rate and four string identifiers (its network, station, location
    and channel code).

    :param network: network code
    :param station: station code
    :param location: location code
    :param channel: channel code
    :param tmin: system time of first sample in [s]
    :param tmax: system time of last sample in [s] (if set to ``None`` it is
        computed from length of ``ydata``)
    :param deltat: sampling interval in [s]
    :param ydata: 1D numpy array with data samples (can be ``None`` when
        ``tmax`` is not ``None``)
    :param mtime: optional modification time

    :param meta: additional meta information (not used, but maintained by the
        library)

    The length of the network, station, location and channel codes is not
    restricted by this software, but data formats like SAC, Mini-SEED or GSE
    have different limits on the lengths of these codes. The codes set here
    are silently truncated when the trace is stored.
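
    Example (a minimal sketch; assumes a NumPy array of samples)::

        import numpy as num
        from pyrocko import trace

        data = num.random.random(1000)  # 1000 synthetic samples
        tr = trace.Trace(
            station='TEST', channel='Z', deltat=0.5, tmin=0., ydata=data)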
    '''

    agency = String.T(default='', help='Agency code (2-5)')
    network = String.T(default='', help='Deployment/network code (1-8)')
    station = String.T(default='STA', help='Station code (1-5)')
    location = String.T(default='', help='Location code (0-2)')
    channel = String.T(default='', help='Channel code (3)')
    extra = String.T(default='', help='Extra/custom code')

    tmin = Timestamp.T(default=Timestamp.D('1970-01-01 00:00:00'))
    tmax = Timestamp.T()

    deltat = Float.T(default=1.0)
    ydata = Array.T(optional=True, shape=(None,), serialize_as='base64+meta')

    mtime = Timestamp.T(optional=True)

    cached_frequencies = {}

    def __init__(self, network='', station='STA', location='', channel='',
                 tmin=0., tmax=None, deltat=1., ydata=None, mtime=None,
                 meta=None, agency='', extra=''):

        Content.__init__(self, init_props=False)

        time_float = util.get_time_float()

        if not isinstance(tmin, time_float):
            tmin = Trace.tmin.regularize_extra(tmin)

        if tmax is not None and not isinstance(tmax, time_float):
            tmax = Trace.tmax.regularize_extra(tmax)

        if mtime is not None and not isinstance(mtime, time_float):
            mtime = Trace.mtime.regularize_extra(mtime)

        self._growbuffer = None

        tmin = time_float(tmin)
        if tmax is not None:
            tmax = time_float(tmax)

        if mtime is None:
            mtime = time_float(time.time())

        self.agency, self.network, self.station, self.location, self.channel, \
            self.extra = [
                reuse(x) for x in (
                    agency, network, station, location, channel, extra)]

        self.tmin = tmin
        self.deltat = deltat

        if tmax is None:
            if ydata is not None:
                self.tmax = self.tmin + (ydata.size-1)*self.deltat
            else:
                raise Exception(
                    'fixme: trace must be created with tmax or ydata')
        else:
            n = int(round((tmax - self.tmin) / self.deltat)) + 1
            self.tmax = self.tmin + (n - 1) * self.deltat

        self.meta = meta
        self.ydata = ydata
        self.mtime = mtime
        self._update_ids()
        self.file = None
        self._pchain = None

    def __str__(self):
        fmt = min(9, max(0, -int(math.floor(math.log10(self.deltat)))))
        s = 'Trace (%s, %s, %s, %s)\n' % self.nslc_id
        s += '  timerange: %s - %s\n' % (
            util.time_to_str(self.tmin, format=fmt),
            util.time_to_str(self.tmax, format=fmt))

        s += '  delta t: %g\n' % self.deltat
        if self.meta:
            for k in sorted(self.meta.keys()):
                s += '  %s: %s\n' % (k, self.meta[k])
        return s

    @property
    def codes(self):
        return (
            self.agency, self.network, self.station, self.location,
            self.channel, self.extra)

    @property
    def time_span(self):
        return self.tmin, self.tmax

    @property
    def summary(self):
        return '%s %-16s %s %g' % (
            self.__class__.__name__, self.str_codes, self.str_time_span,
            self.deltat)

    def __getstate__(self):
        return (self.network, self.station, self.location, self.channel,
                self.tmin, self.tmax, self.deltat, self.mtime,
                self.ydata, self.meta, self.agency, self.extra)

    def __setstate__(self, state):
        if len(state) == 12:
            self.network, self.station, self.location, self.channel, \
                self.tmin, self.tmax, self.deltat, self.mtime, \
                self.ydata, self.meta, self.agency, self.extra = state

        elif len(state) == 10:
            # backward compatibility 1
            self.network, self.station, self.location, self.channel, \
                self.tmin, self.tmax, self.deltat, self.mtime, \
                self.ydata, self.meta = state

            self.agency = ''
            self.extra = ''

        else:
            # backward compatibility 2
            self.network, self.station, self.location, self.channel, \
                self.tmin, self.tmax, self.deltat, self.mtime = state
            self.ydata = None
            self.meta = None
            self.agency = ''
            self.extra = ''

        self._growbuffer = None
        self._update_ids()

    def hash(self, unsafe=False):
        sha1 = hashlib.sha1()
        for code in self.nslc_id:
            sha1.update(code.encode())
        sha1.update(self.tmin.hex().encode())
        sha1.update(self.tmax.hex().encode())
        sha1.update(self.deltat.hex().encode())

        if self.ydata is not None:
            if not unsafe:
                sha1.update(memoryview(self.ydata))
            else:
                sha1.update(memoryview(self.ydata[:32]))
                sha1.update(memoryview(self.ydata[-32:]))

        return sha1.hexdigest()

    def name(self):
        '''
        Get a short string description.
        '''

        s = '%s.%s.%s.%s, %s, %s' % (self.nslc_id + (
            util.time_to_str(self.tmin),
            util.time_to_str(self.tmax)))

        return s

    def __eq__(self, other):
        return (
            isinstance(other, Trace)
            and self.network == other.network
            and self.station == other.station
            and self.location == other.location
            and self.channel == other.channel
            and (abs(self.deltat - other.deltat)
                 < (self.deltat + other.deltat)*1e-6)
            and abs(self.tmin-other.tmin) < self.deltat*0.01
            and abs(self.tmax-other.tmax) < self.deltat*0.01
            and num.all(self.ydata == other.ydata))

    def almost_equal(self, other, rtol=1e-5, atol=0.0):
        return (
            self.network == other.network
            and self.station == other.station
            and self.location == other.location
            and self.channel == other.channel
            and (abs(self.deltat - other.deltat)
                 < (self.deltat + other.deltat)*1e-6)
            and abs(self.tmin-other.tmin) < self.deltat*0.01
            and abs(self.tmax-other.tmax) < self.deltat*0.01
            and num.allclose(self.ydata, other.ydata, rtol=rtol, atol=atol))

    def assert_almost_equal(self, other, rtol=1e-5, atol=0.0):

        assert self.network == other.network, \
            'network codes differ: %s, %s' % (self.network, other.network)
        assert self.station == other.station, \
            'station codes differ: %s, %s' % (self.station, other.station)
        assert self.location == other.location, \
            'location codes differ: %s, %s' % (self.location, other.location)
        assert self.channel == other.channel, 'channel codes differ'
        assert (abs(self.deltat - other.deltat)
                < (self.deltat + other.deltat)*1e-6), \
            'sampling intervals differ %g, %g' % (self.deltat, other.deltat)
        assert abs(self.tmin-other.tmin) < self.deltat*0.01, \
            'start times differ: %s, %s' % (
                util.time_to_str(self.tmin), util.time_to_str(other.tmin))
        assert abs(self.tmax-other.tmax) < self.deltat*0.01, \
            'end times differ: %s, %s' % (
                util.time_to_str(self.tmax), util.time_to_str(other.tmax))

        assert num.allclose(self.ydata, other.ydata, rtol=rtol, atol=atol), \
            'trace values differ'

    def __hash__(self):
        return id(self)

    def __call__(self, t, clip=False, snap=round):
        it = int(snap((t-self.tmin)/self.deltat))
        if clip:
            it = max(0, min(it, self.ydata.size-1))
        else:
            if it < 0 or self.ydata.size <= it:
                raise IndexError()

        return self.tmin+it*self.deltat, self.ydata[it]

    def interpolate(self, t, clip=False):
        '''
        Value of trace between supporting points through linear interpolation.

        :param t: time instant
        :param clip: whether to clip indices to trace ends
        '''

        t0, y0 = self(t, clip=clip, snap=math.floor)
        t1, y1 = self(t, clip=clip, snap=math.ceil)
        if t0 == t1:
            return y0
        else:
            return y0+(t-t0)/(t1-t0)*(y1-y0)

    def index_clip(self, i):
        '''
        Clip index to valid range.
        '''

        return min(max(0, i), self.ydata.size)

    def add(self, other, interpolate=True, left=0., right=0.):
        '''
        Add values of other trace (self += other).

        Add values of ``other`` trace to the values of ``self``, where it
        intersects with ``other``. This method does not change the extent of
        ``self``. If ``interpolate`` is ``True`` (the default), the values of
        ``other`` to be added are interpolated at sampling instants of
        ``self``. Linear interpolation is performed. In this case the sampling
        rate of ``other`` must be equal to or lower than that of ``self``. If
        ``interpolate`` is ``False``, the sampling rates of the two traces
        must match.
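
        Example (a sketch; assumes two overlapping traces ``tr1`` and ``tr2``
        with compatible sampling rates)::

            tr1.add(tr2)  # tr1 now holds the sum where the traces intersect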
        '''

        if interpolate:
            assert self.deltat <= other.deltat \
                or same_sampling_rate(self, other)

            other_xdata = other.get_xdata()
            xdata = self.get_xdata()
            self.ydata += num.interp(
                xdata, other_xdata, other.ydata, left=left, right=right)
        else:
            assert self.deltat == other.deltat
            ioff = int(round((other.tmin-self.tmin)/self.deltat))
            ibeg = max(0, ioff)
            iend = min(self.data_len(), ioff+other.data_len())
            self.ydata[ibeg:iend] += other.ydata[ibeg-ioff:iend-ioff]

    def mult(self, other, interpolate=True):
        '''
        Multiply with values of other trace ``(self *= other)``.

        Multiply the values of ``self`` by the values of ``other``, where it
        intersects with ``other``. This method does not change the extent of
        ``self``. If ``interpolate`` is ``True`` (the default), the values of
        ``other`` to be multiplied are interpolated at sampling instants of
        ``self``. Linear interpolation is performed. In this case the sampling
        rate of ``other`` must be equal to or lower than that of ``self``. If
        ``interpolate`` is ``False``, the sampling rates of the two traces
        must match.
        '''

        if interpolate:
            assert self.deltat <= other.deltat or \
                same_sampling_rate(self, other)

            other_xdata = other.get_xdata()
            xdata = self.get_xdata()
            self.ydata *= num.interp(
                xdata, other_xdata, other.ydata, left=0., right=0.)
        else:
            assert self.deltat == other.deltat
            ibeg1 = int(round((other.tmin-self.tmin)/self.deltat))
            ibeg2 = int(round((self.tmin-other.tmin)/self.deltat))
            iend1 = int(round((other.tmax-self.tmin)/self.deltat))+1
            iend2 = int(round((self.tmax-other.tmin)/self.deltat))+1

            ibeg1 = self.index_clip(ibeg1)
            iend1 = self.index_clip(iend1)
            ibeg2 = self.index_clip(ibeg2)
            iend2 = self.index_clip(iend2)

            self.ydata[ibeg1:iend1] *= other.ydata[ibeg2:iend2]

    def max(self):
        '''
        Get time and value of data maximum.
        '''

        i = num.argmax(self.ydata)
        return self.tmin + i*self.deltat, self.ydata[i]

    def min(self):
        '''
        Get time and value of data minimum.
        '''

        i = num.argmin(self.ydata)
        return self.tmin + i*self.deltat, self.ydata[i]

    def absmax(self):
        '''
        Get time and value of maximum of the absolute of data.
        '''

        tmi, mi = self.min()
        tma, ma = self.max()
        if abs(mi) > abs(ma):
            return tmi, abs(mi)
        else:
            return tma, abs(ma)

    def set_codes(
            self, network=None, station=None, location=None, channel=None,
            agency=None, extra=None):

        '''
        Set network, station, location, and channel codes.
        '''

        if network is not None:
            self.network = network
        if station is not None:
            self.station = station
        if location is not None:
            self.location = location
        if channel is not None:
            self.channel = channel
        if agency is not None:
            self.agency = agency
        if extra is not None:
            self.extra = extra

        self._update_ids()

    def set_network(self, network):
        self.network = network
        self._update_ids()

    def set_station(self, station):
        self.station = station
        self._update_ids()

    def set_location(self, location):
        self.location = location
        self._update_ids()

    def set_channel(self, channel):
        self.channel = channel
        self._update_ids()

    def overlaps(self, tmin, tmax):
        '''
        Check if trace has overlap with a given time span.
        '''
        return not (tmax < self.tmin or self.tmax < tmin)

    def is_relevant(self, tmin, tmax, selector=None):
        '''
        Check if trace has overlap with a given time span and matches a
        condition callback. (internal use)
        '''

        return not (tmax <= self.tmin or self.tmax < tmin) \
            and (selector is None or selector(self))

    def _update_ids(self):
        '''
        Update dependent ids.
        '''

        self.full_id = (
            self.network, self.station, self.location, self.channel, self.tmin)
        self.nslc_id = reuse(
            (self.network, self.station, self.location, self.channel))

    def prune_from_reuse_cache(self):
        util.deuse(self.nslc_id)
        util.deuse(self.network)
        util.deuse(self.station)
        util.deuse(self.location)
        util.deuse(self.channel)

    def set_mtime(self, mtime):
        '''
        Set modification time of the trace.
        '''

        self.mtime = mtime

    def get_xdata(self):
        '''
        Create array for time axis.
        '''

        if self.ydata is None:
            raise NoData()

        return self.tmin \
            + num.arange(len(self.ydata), dtype=num.float64) * self.deltat

    def get_ydata(self):
        '''
        Get data array.
        '''

        if self.ydata is None:
            raise NoData()

        return self.ydata

    def set_ydata(self, new_ydata):
        '''
        Replace data array.
        '''

        self.drop_growbuffer()
        self.ydata = new_ydata
        self.tmax = self.tmin+(len(self.ydata)-1)*self.deltat

    def data_len(self):
        if self.ydata is not None:
            return self.ydata.size
        else:
            return int(round((self.tmax-self.tmin)/self.deltat)) + 1

    def drop_data(self):
        '''
        Forget data, make dataless trace.
        '''

        self.drop_growbuffer()
        self.ydata = None

    def drop_growbuffer(self):
        '''
        Detach the trace's grow buffer.
        '''

        self._growbuffer = None
        self._pchain = None

    def copy(self, data=True):
        '''
        Make a deep copy of the trace.
        '''

        tracecopy = copy.copy(self)
        tracecopy.drop_growbuffer()
        if data:
            tracecopy.ydata = self.ydata.copy()
        tracecopy.meta = copy.deepcopy(self.meta)
        return tracecopy

    def crop_zeros(self):
        '''
        Remove any zeros at beginning and end.
        '''

        indices = num.where(self.ydata != 0.0)[0]
        if indices.size == 0:
            raise NoData()

        ibeg = indices[0]
        iend = indices[-1]+1
        if ibeg == 0 and iend == self.ydata.size:
            return

        self.drop_growbuffer()
        self.ydata = self.ydata[ibeg:iend].copy()
        self.tmin = self.tmin+ibeg*self.deltat
        self.tmax = self.tmin+(len(self.ydata)-1)*self.deltat
        self._update_ids()

    def append(self, data):
        '''
        Append data to the end of the trace.

        To make this method efficient when very few samples, or even single
        samples, are appended successively, a larger grow buffer is allocated
        upon first invocation. The trace's data is then changed to be a view
        into the currently filled portion of the grow buffer array.
        '''

        assert self.ydata.dtype == data.dtype
        newlen = data.size + self.ydata.size
        if self._growbuffer is None or self._growbuffer.size < newlen:
            self._growbuffer = num.empty(newlen*2, dtype=self.ydata.dtype)
            self._growbuffer[:self.ydata.size] = self.ydata
        self._growbuffer[self.ydata.size:newlen] = data
        self.ydata = self._growbuffer[:newlen]
        self.tmax = self.tmin + (newlen-1)*self.deltat

    def chop(
            self, tmin, tmax, inplace=True, include_last=False,
            snap=(round, round), want_incomplete=True):

        '''
        Cut the trace to given time span.

        If the ``inplace`` argument is True (the default) the trace is cut in
        place, otherwise a new trace with the cut part is returned. By
        default, the indices where to start and end the trace data array are
        determined by rounding of ``tmin`` and ``tmax`` to sampling instances
        using Python's :py:func:`round` function. This behaviour can be
        changed with the ``snap`` argument, which takes a tuple of two
        functions (one for the lower and one for the upper end) to be used
        instead of :py:func:`round`. The last sample is by default not
        included unless ``include_last`` is set to True. If the given time
        span exceeds the available time span of the trace, the available part
        is returned, unless ``want_incomplete`` is set to False - in that
        case, a :py:exc:`NoData` exception is raised. This exception is
        always raised when the requested time span does not overlap with the
        trace's time span.
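
        Example (a sketch; assumes an existing trace ``tr``)::

            # cut out a 60 s window, keeping the original trace unchanged
            tr_cut = tr.chop(tr.tmin + 10., tr.tmin + 70., inplace=False)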
        '''

        if want_incomplete:
            if tmax <= self.tmin-self.deltat or self.tmax+self.deltat < tmin:
                raise NoData()
        else:
            if tmin < self.tmin or self.tmax < tmax:
                raise NoData()

        ibeg = max(0, t2ind(tmin-self.tmin, self.deltat, snap[0]))
        iplus = 0
        if include_last:
            iplus = 1

        iend = min(
            self.data_len(),
            t2ind(tmax-self.tmin, self.deltat, snap[1])+iplus)

        if ibeg >= iend:
            raise NoData()

        obj = self
        if not inplace:
            obj = self.copy(data=False)

        self.drop_growbuffer()
        if self.ydata is not None:
            obj.ydata = self.ydata[ibeg:iend].copy()
        else:
            obj.ydata = None

        obj.tmin = obj.tmin+ibeg*obj.deltat
        obj.tmax = obj.tmin+((iend-ibeg)-1)*obj.deltat

        obj._update_ids()

        return obj

    def downsample(self, ndecimate, snap=False, initials=None, demean=False,
                   ftype='fir-remez'):
        '''
        Downsample trace by a given integer factor.

        A comparison plot of the available FIR filters ``fir`` and
        ``fir-remez`` is included in the rendered documentation.

        :param ndecimate: decimation factor, avoid values larger than 8
        :param snap: whether to put the new sampling instances closest to
            multiples of the sampling rate.
        :param initials: ``None``, ``True``, or initial conditions for the
            anti-aliasing filter, obtained from a previous run. In the latter
            two cases the final state of the filter is returned instead of
            ``None``.
        :param demean: whether to demean the signal before filtering.
        :param ftype: which FIR filter to use, choose from ``fir`` or
            ``fir-remez``. Default is ``fir-remez``.
        '''
        newdeltat = self.deltat*ndecimate
        if snap:
            ilag = int(round(
                (math.ceil(self.tmin / newdeltat) * newdeltat - self.tmin)
                / self.deltat))
        else:
            ilag = 0

        if snap and ilag > 0 and ilag < self.ydata.size:
            data = self.ydata.astype(num.float64)
            self.tmin += ilag*self.deltat
        else:
            data = self.ydata.astype(num.float64)

        if demean:
            data -= num.mean(data)

        if data.size != 0:
            result = util.decimate(
                data, ndecimate, ftype=ftype, zi=initials, ioff=ilag)
        else:
            result = data

        if initials is None:
            self.ydata, finals = result, None
        else:
            self.ydata, finals = result

        self.deltat = reuse(self.deltat*ndecimate)
        self.tmax = self.tmin+(len(self.ydata)-1)*self.deltat
        self._update_ids()

        return finals

    def downsample_to(self, deltat, snap=False, allow_upsample_max=1,
                      initials=None, demean=False, ftype='fir-remez'):

        '''
        Downsample to given sampling rate.

        Tries to downsample the trace to a target sampling interval of
        ``deltat``. This runs the :py:meth:`Trace.downsample` one or several
        times. If ``allow_upsample_max`` is set to a value larger than 1,
        intermediate upsampling steps are allowed, in order to increase the
        number of possible downsampling ratios.

        If the requested ratio is not supported, an exception of type
        :py:exc:`pyrocko.util.UnavailableDecimation` is raised.

        :param deltat: desired deltat in [s]
        :param allow_upsample_max: maximum upsampling of the signal to
            achieve the desired deltat. Default is ``1``.
        :param snap: whether to put the new sampling instances closest to
            multiples of the sampling rate.
        :param initials: ``None``, ``True``, or initial conditions for the
            anti-aliasing filter, obtained from a previous run. In the latter
            two cases the final state of the filter is returned instead of
            ``None``.
        :param demean: whether to demean the signal before filtering.
        :param ftype: which FIR filter to use, choose from ``fir`` or
            ``fir-remez``. Default is ``fir-remez``.
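
        Example (a sketch; assumes ``tr`` is sampled at 100 Hz)::

            tr.downsample_to(0.5)  # decimate to 2 Hz (deltat of 0.5 s)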
        '''

        ratio = deltat/self.deltat
        rratio = round(ratio)

        ok = False
        for upsratio in range(1, allow_upsample_max+1):
            dratio = (upsratio/self.deltat) / (1./deltat)
            if abs(dratio - round(dratio)) / dratio < 0.0001 and \
                    util.decitab(int(round(dratio))):

                ok = True
                break

        if not ok:
            raise util.UnavailableDecimation('ratio = %g' % ratio)

        if upsratio > 1:
            self.drop_growbuffer()
            ydata = self.ydata
            self.ydata = num.zeros(
                ydata.size*upsratio-(upsratio-1), ydata.dtype)
            self.ydata[::upsratio] = ydata
            for i in range(1, upsratio):
                self.ydata[i::upsratio] = \
                    float(i)/upsratio * ydata[:-1] \
                    + float(upsratio-i)/upsratio * ydata[1:]
            self.deltat = self.deltat/upsratio

            ratio = deltat/self.deltat
            rratio = round(ratio)

        deci_seq = util.decitab(int(rratio))
        finals = []
        for i, ndecimate in enumerate(deci_seq):
            if ndecimate != 1:
                xinitials = None
                if initials is not None:
                    xinitials = initials[i]
                finals.append(self.downsample(
                    ndecimate, snap=snap, initials=xinitials, demean=demean,
                    ftype=ftype))

        if initials is not None:
            return finals

    def resample(self, deltat):
        '''
        Resample to given sampling interval ``deltat``.

        Resampling is performed in the frequency domain.
        '''

        ndata = self.ydata.size
        ntrans = nextpow2(ndata)
        fntrans2 = ntrans * self.deltat/deltat
        ntrans2 = int(round(fntrans2))
        deltat2 = self.deltat * float(ntrans)/float(ntrans2)
        ndata2 = int(round(ndata*self.deltat/deltat2))
        if abs(fntrans2 - ntrans2) > 1e-7:
            logger.warning(
                'resample: requested deltat %g could not be matched exactly: '
                '%g' % (deltat, deltat2))

        data = self.ydata
        data_pad = num.zeros(ntrans, dtype=float)
        data_pad[:ndata] = data
        fdata = num.fft.rfft(data_pad)
        fdata2 = num.zeros((ntrans2+1)//2, dtype=fdata.dtype)
        n = min(fdata.size, fdata2.size)
        fdata2[:n] = fdata[:n]
        data2 = num.fft.irfft(fdata2)
        data2 = data2[:ndata2]
        data2 *= float(ntrans2) / float(ntrans)
        self.deltat = deltat2
        self.set_ydata(data2)

    def resample_simple(self, deltat):
        tyear = 3600*24*365.

        if deltat == self.deltat:
            return

        if abs(self.deltat - deltat) * tyear/deltat < deltat:
            logger.warning(
                'resample_simple: less than one sample would have to be '
                'inserted/deleted per year. Doing nothing.')
            return

        ninterval = int(round(deltat / (self.deltat - deltat)))
        if abs(ninterval) < 20:
            logger.error(
                'resample_simple: sample insertion/deletion interval less '
                'than 20. results would be erroneous.')
            raise ResamplingFailed()

        delete = False
        if ninterval < 0:
            ninterval = - ninterval
            delete = True

        tyearbegin = util.year_start(self.tmin)

        nmin = int(round((self.tmin - tyearbegin)/deltat))

        ibegin = (((nmin-1)//ninterval)+1) * ninterval - nmin
        nindices = (len(self.ydata) - ibegin - 1) // ninterval + 1
        if nindices > 0:
            indices = ibegin + num.arange(nindices) * ninterval
            data_split = num.split(self.ydata, indices)
            data = []
            for ln, h in zip(data_split[:-1], data_split[1:]):
                if delete:
                    ln = ln[:-1]

                data.append(ln)
                if not delete:
                    if ln.size == 0:
                        v = h[0]
                    else:
                        v = 0.5*(ln[-1] + h[0])
                    data.append(num.array([v], dtype=ln.dtype))

            data.append(data_split[-1])

            ydata_new = num.concatenate(data)

            self.tmin = tyearbegin + nmin * deltat
            self.deltat = deltat
            self.set_ydata(ydata_new)

    def stretch(self, tmin_new, tmax_new):
        '''
        Stretch signal while preserving sample rate using sinc interpolation.

        :param tmin_new: new time of first sample
        :param tmax_new: new time of last sample

        This method can be used to correct for a small linear time drift or
        to introduce sub-sample time shifts. The amount of stretching is
        limited to 10% by the implementation and is expected to be much
        smaller than that by the approximations used.
        '''

        from pyrocko import signal_ext

        i_control = num.array([0, self.ydata.size-1], dtype=num.int64)
        t_control = num.array([tmin_new, tmax_new], dtype=float)

        r = (tmax_new - tmin_new) / self.deltat + 1.0
        n_new = int(round(r))
        if abs(n_new - r) > 0.001:
            n_new = int(math.floor(r))

        assert n_new >= 2

        tmax_new = tmin_new + (n_new-1) * self.deltat

        ydata_new = num.empty(n_new, dtype=float)
        signal_ext.antidrift(i_control, t_control,
                             self.ydata.astype(float),
                             tmin_new, self.deltat, ydata_new)

        self.tmin = tmin_new
        self.set_ydata(ydata_new)
        self._update_ids()

    def nyquist_check(self, frequency, intro='Corner frequency', warn=True,
                      raise_exception=False):

        '''
        Check if a given frequency is above the Nyquist frequency of the trace.

        :param intro: string used to introduce the warning/error message
        :param warn: whether to emit a warning
        :param raise_exception: whether to raise an :py:exc:`AboveNyquist`
            exception.
        '''

        if frequency >= 0.5/self.deltat:
            message = '%s (%g Hz) is equal to or higher than Nyquist ' \
                'frequency (%g Hz). (Trace %s)' \
                % (intro, frequency, 0.5/self.deltat, self.name())
            if warn:
                logger.warning(message)
            if raise_exception:
                raise AboveNyquist(message)

    def lowpass(self, order, corner, nyquist_warn=True,
                nyquist_exception=False, demean=True):

        '''
        Apply Butterworth lowpass to the trace.

        :param order: order of the filter
        :param corner: corner frequency of the filter

        Mean is removed before filtering.
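
        Example (a sketch)::

            tr.lowpass(4, 0.02)  # 4th order filter, 0.02 Hz corner frequency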
        '''

        self.nyquist_check(
            corner, 'Corner frequency of lowpass', nyquist_warn,
            nyquist_exception)

        (b, a) = _get_cached_filter_coefs(
            order, [corner*2.0*self.deltat], btype='low')

        if len(a) != order+1 or len(b) != order+1:
            logger.warning(
                'Erroneous filter coefficients returned by '
                'scipy.signal.butter(). You may need to downsample the '
                'signal before filtering.')

        data = self.ydata.astype(num.float64)
        if demean:
            data -= num.mean(data)
        self.drop_growbuffer()
        self.ydata = signal.lfilter(b, a, data)

    def highpass(self, order, corner, nyquist_warn=True,
                 nyquist_exception=False, demean=True):

        '''
        Apply Butterworth highpass to the trace.

        :param order: order of the filter
        :param corner: corner frequency of the filter

        Mean is removed before filtering.
        '''

        self.nyquist_check(
            corner, 'Corner frequency of highpass', nyquist_warn,
            nyquist_exception)

        (b, a) = _get_cached_filter_coefs(
            order, [corner*2.0*self.deltat], btype='high')

        data = self.ydata.astype(num.float64)
        if len(a) != order+1 or len(b) != order+1:
            logger.warning(
                'Erroneous filter coefficients returned by '
                'scipy.signal.butter(). You may need to downsample the '
                'signal before filtering.')
        if demean:
            data -= num.mean(data)
        self.drop_growbuffer()
        self.ydata = signal.lfilter(b, a, data)

    def bandpass(self, order, corner_hp, corner_lp, demean=True):
        '''
        Apply Butterworth bandpass to the trace.

        :param order: order of the filter
        :param corner_hp: lower corner frequency of the filter
        :param corner_lp: upper corner frequency of the filter

        Mean is removed before filtering.
        '''

        self.nyquist_check(corner_hp, 'Lower corner frequency of bandpass')
        self.nyquist_check(corner_lp, 'Higher corner frequency of bandpass')
        (b, a) = _get_cached_filter_coefs(
            order,
            [corner*2.0*self.deltat for corner in (corner_hp, corner_lp)],
            btype='band')
        data = self.ydata.astype(num.float64)
        if demean:
            data -= num.mean(data)
        self.drop_growbuffer()
        self.ydata = signal.lfilter(b, a, data)

    def bandstop(self, order, corner_hp, corner_lp, demean=True):
        '''
        Apply bandstop (attenuates frequencies in band) to the trace.

        :param order: order of the filter
        :param corner_hp: lower corner frequency of the filter
        :param corner_lp: upper corner frequency of the filter

        Mean is removed before filtering.
        '''

        self.nyquist_check(corner_hp, 'Lower corner frequency of bandstop')
        self.nyquist_check(corner_lp, 'Higher corner frequency of bandstop')
        (b, a) = _get_cached_filter_coefs(
            order,
            [corner*2.0*self.deltat for corner in (corner_hp, corner_lp)],
            btype='bandstop')
        data = self.ydata.astype(num.float64)
        if demean:
            data -= num.mean(data)
        self.drop_growbuffer()
        self.ydata = signal.lfilter(b, a, data)

    def envelope(self, inplace=True):
        '''
        Calculate the envelope of the trace.

        :param inplace: calculate envelope in place

        The calculation follows:

        .. math::

            Y' = \\sqrt{Y^2+H(Y)^2}

        where H is the Hilbert transform of the signal Y.
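
        Example (a sketch)::

            env_tr = tr.envelope(inplace=False)  # get envelope as a new trace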
        '''

        y = self.ydata.astype(float)
        env = num.abs(hilbert(y))
        if inplace:
            self.drop_growbuffer()
            self.ydata = env
        else:
            tr = self.copy(data=False)
            tr.ydata = env
            return tr

    def taper(self, taperer, inplace=True, chop=False):
        '''
        Apply a :py:class:`Taper` to the trace.

        :param taperer: instance of :py:class:`Taper` subclass
        :param inplace: apply taper inplace
        :param chop: if ``True``: exclude tapered parts from the resulting
            trace
        '''

        if not inplace:
            tr = self.copy()
        else:
            tr = self

        if chop:
            i, n = taperer.span(tr.ydata, tr.tmin, tr.deltat)
            tr.shift(i*tr.deltat)
            tr.set_ydata(tr.ydata[i:i+n])

        taperer(tr.ydata, tr.tmin, tr.deltat)

        if not inplace:
            return tr

    def whiten(self, order=6):
        '''
        Whiten signal in time domain using autoregression and recursive filter.

        :param order: order of the autoregression process
        '''

        b, a = self.whitening_coefficients(order)
        self.drop_growbuffer()
        self.ydata = signal.lfilter(b, a, self.ydata)

    def whitening_coefficients(self, order=6):
        ar = yulewalker(self.ydata, order)
        b, a = [1.] + ar.tolist(), [1.]
        return b, a

    def ampspec_whiten(
            self,
            width,
            td_taper='auto',
            fd_taper='auto',
            pad_to_pow2=True,
            demean=True):

        '''
        Whiten signal via frequency domain using moving average on amplitude
        spectra.

        :param width: width of smoothing kernel [Hz]
        :param td_taper: time domain taper, object of type :py:class:`Taper`
            or ``None`` or ``'auto'``.
        :param fd_taper: frequency domain taper, object of type
            :py:class:`Taper` or ``None`` or ``'auto'``.
        :param pad_to_pow2: whether to pad the signal with zeros up to a
            length of 2^n
        :param demean: whether to demean the signal before tapering

        The signal is first demeaned and then tapered using ``td_taper``.
        Then, the spectrum is calculated and inversely weighted with a
        smoothed version of its amplitude spectrum. A moving average is used
        for the smoothing. The smoothed spectrum is then tapered using
        ``fd_taper``. Finally, the smoothed and tapered spectrum is
        back-transformed into the time domain.

        If ``td_taper`` is set to ``'auto'``, ``CosFader(1.0/width)`` is
        used. If ``fd_taper`` is set to ``'auto'``, ``CosFader(width)`` is
        used.
        '''

        ndata = self.data_len()

        if pad_to_pow2:
            ntrans = nextpow2(ndata)
        else:
            ntrans = ndata

        df = 1./(ntrans*self.deltat)
        nw = int(round(width/df))
        if ndata//2+1 <= nw:
            raise TraceTooShort(
                'Samples in trace: %s, samples needed: %s' % (ndata, nw))

        if td_taper == 'auto':
            td_taper = CosFader(1./width)

        if fd_taper == 'auto':
            fd_taper = CosFader(width)

        if td_taper:
            self.taper(td_taper)

        ydata = self.get_ydata().astype(float)
        if demean:
            ydata -= ydata.mean()

        spec = num.fft.rfft(ydata, ntrans)

        amp = num.abs(spec)
        nspec = amp.size
        csamp = num.cumsum(amp)
        amp_smoothed = num.empty(nspec, dtype=csamp.dtype)
        n1, n2 = nw//2, nw//2 + nspec - nw
        amp_smoothed[n1:n2] = (csamp[nw:] - csamp[:-nw]) / nw
        amp_smoothed[:n1] = amp_smoothed[n1]
        amp_smoothed[n2:] = amp_smoothed[n2-1]

        denom = amp_smoothed * amp
        numer = amp
        eps = num.mean(denom) * 1e-9
        if eps == 0.0:
            eps = 1e-9

        numer += eps
        denom += eps
        spec *= numer/denom

        if fd_taper:
            fd_taper(spec, 0., df)

        ydata = num.fft.irfft(spec)
        self.set_ydata(ydata[:ndata])

    def _get_cached_freqs(self, nf, deltaf):
        ck = (nf, deltaf)
        if ck not in Trace.cached_frequencies:
            Trace.cached_frequencies[ck] = deltaf * num.arange(
                nf, dtype=float)

        return Trace.cached_frequencies[ck]

    def bandpass_fft(self, corner_hp, corner_lp):
        '''
        Apply boxcar bandpass to trace (in spectral domain).
        '''

        n = len(self.ydata)
        n2 = nextpow2(n)
        data = num.zeros(n2, dtype=num.float64)
        data[:n] = self.ydata
        fdata = num.fft.rfft(data)
        freqs = self._get_cached_freqs(len(fdata), 1./(self.deltat*n2))
        fdata[0] = 0.0
        fdata *= num.logical_and(corner_hp < freqs, freqs < corner_lp)
        data = num.fft.irfft(fdata)
        self.drop_growbuffer()
        self.ydata = data[:n]

    def shift(self, tshift):
        '''
        Time shift the trace.
        '''

        self.tmin += tshift
        self.tmax += tshift
        self._update_ids()

    def snap(self, inplace=True, interpolate=False):
        '''
        Shift trace samples to the nearest even multiples of the sampling
        interval.

        :param inplace: (boolean) snap traces inplace

        If ``inplace`` is ``False`` and the difference between tmin and tmax
        of both the snapped and the original trace is smaller than
        0.01 x deltat, :py:func:`snap` returns the unsnapped instance of the
        original trace.
        '''

        tmin = round(self.tmin/self.deltat)*self.deltat
        tmax = tmin + (self.ydata.size-1)*self.deltat

        if inplace:
            xself = self
        else:
            if abs(self.tmin - tmin) < 1e-2*self.deltat and \
                    abs(self.tmax - tmax) < 1e-2*self.deltat:
                return self

            xself = self.copy()

        if interpolate:
            from pyrocko import signal_ext
            n = xself.data_len()
            ydata_new = num.empty(n, dtype=float)
            i_control = num.array([0, n-1], dtype=num.int64)
            tref = tmin
            t_control = num.array(
                [float(xself.tmin-tref), float(xself.tmax-tref)],
                dtype=float)

            signal_ext.antidrift(i_control, t_control,
                                 xself.ydata.astype(float),
                                 float(tmin-tref), xself.deltat, ydata_new)

            xself.ydata = ydata_new

        xself.tmin = tmin
        xself.tmax = tmax
        xself._update_ids()

        return xself

    def fix_deltat_rounding_errors(self):
        '''
        Try to undo sampling rate rounding errors.

        See :py:func:`fix_deltat_rounding_errors`.
        '''

        self.deltat = fix_deltat_rounding_errors(self.deltat)
        self.tmax = self.tmin + (self.data_len() - 1) * self.deltat

    def sta_lta_centered(self, tshort, tlong, quad=True, scalingmethod=1):
        '''
        Run special STA/LTA filter where the short time window is centered on
        the long time window.

        :param tshort: length of short time window in [s]
        :param tlong: length of long time window in [s]
        :param quad: whether to square the data prior to applying the STA/LTA
            filter
        :param scalingmethod: integer key to select how output values are
            scaled / normalized (``1``, ``2``, or ``3``)

        ============= ====================================== ===========
        Scalingmethod Implementation                         Range
        ============= ====================================== ===========
        ``1``         As/Al* Ts/Tl                           [0,1]
        ``2``         (As/Al - 1) / (Tl/Ts - 1)              [-Ts/Tl,1]
        ``3``         Like ``2`` but clipping range at zero  [0,1]
        ============= ====================================== ===========
        '''

        nshort = int(round(tshort/self.deltat))
        nlong = int(round(tlong/self.deltat))

        assert nshort < nlong
        if nlong > len(self.ydata):
            raise TraceTooShort(
                'Samples in trace: %s, samples needed: %s'
                % (len(self.ydata), nlong))

        if quad:
            sqrdata = self.ydata**2
        else:
            sqrdata = self.ydata

        mavg_short = moving_avg(sqrdata, nshort)
        mavg_long = moving_avg(sqrdata, nlong)

        self.drop_growbuffer()

        if scalingmethod not in (1, 2, 3):
            raise Exception('Invalid argument for scalingmethod.')

        if scalingmethod == 1:
            self.ydata = mavg_short/mavg_long * float(nshort)/float(nlong)
        elif scalingmethod in (2, 3):
            self.ydata = (mavg_short/mavg_long - 1.) \
                / ((float(nlong)/float(nshort)) - 1)

            if scalingmethod == 3:
                self.ydata = num.maximum(self.ydata, 0.)

    def sta_lta_right(self, tshort, tlong, quad=True, scalingmethod=1):
        '''
        Run special STA/LTA filter where the short time window is overlapping
        with the last part of the long time window.

        :param tshort: length of short time window in [s]
        :param tlong: length of long time window in [s]
        :param quad: whether to square the data prior to applying the STA/LTA
            filter
        :param scalingmethod: integer key to select how output values are
            scaled / normalized (``1``, ``2``, or ``3``)

        ============= ====================================== ===========
        Scalingmethod Implementation                         Range
        ============= ====================================== ===========
        ``1``         As/Al* Ts/Tl                           [0,1]
        ``2``         (As/Al - 1) / (Tl/Ts - 1)              [-Ts/Tl,1]
        ``3``         Like ``2`` but clipping range at zero  [0,1]
        ============= ====================================== ===========

        With ``scalingmethod=1``, the values produced by this variant of the
        STA/LTA are equivalent to

        .. math::
            s_i = \\frac{s}{l} \\frac{\\frac{1}{s}\\sum_{j=i}^{i+s-1} f_j}
                                     {\\frac{1}{l}\\sum_{j=i+s-l}^{i+s-1} f_j}

        where :math:`f_j` are the input samples, :math:`s` are the number of
        samples in the short time window and :math:`l` are the number of
        samples in the long time window.
        '''

        n = self.data_len()
        tmin = self.tmin

        nshort = max(1, int(round(tshort/self.deltat)))
        nlong = max(1, int(round(tlong/self.deltat)))

        assert nshort < nlong

        if nlong > len(self.ydata):
            raise TraceTooShort(
                'Samples in trace: %s, samples needed: %s'
                % (len(self.ydata), nlong))

        if quad:
            sqrdata = self.ydata**2
        else:
            sqrdata = self.ydata

        nshift = int(math.floor(0.5 * (nlong - nshort)))
        if nlong % 2 != 0 and nshort % 2 == 0:
            nshift += 1

        mavg_short = moving_avg(sqrdata, nshort)[nshift:]
        mavg_long = moving_avg(sqrdata, nlong)[:sqrdata.size-nshift]

        self.drop_growbuffer()

        if scalingmethod not in (1, 2, 3):
            raise Exception('Invalid argument for scalingmethod.')

        if scalingmethod == 1:
            ydata = mavg_short/mavg_long * nshort/nlong
        elif scalingmethod in (2, 3):
            ydata = (mavg_short/mavg_long - 1.) \
                / ((float(nlong)/float(nshort)) - 1)

            if scalingmethod == 3:
                ydata = num.maximum(ydata, 0.)

        self.set_ydata(ydata)

        self.shift((math.ceil(0.5*nlong) - nshort + 1) * self.deltat)

        self.chop(
            tmin + (nlong - nshort) * self.deltat,
            tmin + (n - nshort) * self.deltat)

    def peaks(self, threshold, tsearch,
              deadtime=False,
              nblock_duration_detection=100):

        '''
        Detect peaks above a given threshold (method 1).

        From every instant, where the signal rises above ``threshold``, a
        time length of ``tsearch`` seconds is searched for a maximum. A list
        with tuples (time, value) for each detected peak is returned. The
        ``deadtime`` argument turns on a special deadtime duration detection
        algorithm useful in combination with recursive STA/LTA filters.
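
        Example (a sketch; assumes ``tr`` holds an STA/LTA characteristic
        function)::

            tpeaks, apeaks = tr.peaks(threshold=2.0, tsearch=10.)
            for t, a in zip(tpeaks, apeaks):
                print(util.time_to_str(t), a)  # util is pyrocko.util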
        '''

        y = self.ydata
        above = num.where(y > threshold, 1, 0)
        deriv = num.zeros(y.size, dtype=num.int8)
        deriv[1:] = above[1:]-above[:-1]
        itrig_positions = num.nonzero(deriv > 0)[0]
        tpeaks = []
        apeaks = []
        tzeros = []
        tzero = self.tmin

        for itrig_pos in itrig_positions:
            ibeg = itrig_pos
            iend = min(
                len(self.ydata),
                itrig_pos + int(math.ceil(tsearch/self.deltat)))
            ipeak = num.argmax(y[ibeg:iend])
            tpeak = self.tmin + (ipeak+ibeg)*self.deltat
            apeak = y[ibeg+ipeak]

            if tpeak < tzero:
                continue

            if deadtime:
                ibeg = itrig_pos
                iblock = 0
                nblock = nblock_duration_detection
                totalsum = 0.
                while True:
                    if ibeg+iblock*nblock >= len(y):
                        tzero = self.tmin + (len(y)-1) * self.deltat
                        break

                    logy = num.log(
                        y[ibeg+iblock*nblock:ibeg+(iblock+1)*nblock])
                    logy[0] += totalsum
                    ysum = num.cumsum(logy)
                    totalsum = ysum[-1]
                    below = num.where(ysum <= 0., 1, 0)
                    deriv = num.zeros(ysum.size, dtype=num.int8)
                    deriv[1:] = below[1:]-below[:-1]
                    izero_positions = num.nonzero(deriv > 0)[0] + iblock*nblock
                    if len(izero_positions) > 0:
                        tzero = self.tmin + self.deltat * (
                            ibeg + izero_positions[0])
                        break
                    iblock += 1
            else:
                tzero = ibeg*self.deltat + self.tmin + tsearch

            tpeaks.append(tpeak)
            apeaks.append(apeak)
            tzeros.append(tzero)

        if deadtime:
            return tpeaks, apeaks, tzeros
        else:
            return tpeaks, apeaks

    def peaks2(self, threshold, tsearch):

        '''
        Detect peaks above a given threshold (method 2).

        This variant of peak detection is a bit more robust (and slower) than
        the one implemented in :py:meth:`Trace.peaks`. First all samples with
        ``a[i-1] < a[i] > a[i+1]`` are masked as potential peaks. From these,
        iteratively the one with the maximum amplitude ``a[j]`` and time
        ``t[j]`` is chosen and potential peaks within
        ``t[j] - tsearch, t[j] + tsearch``
        are discarded. The algorithm stops when ``a[j] < threshold`` or when
        no more potential peaks are left.
        '''

        a = num.copy(self.ydata)

        amin = num.min(a)

        a[0] = amin
        a[1: -1][num.diff(a, 2) <= 0.] = amin
        a[-1] = amin

        data = []
        while True:
            imax = num.argmax(a)
            amax = a[imax]

            if amax < threshold or amax == amin:
                break

            data.append((self.tmin + imax * self.deltat, amax))

            ntsearch = int(round(tsearch / self.deltat))
            a[max(imax-ntsearch//2, 0):min(imax+ntsearch//2, a.size)] = amin

        if data:
            data.sort()
            tpeaks, apeaks = list(zip(*data))
        else:
            tpeaks, apeaks = [], []

        return tpeaks, apeaks

    def extend(self, tmin=None, tmax=None, fillmethod='zeros'):
        '''
        Extend trace to given span.

        :param tmin: begin time of new span
        :param tmax: end time of new span
        :param fillmethod: ``'zeros'``, ``'repeat'``, ``'mean'``, or
            ``'median'``
        '''

        nold = self.ydata.size

        if tmin is not None:
            nl = min(0, int(round((tmin-self.tmin)/self.deltat)))
        else:
            nl = 0

        if tmax is not None:
            nh = max(nold - 1, int(round((tmax-self.tmin)/self.deltat)))
        else:
            nh = nold - 1

        n = nh - nl + 1
        data = num.zeros(n, dtype=self.ydata.dtype)
        data[-nl:-nl + nold] = self.ydata
        if self.ydata.size >= 1:
            if fillmethod == 'repeat':
                data[:-nl] = self.ydata[0]
                data[-nl + nold:] = self.ydata[-1]
            elif fillmethod == 'median':
                v = num.median(self.ydata)
                data[:-nl] = v
                data[-nl + nold:] = v
            elif fillmethod == 'mean':
                v = num.mean(self.ydata)
                data[:-nl] = v
                data[-nl + nold:] = v

        self.drop_growbuffer()
        self.ydata = data

        self.tmin += nl * self.deltat
        self.tmax = self.tmin + (self.ydata.size - 1) * self.deltat

        self._update_ids()

    def transfer(self,
                 tfade=0.,
                 freqlimits=None,
                 transfer_function=None,
                 cut_off_fading=True,
                 demean=True,
                 invert=False):

        '''
        Return new trace with transfer function applied (convolution).

        :param tfade: rise/fall time in seconds of taper applied in time
            domain at both ends of trace.
        :param freqlimits: 4-tuple with corner frequencies in Hz.
        :param transfer_function: FrequencyResponse object; must provide a
            method 'evaluate(freqs)', which returns the transfer function
            coefficients at the frequencies 'freqs'.
        :param cut_off_fading: whether to cut off rise/fall interval in
            output trace.
        :param demean: remove mean before applying transfer function
        :param invert: set to True to do a deconvolution
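
        Example (a sketch; assumes ``resp`` is a frequency response object
        with an ``evaluate(freqs)`` method, e.g. loaded from station
        metadata)::

            restituted = tr.transfer(
                tfade=10.,
                freqlimits=(0.01, 0.02, 5., 10.),
                transfer_function=resp,
                invert=True)  # deconvolve the instrument response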
        '''

        if transfer_function is None:
            transfer_function = FrequencyResponse()

        if self.tmax - self.tmin <= tfade*2.:
            raise TraceTooShort(
                'Trace %s.%s.%s.%s too short for fading length setting. '
                'trace length = %g, fading length = %g'
                % (self.nslc_id + (self.tmax-self.tmin, tfade)))

        if freqlimits is None and (
                transfer_function is None or transfer_function.is_scalar()):

            # special case for flat responses

            output = self.copy()
            data = self.ydata
            ndata = data.size

            if transfer_function is not None:
                c = num.abs(transfer_function.evaluate(num.ones(1))[0])

                if invert:
                    c = 1.0/c

                data *= c

            if tfade != 0.0:
                data *= costaper(
                    0., tfade, self.deltat*(ndata-1)-tfade, self.deltat*ndata,
                    ndata, self.deltat)

            output.ydata = data

        else:
            ndata = self.ydata.size
            ntrans = nextpow2(ndata*1.2)
            coefs = self._get_tapered_coefs(
                ntrans, freqlimits, transfer_function, invert=invert)

            data = self.ydata

            data_pad = num.zeros(ntrans, dtype=float)
            data_pad[:ndata] = data
            if demean:
                data_pad[:ndata] -= data.mean()

            if tfade != 0.0:
                data_pad[:ndata] *= costaper(
                    0., tfade, self.deltat*(ndata-1)-tfade, self.deltat*ndata,
                    ndata, self.deltat)

            fdata = num.fft.rfft(data_pad)
            fdata *= coefs
            ddata = num.fft.irfft(fdata)
            output = self.copy()
            output.ydata = ddata[:ndata]

        if cut_off_fading and tfade != 0.0:
            try:
                output.chop(output.tmin+tfade, output.tmax-tfade, inplace=True)
            except NoData:
                raise TraceTooShort(
                    'Trace %s.%s.%s.%s too short for fading length setting. '
                    'trace length = %g, fading length = %g'
                    % (self.nslc_id + (self.tmax-self.tmin, tfade)))
        else:
            output.ydata = output.ydata.copy()

        return output

    def differentiate(self, n=1, order=4, inplace=True):
        '''
        Approximate first or second derivative of the trace.

        :param n: 1 for first derivative, 2 for second
        :param order: order of the approximation; 2 and 4 are supported
        :param inplace: if ``True`` the trace is differentiated in place,
            otherwise a new trace object with the derivative is returned.

        Raises :py:exc:`ValueError` for unsupported ``n`` or ``order``.

        See :py:func:`~pyrocko.util.diff_fd` for implementation details.
        '''

        ddata = util.diff_fd(n, order, self.deltat, self.ydata)

        if inplace:
            self.ydata = ddata
        else:
            output = self.copy(data=False)
            output.set_ydata(ddata)
            return output

    def drop_chain_cache(self):
        if self._pchain:
            self._pchain.clear()

    def init_chain(self):
        self._pchain = pchain.Chain(
            do_downsample,
            do_extend,
            do_pre_taper,
            do_fft,
            do_filter,
            do_ifft)

    def run_chain(self, tmin, tmax, deltat, setup, nocache):
        if setup.domain == 'frequency_domain':
            _, _, data = self._pchain(
                (self, deltat),
                (tmin, tmax),
                (setup.taper,),
                (setup.filter,),
                (setup.filter,),
                nocache=nocache)

            return num.abs(data), num.abs(data)

        else:
            processed = self._pchain(
                (self, deltat),
                (tmin, tmax),
                (setup.taper,),
                (setup.filter,),
                (setup.filter,),
                (),
                nocache=nocache)

            if setup.domain == 'time_domain':
                data = processed.get_ydata()

            elif setup.domain == 'envelope':
                processed = processed.envelope(inplace=False)

            elif setup.domain == 'absolute':
                processed.set_ydata(num.abs(processed.get_ydata()))

            return processed.get_ydata(), processed

    def misfit(self, candidate, setup, nocache=False, debug=False):
        '''
        Calculate misfit and normalization factor against candidate trace.

        :param candidate: :py:class:`Trace` object
        :param setup: :py:class:`MisfitSetup` object
        :returns: tuple ``(m, n)``, where m is the misfit value and n is the
            normalization divisor

        If the sampling rates of ``self`` and ``candidate`` differ, the trace
        with the higher sampling rate will be downsampled.
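
        Example (a sketch; assumes ``MisfitSetup`` with the field names used
        here and two traces ``reference`` and ``candidate``)::

            setup = MisfitSetup(
                norm=2,                     # use L2 norm
                taper=CosFader(xfade=2.0),  # taper applied by the chain
                domain='time_domain')

            m, n = reference.misfit(candidate, setup)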
        '''

        a = self
        b = candidate

        for tr in (a, b):
            if not tr._pchain:
                tr.init_chain()

        deltat = max(a.deltat, b.deltat)
        tmin = min(a.tmin, b.tmin) - deltat
        tmax = max(a.tmax, b.tmax) + deltat

        adata, aproc = a.run_chain(tmin, tmax, deltat, setup, nocache)
        bdata, bproc = b.run_chain(tmin, tmax, deltat, setup, nocache)

        if setup.domain != 'cc_max_norm':
            m, n = Lx_norm(bdata, adata, norm=setup.norm)
        else:
            ctr = correlate(aproc, bproc, mode='full', normalization='normal')
            ccmax = ctr.max()[1]
            m = 0.5 - 0.5 * ccmax
            n = 0.5

        if debug:
            return m, n, aproc, bproc
        else:
            return m, n

    def spectrum(self, pad_to_pow2=False, tfade=None):
        '''
        Get FFT spectrum of trace.

        :param pad_to_pow2: whether to zero-pad the data to next larger
            power-of-two length
        :param tfade: ``None`` or a time length in seconds, to apply cosine
            shaped tapers to both ends

        :returns: a tuple with (frequencies, values)
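
        Example (a sketch, plotting the amplitude spectrum; assumes
        Matplotlib is available)::

            import numpy as num
            import matplotlib.pyplot as plt

            freqs, spec = tr.spectrum()
            plt.loglog(freqs[1:], num.abs(spec[1:]))  # skip zero frequency
            plt.show()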
        '''

        ndata = self.ydata.size

        if pad_to_pow2:
            ntrans = nextpow2(ndata)
        else:
            ntrans = ndata

        if tfade is None:
            ydata = self.ydata
        else:
            ydata = self.ydata * costaper(
                0., tfade, self.deltat*(ndata-1)-tfade, self.deltat*ndata,
                ndata, self.deltat)

        fydata = num.fft.rfft(ydata, ntrans)
        df = 1./(ntrans*self.deltat)
        fxdata = num.arange(len(fydata))*df
        return fxdata, fydata

    def multi_filter(self, filter_freqs, bandwidth):

        class Gauss(FrequencyResponse):
            f0 = Float.T()
            a = Float.T(default=1.0)

            def __init__(self, f0, a=1.0, **kwargs):
                FrequencyResponse.__init__(self, f0=f0, a=a, **kwargs)

            def evaluate(self, freqs):
                omega0 = 2.*math.pi*self.f0
                omega = 2.*math.pi*freqs
                return num.exp(-((omega-omega0)
                                 / (self.a*omega0))**2)

        freqs, coefs = self.spectrum()
        n = self.data_len()
        nfilt = len(filter_freqs)
        signal_tf = num.zeros((nfilt, n))
        centroid_freqs = num.zeros(nfilt)
        for ifilt, f0 in enumerate(filter_freqs):
            taper = Gauss(f0, a=bandwidth)
            weights = taper.evaluate(freqs)
            nhalf = freqs.size
            analytic_spec = num.zeros(n, dtype=complex)
            analytic_spec[:nhalf] = coefs*weights

            enorm = num.abs(analytic_spec[:nhalf])**2
            enorm /= num.sum(enorm)

            if n % 2 == 0:
                analytic_spec[1:nhalf-1] *= 2.
            else:
                analytic_spec[1:nhalf] *= 2.

            analytic = num.fft.ifft(analytic_spec)
            signal_tf[ifilt, :] = num.abs(analytic)

            enorm = num.abs(analytic_spec[:nhalf])**2
            enorm /= num.sum(enorm)
            centroid_freqs[ifilt] = num.sum(freqs*enorm)

        return centroid_freqs, signal_tf

    def _get_tapered_coefs(
            self, ntrans, freqlimits, transfer_function, invert=False):

        deltaf = 1./(self.deltat*ntrans)
        nfreqs = ntrans//2 + 1
        transfer = num.ones(nfreqs, dtype=complex)
        hi = snapper(nfreqs, deltaf)
        if freqlimits is not None:
            a, b, c, d = freqlimits
            freqs = num.arange(hi(d)-hi(a), dtype=float)*deltaf \
                + hi(a)*deltaf

            if invert:
                coeffs = transfer_function.evaluate(freqs)
                if num.any(coeffs == 0.0):
                    raise InfiniteResponse('%s.%s.%s.%s' % self.nslc_id)

                transfer[hi(a):hi(d)] = 1.0 / transfer_function.evaluate(freqs)
            else:
                transfer[hi(a):hi(d)] = transfer_function.evaluate(freqs)

            tapered_transfer = costaper(a, b, c, d, nfreqs, deltaf)*transfer
        else:
            if invert:
                raise Exception(
                    'transfer: `freqlimits` must be given when `invert` is '
                    'set to `True`')

            freqs = num.arange(nfreqs) * deltaf
            tapered_transfer = transfer_function.evaluate(freqs)

        tapered_transfer[0] = 0.0  # don't introduce static offsets
        return tapered_transfer

    def fill_template(self, template, **additional):
        '''
        Fill string template with trace metadata.

        Uses normal python '%(placeholder)s' string templates. The following
        placeholders are considered: ``network``, ``station``, ``location``,
        ``channel``, ``tmin`` (time of first sample), ``tmax`` (time of last
        sample), ``tmin_ms``, ``tmax_ms``, ``tmin_us``, ``tmax_us``,
        ``tmin_year``, ``tmax_year``, ``tmin_month``, ``tmax_month``,
        ``tmin_day``, ``tmax_day``, ``julianday``. The variants ending with
        ``'_ms'`` include milliseconds, those with ``'_us'`` include
        microseconds, those with ``'_year'`` contain only the year.
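
        Example (a sketch)::

            fn = tr.fill_template(
                '%(network)s.%(station)s.%(channel)s_%(tmin)s.mseed')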
        '''

        template = template.replace('%n', '%(network)s')\
            .replace('%s', '%(station)s')\
            .replace('%l', '%(location)s')\
            .replace('%c', '%(channel)s')\
            .replace('%b', '%(tmin)s')\
            .replace('%e', '%(tmax)s')\
            .replace('%j', '%(julianday)s')

        params = dict(
            zip(('network', 'station', 'location', 'channel'), self.nslc_id))
        params['tmin'] = util.time_to_str(
            self.tmin, format='%Y-%m-%d_%H-%M-%S')
        params['tmax'] = util.time_to_str(
            self.tmax, format='%Y-%m-%d_%H-%M-%S')
        params['tmin_ms'] = util.time_to_str(
            self.tmin, format='%Y-%m-%d_%H-%M-%S.3FRAC')
        params['tmax_ms'] = util.time_to_str(
            self.tmax, format='%Y-%m-%d_%H-%M-%S.3FRAC')
        params['tmin_us'] = util.time_to_str(
            self.tmin, format='%Y-%m-%d_%H-%M-%S.6FRAC')
        params['tmax_us'] = util.time_to_str(
            self.tmax, format='%Y-%m-%d_%H-%M-%S.6FRAC')
        params['tmin_year'], params['tmin_month'], params['tmin_day'] \
            = util.time_to_str(self.tmin, format='%Y-%m-%d').split('-')
        params['tmax_year'], params['tmax_month'], params['tmax_day'] \
            = util.time_to_str(self.tmax, format='%Y-%m-%d').split('-')
        params['julianday'] = util.julian_day_of_year(self.tmin)
        params.update(additional)
        return template % params

    def plot(self):
        '''
        Show trace with matplotlib.

        See also: :py:meth:`Trace.snuffle`.
        '''

        import pylab
        pylab.plot(self.get_xdata(), self.get_ydata())
        name = '%s %s %s - %s' % (
            self.channel,
            self.station,
            time.strftime("%d-%m-%y %H:%M:%S", time.gmtime(self.tmin)),
            time.strftime("%d-%m-%y %H:%M:%S", time.gmtime(self.tmax)))

        pylab.title(name)
        pylab.show()

    def snuffle(self, **kwargs):
        '''
        Show trace in a snuffler window.

        :param stations: list of `pyrocko.model.Station` objects or ``None``
        :param events: list of `pyrocko.model.Event` objects or ``None``
        :param markers: list of `pyrocko.gui.util.Marker` objects or ``None``
        :param ntracks: float, number of tracks to be shown initially
            (default: 12)
        :param follow: time interval (in seconds) for real time follow mode
            or ``None``
        :param controls: bool, whether to show the main controls (default:
            ``True``)
        :param opengl: bool, whether to use opengl (default: ``False``)
        '''

        return snuffle([self], **kwargs)
1945def snuffle(traces, **kwargs):
1946 '''
1947 Show traces in a snuffler window.
1949 :param stations: list of `pyrocko.model.Station` objects or ``None``
1950 :param events: list of `pyrocko.model.Event` objects or ``None``
1951 :param markers: list of `pyrocko.gui.util.Marker` objects or ``None``
1952 :param ntracks: int, number of tracks to be shown initially (default: 12)
1953 :param follow: time interval (in seconds) for real time follow mode or
1954 ``None``
1955 :param controls: bool, whether to show the main controls (default:
1956 ``True``)
1957 :param opengl: bool, whether to use opengl (default: ``False``)
1958 '''
1960 from pyrocko import pile
1961 from pyrocko.gui import snuffler
1962 p = pile.Pile()
1963 if traces:
1964 trf = pile.MemTracesFile(None, traces)
1965 p.add_file(trf)
1966 return snuffler.snuffle(p, **kwargs)
1969class InfiniteResponse(Exception):
1970 '''
1971 This exception is raised by :py:class:`Trace` operations when deconvolution
1972 of a frequency response (instrument response transfer function) would
1973 result in a division by zero.
1974 '''
1977class MisalignedTraces(Exception):
1978 '''
1979 This exception is raised by some :py:class:`Trace` operations when tmin,
1980 tmax or number of samples do not match.
1981 '''
1983 pass
1986class NoData(Exception):
1987 '''
1988 This exception is raised by some :py:class:`Trace` operations when no or
1989 not enough data is available.
1990 '''
1992 pass
1995class AboveNyquist(Exception):
1996 '''
1997 This exception is raised by some :py:class:`Trace` operations when given
1998 frequencies are above the Nyquist frequency.
1999 '''
2001 pass
2004class TraceTooShort(Exception):
2005 '''
2006 This exception is raised by some :py:class:`Trace` operations when the
2007 trace is too short.
2008 '''
2010 pass
2013class ResamplingFailed(Exception):
2014 pass
2017def minmax(traces, key=None, mode='minmax'):
2019 '''
2020 Get data range given traces grouped by selected pattern.
2022 :param key: a callable which takes as single argument a trace and returns a
2023 key for the grouping of the results. If this is ``None``, the default,
2024 ``lambda tr: (tr.network, tr.station, tr.location, tr.channel)`` is
2025 used.
2026 :param mode: 'minmax' or floating point number. If this is 'minmax',
2027 minimum and maximum of the traces are used, if it is a number, mean +-
2028 standard deviation times ``mode`` is used.
2030 :returns: a dict with the combined data ranges.
2032 Examples::
2034 ranges = minmax(traces, lambda tr: tr.channel)
2035 print(ranges['N']) # print min & max of all traces with channel == 'N'
2036 print(ranges['E']) # print min & max of all traces with channel == 'E'
2038 ranges = minmax(traces, lambda tr: (tr.network, tr.station))
2039 print(ranges['GR', 'HAM3']) # print min & max of all traces with
2040 # network == 'GR' and station == 'HAM3'
2042 ranges = minmax(traces, lambda tr: None)
2043 print(ranges[None]) # prints min & max of all traces
2044 '''
2046 if key is None:
2047 key = _default_key
2049 ranges = {}
2050 for trace in traces:
2051 if isinstance(mode, str) and mode == 'minmax':
2052 mi, ma = trace.ydata.min(), trace.ydata.max()
2053 else:
2054 mean = trace.ydata.mean()
2055 std = trace.ydata.std()
2056 mi, ma = mean-std*mode, mean+std*mode
2058 k = key(trace)
2059 if k not in ranges:
2060 ranges[k] = mi, ma
2061 else:
2062 tmi, tma = ranges[k]
2063 ranges[k] = min(tmi, mi), max(tma, ma)
2065 return ranges
2068def minmaxtime(traces, key=None):
2070 '''
2071 Get time range given traces grouped by selected pattern.
2073 :param key: a callable which takes as single argument a trace and returns a
2074 key for the grouping of the results. If this is ``None``, the default,
2075 ``lambda tr: (tr.network, tr.station, tr.location, tr.channel)`` is
2076 used.
2078 :returns: a dict with the combined time ranges.
2079 '''
2081 if key is None:
2082 key = _default_key
2084 ranges = {}
2085 for trace in traces:
2086 mi, ma = trace.tmin, trace.tmax
2087 k = key(trace)
2088 if k not in ranges:
2089 ranges[k] = mi, ma
2090 else:
2091 tmi, tma = ranges[k]
2092 ranges[k] = min(tmi, mi), max(tma, ma)
2094 return ranges
2097def degapper(
2098 traces,
2099 maxgap=5,
2100 fillmethod='interpolate',
2101 deoverlap='use_second',
2102 maxlap=None):
2104 '''
2105 Try to connect traces and remove gaps.
2107 This method will combine adjacent traces, which match in their network,
2108 station, location and channel attributes. Overlapping parts are handled
2109 according to the ``deoverlap`` argument.
2111 :param traces: input traces, must be sorted by their full_id attribute.
2112 :param maxgap: maximum number of samples to interpolate.
2113 :param fillmethod: what to put into the gaps: 'interpolate' or 'zeros'.
2114 :param deoverlap: how to handle overlaps: 'use_second' to use data from
2115 second trace (default), 'use_first' to use data from first trace,
2116 'crossfade_cos' to crossfade with cosine taper, 'add' to add amplitude
2117 values.
2118 :param maxlap: maximum number of samples of overlap which are removed
2120 :returns: list of traces
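
Example (sketch; assumes ``traces`` holds fragments of a single channel)::

    traces.sort(key=lambda tr: tr.full_id)
    continuous = degapper(traces, maxgap=10, fillmethod='zeros')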
2121 '''
2123 in_traces = traces
2124 out_traces = []
2125 if not in_traces:
2126 return out_traces
2127 out_traces.append(in_traces.pop(0))
2128 while in_traces:
2130 a = out_traces[-1]
2131 b = in_traces.pop(0)
2133 avirt, bvirt = a.ydata is None, b.ydata is None
2134 assert avirt == bvirt, \
2135 'traces given to degapper() must either all have data or have ' \
2136 'no data.'
2138 virtual = avirt and bvirt
2140 if (a.nslc_id == b.nslc_id and a.deltat == b.deltat
2141 and a.data_len() >= 1 and b.data_len() >= 1
2142 and (virtual or a.ydata.dtype == b.ydata.dtype)):
2144 dist = (b.tmin-(a.tmin+(a.data_len()-1)*a.deltat))/a.deltat
2145 idist = int(round(dist))
2146 if abs(dist - idist) > 0.05 and idist <= maxgap:
2147 # logger.warning('Cannot degap traces with displaced sampling '
2148 # '(%s, %s, %s, %s)' % a.nslc_id)
2149 pass
2150 else:
2151 if 1 < idist <= maxgap:
2152 if not virtual:
2153 if fillmethod == 'interpolate':
2154 filler = a.ydata[-1] + (
2155 ((1.0 + num.arange(idist-1, dtype=float))
2156 / idist) * (b.ydata[0]-a.ydata[-1])
2157 ).astype(a.ydata.dtype)
2158 elif fillmethod == 'zeros':
2159 filler = num.zeros(idist-1, dtype=a.ydata.dtype)
2160 a.ydata = num.concatenate((a.ydata, filler, b.ydata))
2161 a.tmax = b.tmax
2162 if a.mtime and b.mtime:
2163 a.mtime = max(a.mtime, b.mtime)
2164 continue
2166 elif idist == 1:
2167 if not virtual:
2168 a.ydata = num.concatenate((a.ydata, b.ydata))
2169 a.tmax = b.tmax
2170 if a.mtime and b.mtime:
2171 a.mtime = max(a.mtime, b.mtime)
2172 continue
2174 elif idist <= 0 and (maxlap is None or -maxlap < idist):
2175 if b.tmax > a.tmax:
2176 if not virtual:
2177 na = a.ydata.size
2178 n = -idist+1
2179 if deoverlap == 'use_second':
2180 a.ydata = num.concatenate(
2181 (a.ydata[:-n], b.ydata))
2182 elif deoverlap in ('use_first', 'crossfade_cos'):
2183 a.ydata = num.concatenate(
2184 (a.ydata, b.ydata[n:]))
2185 elif deoverlap == 'add':
2186 a.ydata[-n:] += b.ydata[:n]
2187 a.ydata = num.concatenate(
2188 (a.ydata, b.ydata[n:]))
2189 else:
2190 assert False, 'unknown deoverlap method'
2192 if deoverlap == 'crossfade_cos':
2193 n = -idist+1
2194 taper = 0.5-0.5*num.cos(
2195 (1.+num.arange(n))/(1.+n)*num.pi)
2196 a.ydata[na-n:na] *= 1.-taper
2197 a.ydata[na-n:na] += b.ydata[:n] * taper
2199 a.tmax = b.tmax
2200 if a.mtime and b.mtime:
2201 a.mtime = max(a.mtime, b.mtime)
2202 continue
2203 else:
2204 # make short second trace vanish
2205 continue
2207 if b.data_len() >= 1:
2208 out_traces.append(b)
2210 for tr in out_traces:
2211 tr._update_ids()
2213 return out_traces
2216def rotate(traces, azimuth, in_channels, out_channels):
2217 '''
2218 2D rotation of traces.
2220 :param traces: list of input traces
2221 :param azimuth: difference of the azimuths of the component directions
2222 (azimuth of out_channels[0]) - (azimuth of in_channels[0])
2223 :param in_channels: names of the input channels (e.g. 'N', 'E')
2224 :param out_channels: names of the output channels (e.g. 'R', 'T')
2225 :returns: list of rotated traces
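
Example (sketch; ``tn`` and ``te`` are assumed north and east component
traces of one station)::

    rotated = rotate(
        [tn, te], azimuth=30.,
        in_channels=('N', 'E'), out_channels=('R', 'T'))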
2226 '''
2228 phi = azimuth/180.*math.pi
2229 cphi = math.cos(phi)
2230 sphi = math.sin(phi)
2231 rotated = []
2232 in_channels = tuple(_channels_to_names(in_channels))
2233 out_channels = tuple(_channels_to_names(out_channels))
2234 for a in traces:
2235 for b in traces:
2236 if ((a.channel, b.channel) == in_channels and
2237 a.nslc_id[:3] == b.nslc_id[:3] and
2238 abs(a.deltat-b.deltat) < a.deltat*0.001):
2239 tmin = max(a.tmin, b.tmin)
2240 tmax = min(a.tmax, b.tmax)
2242 if tmin < tmax:
2243 ac = a.chop(tmin, tmax, inplace=False, include_last=True)
2244 bc = b.chop(tmin, tmax, inplace=False, include_last=True)
2245 if abs(ac.tmin - bc.tmin) > ac.deltat*0.01:
2246 logger.warning(
2247 'Cannot rotate traces with displaced sampling '
2248 '(%s, %s, %s, %s)' % a.nslc_id)
2249 continue
2251 acydata = ac.get_ydata()*cphi+bc.get_ydata()*sphi
2252 bcydata = -ac.get_ydata()*sphi+bc.get_ydata()*cphi
2253 ac.set_ydata(acydata)
2254 bc.set_ydata(bcydata)
2256 ac.set_codes(channel=out_channels[0])
2257 bc.set_codes(channel=out_channels[1])
2258 rotated.append(ac)
2259 rotated.append(bc)
2261 return rotated
2264def rotate_to_rt(n, e, source, receiver, out_channels=('R', 'T')):
2265 azimuth = orthodrome.azimuth(receiver, source) + 180.
2266 in_channels = n.channel, e.channel
2267 out = rotate(
2268 [n, e], azimuth,
2269 in_channels=in_channels,
2270 out_channels=out_channels)
2272 assert len(out) == 2
2273 for tr in out:
2274 if tr.channel == out_channels[0]:
2275 r = tr
2276 elif tr.channel == out_channels[1]:
2277 t = tr
2279 return r, t
2282def rotate_to_lqt(traces, backazimuth, incidence, in_channels,
2283 out_channels=('L', 'Q', 'T')):
2284 '''
2285 Rotate traces from ZNE to LQT system.
2287 :param traces: list of traces in arbitrary order
2288 :param backazimuth: backazimuth in degrees clockwise from north
2289 :param incidence: incidence angle in degrees from vertical
2290 :param in_channels: input channel names
2291 :param out_channels: output channel names (default: ('L', 'Q', 'T'))
2292 :returns: list of transformed traces
2293 '''
2294 i = incidence/180.*num.pi
2295 b = backazimuth/180.*num.pi
2297 ci = num.cos(i)
2298 cb = num.cos(b)
2299 si = num.sin(i)
2300 sb = num.sin(b)
2302 rotmat = num.array(
2303 [[ci, -cb*si, -sb*si], [si, cb*ci, sb*ci], [0., sb, -cb]])
2304 return project(traces, rotmat, in_channels, out_channels)
2307def _decompose(a):
2308 '''
2309 Decompose matrix into independent submatrices.
2310 '''
2312 def depends(iout, a):
2313 row = a[iout, :]
2314 return set(num.arange(row.size).compress(row != 0.0))
2316 def provides(iin, a):
2317 col = a[:, iin]
2318 return set(num.arange(col.size).compress(col != 0.0))
2320 a = num.asarray(a)
2321 outs = set(range(a.shape[0]))
2322 systems = []
2323 while outs:
2324 iout = outs.pop()
2326 gout = set()
2327 for iin in depends(iout, a):
2328 gout.update(provides(iin, a))
2330 if not gout:
2331 continue
2333 gin = set()
2334 for iout2 in gout:
2335 gin.update(depends(iout2, a))
2337 if not gin:
2338 continue
2340 for iout2 in gout:
2341 if iout2 in outs:
2342 outs.remove(iout2)
2344 gin = list(gin)
2345 gin.sort()
2346 gout = list(gout)
2347 gout.sort()
2349 systems.append((gin, gout, a[gout, :][:, gin]))
2351 return systems
2354def _channels_to_names(channels):
2355 names = []
2356 for ch in channels:
2357 if isinstance(ch, model.Channel):
2358 names.append(ch.name)
2359 else:
2360 names.append(ch)
2361 return names
2364def project(traces, matrix, in_channels, out_channels):
2365 '''
2366 Affine transform of three-component traces.
2368 Compute matrix-vector product of three-component traces, to e.g. rotate
2369 traces into a different basis. The traces are distinguished and ordered by
2370 their channel attribute. The transform is applied to overlapping parts of
2371 any appropriate combinations of the input traces. This should allow this
2372 function to be robust in the presence of data gaps. It also tries to
2373 apply the transformation to subsets of the channels, if this is possible,
2374 so that, if for example a vertical component is missing, horizontal
2375 components can still be rotated.
2377 :param traces: list of traces in arbitrary order
2378 :param matrix: tranformation matrix
2379 :param in_channels: input channel names
2380 :param out_channels: output channel names
2381 :returns: list of transformed traces
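
Example (sketch; a hypothetical polarity flip of the vertical component,
leaving the horizontals untouched)::

    import numpy as num

    matrix = num.diag([-1., 1., 1.])
    out = project(traces, matrix, ('Z', 'N', 'E'), ('Z', 'N', 'E'))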
2382 '''
2384 in_channels = tuple(_channels_to_names(in_channels))
2385 out_channels = tuple(_channels_to_names(out_channels))
2386 systems = _decompose(matrix)
2388 # fall back to the full matrix if any submatrix is not square
2389 for iins, iouts, submatrix in systems:
2390 if submatrix.shape[0] != submatrix.shape[1]:
2391 return _project3(traces, matrix, in_channels, out_channels)
2393 projected = []
2394 for iins, iouts, submatrix in systems:
2395 in_cha = tuple([in_channels[iin] for iin in iins])
2396 out_cha = tuple([out_channels[iout] for iout in iouts])
2397 if submatrix.shape[0] == 1:
2398 projected.extend(_project1(traces, submatrix, in_cha, out_cha))
2399 elif submatrix.shape[1] == 2:
2400 projected.extend(_project2(traces, submatrix, in_cha, out_cha))
2401 else:
2402 projected.extend(_project3(traces, submatrix, in_cha, out_cha))
2404 return projected
2407def project_dependencies(matrix, in_channels, out_channels):
2408 '''
2409 Figure out what dependencies project() would produce.
2410 '''
2412 in_channels = tuple(_channels_to_names(in_channels))
2413 out_channels = tuple(_channels_to_names(out_channels))
2414 systems = _decompose(matrix)
2416 subpro = []
2417 for iins, iouts, submatrix in systems:
2418 if submatrix.shape[0] != submatrix.shape[1]:
2419 subpro.append((matrix, in_channels, out_channels))
2421 if not subpro:
2422 for iins, iouts, submatrix in systems:
2423 in_cha = tuple([in_channels[iin] for iin in iins])
2424 out_cha = tuple([out_channels[iout] for iout in iouts])
2425 subpro.append((submatrix, in_cha, out_cha))
2427 deps = {}
2428 for mat, in_cha, out_cha in subpro:
2429 for oc in out_cha:
2430 if oc not in deps:
2431 deps[oc] = []
2433 for ic in in_cha:
2434 deps[oc].append(ic)
2436 return deps
2439def _project1(traces, matrix, in_channels, out_channels):
2440 assert len(in_channels) == 1
2441 assert len(out_channels) == 1
2442 assert matrix.shape == (1, 1)
2444 projected = []
2445 for a in traces:
2446 if not (a.channel,) == in_channels:
2447 continue
2449 ac = a.copy()
2450 ac.set_ydata(matrix[0, 0]*a.get_ydata())
2451 ac.set_codes(channel=out_channels[0])
2452 projected.append(ac)
2454 return projected
2457def _project2(traces, matrix, in_channels, out_channels):
2458 assert len(in_channels) == 2
2459 assert len(out_channels) == 2
2460 assert matrix.shape == (2, 2)
2461 projected = []
2462 for a in traces:
2463 for b in traces:
2464 if not ((a.channel, b.channel) == in_channels and
2465 a.nslc_id[:3] == b.nslc_id[:3] and
2466 abs(a.deltat-b.deltat) < a.deltat*0.001):
2467 continue
2469 tmin = max(a.tmin, b.tmin)
2470 tmax = min(a.tmax, b.tmax)
2472 if tmin > tmax:
2473 continue
2475 ac = a.chop(tmin, tmax, inplace=False, include_last=True)
2476 bc = b.chop(tmin, tmax, inplace=False, include_last=True)
2477 if abs(ac.tmin - bc.tmin) > ac.deltat*0.01:
2478 logger.warning(
2479 'Cannot project traces with displaced sampling '
2480 '(%s, %s, %s, %s)' % a.nslc_id)
2481 continue
2483 acydata = num.dot(matrix[0], (ac.get_ydata(), bc.get_ydata()))
2484 bcydata = num.dot(matrix[1], (ac.get_ydata(), bc.get_ydata()))
2486 ac.set_ydata(acydata)
2487 bc.set_ydata(bcydata)
2489 ac.set_codes(channel=out_channels[0])
2490 bc.set_codes(channel=out_channels[1])
2492 projected.append(ac)
2493 projected.append(bc)
2495 return projected
2498def _project3(traces, matrix, in_channels, out_channels):
2499 assert len(in_channels) == 3
2500 assert len(out_channels) == 3
2501 assert matrix.shape == (3, 3)
2502 projected = []
2503 for a in traces:
2504 for b in traces:
2505 for c in traces:
2506 if not ((a.channel, b.channel, c.channel) == in_channels
2507 and a.nslc_id[:3] == b.nslc_id[:3]
2508 and b.nslc_id[:3] == c.nslc_id[:3]
2509 and abs(a.deltat-b.deltat) < a.deltat*0.001
2510 and abs(b.deltat-c.deltat) < b.deltat*0.001):
2512 continue
2514 tmin = max(a.tmin, b.tmin, c.tmin)
2515 tmax = min(a.tmax, b.tmax, c.tmax)
2517 if tmin >= tmax:
2518 continue
2520 ac = a.chop(tmin, tmax, inplace=False, include_last=True)
2521 bc = b.chop(tmin, tmax, inplace=False, include_last=True)
2522 cc = c.chop(tmin, tmax, inplace=False, include_last=True)
2523 if (abs(ac.tmin - bc.tmin) > ac.deltat*0.01
2524 or abs(bc.tmin - cc.tmin) > bc.deltat*0.01):
2526 logger.warning(
2527 'Cannot project traces with displaced sampling '
2528 '(%s, %s, %s, %s)' % a.nslc_id)
2529 continue
2531 acydata = num.dot(
2532 matrix[0],
2533 (ac.get_ydata(), bc.get_ydata(), cc.get_ydata()))
2534 bcydata = num.dot(
2535 matrix[1],
2536 (ac.get_ydata(), bc.get_ydata(), cc.get_ydata()))
2537 ccydata = num.dot(
2538 matrix[2],
2539 (ac.get_ydata(), bc.get_ydata(), cc.get_ydata()))
2541 ac.set_ydata(acydata)
2542 bc.set_ydata(bcydata)
2543 cc.set_ydata(ccydata)
2545 ac.set_codes(channel=out_channels[0])
2546 bc.set_codes(channel=out_channels[1])
2547 cc.set_codes(channel=out_channels[2])
2549 projected.append(ac)
2550 projected.append(bc)
2551 projected.append(cc)
2553 return projected
2556def correlate(a, b, mode='valid', normalization=None, use_fft=False):
2557 '''
2558 Cross correlation of two traces.
2560 :param a,b: input traces
2561 :param mode: ``'valid'``, ``'full'``, or ``'same'``
2562 :param normalization: ``'normal'``, ``'gliding'``, or ``None``
2563 :param use_fft: bool, whether to do cross correlation in spectral domain
2565 :returns: trace containing cross correlation coefficients
2567 This function computes the cross correlation between two traces. It
2568 evaluates the discrete equivalent of
2570 .. math::
2572 c(t) = \\int_{-\\infty}^{\\infty} a^{\\ast}(\\tau) b(t+\\tau) d\\tau
2574 where the star denotes complex conjugate. Note that the arguments here are
2575 swapped when compared with the :py:func:`numpy.correlate` function,
2576 which is internally called. This function should be safe even with older
2577 versions of NumPy, where the correlate function has some problems.
2579 A trace containing the cross correlation coefficients is returned. The time
2580 information of the output trace is set so that the returned cross
2581 correlation can be viewed directly as a function of time lag.
2583 Example::
2585 # align two traces a and b containing a time shifted similar signal:
2586 c = pyrocko.trace.correlate(a, b)
2587 t, coef = c.max() # get time and value of maximum
2588 b.shift(-t) # align b with a
2590 '''
2592 assert_same_sampling_rate(a, b)
2594 ya, yb = a.ydata, b.ydata
2596 # need reversed order here:
2597 yc = numpy_correlate_fixed(yb, ya, mode=mode, use_fft=use_fft)
2598 kmin, kmax = numpy_correlate_lag_range(yb, ya, mode=mode, use_fft=use_fft)
2600 if normalization == 'normal':
2601 normfac = num.sqrt(num.sum(ya**2))*num.sqrt(num.sum(yb**2))
2602 yc = yc/normfac
2604 elif normalization == 'gliding':
2605 if mode != 'valid':
2606 assert False, 'gliding normalization currently only available ' \
2607 'with "valid" mode.'
2609 if ya.size < yb.size:
2610 yshort, ylong = ya, yb
2611 else:
2612 yshort, ylong = yb, ya
2614 epsilon = 0.00001
2615 normfac_short = num.sqrt(num.sum(yshort**2))
2616 normfac = normfac_short * num.sqrt(
2617 moving_sum(ylong**2, yshort.size, mode='valid')) \
2618 + normfac_short*epsilon
2620 if yb.size <= ya.size:
2621 normfac = normfac[::-1]
2623 yc /= normfac
2625 c = a.copy()
2626 c.set_ydata(yc)
2627 c.set_codes(*merge_codes(a, b, '~'))
2628 c.shift(-c.tmin + b.tmin-a.tmin + kmin * c.deltat)
2630 return c
2633def deconvolve(
2634 a, b, waterlevel,
2635 tshift=0.,
2636 pad=0.5,
2637 fd_taper=None,
2638 pad_to_pow2=True):
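'''
Waterlevel deconvolution of two traces in the frequency domain.

Brief sketch of intended use; the ``waterlevel`` value below is arbitrary
and not a recommendation::

    # deconvolve b from a; spectral denominator values below 5% of its
    # maximum are clipped to avoid division by (near-)zero:
    c = deconvolve(a, b, waterlevel=0.05)
'''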
2640 assert_same_sampling_rate(a, b)
2641 assert abs(a.tmin - b.tmin) < a.deltat * 0.001
2642 deltat = a.deltat
2643 npad = int(round(a.data_len()*pad + tshift / deltat))
2645 ndata = max(a.data_len(), b.data_len())
2646 ndata_pad = ndata + npad
2648 if pad_to_pow2:
2649 ntrans = nextpow2(ndata_pad)
2650 else:
2651 ntrans = ndata_pad
2653 aspec = num.fft.rfft(a.ydata, ntrans)
2654 bspec = num.fft.rfft(b.ydata, ntrans)
2656 out = aspec * num.conj(bspec)
2658 bautocorr = bspec*num.conj(bspec)
2659 denom = num.maximum(bautocorr, waterlevel * bautocorr.max())
2661 out /= denom
2662 df = 1/(ntrans*deltat)
2664 if fd_taper is not None:
2665 fd_taper(out, 0.0, df)
2667 ydata = num.roll(num.fft.irfft(out), int(round(tshift/deltat)))
2668 c = a.copy(data=False)
2669 c.set_ydata(ydata[:ndata])
2670 c.set_codes(*merge_codes(a, b, '/'))
2671 return c
2674def assert_same_sampling_rate(a, b, eps=1.0e-6):
2675 assert same_sampling_rate(a, b, eps), \
2676 'Sampling rates differ: %g != %g' % (a.deltat, b.deltat)
2679def same_sampling_rate(a, b, eps=1.0e-6):
2680 '''
2681 Check if two traces have the same sampling rate.
2683 :param a,b: input traces
2684 :param eps: relative tolerance
2685 '''
2687 return abs(a.deltat - b.deltat) < (a.deltat + b.deltat)*eps
2690def fix_deltat_rounding_errors(deltat):
2691 '''
2692 Try to undo sampling rate rounding errors.
2694 Fix rounding errors of sampling intervals when these are read from single
2695 precision floating point values.
2697 Assumes that the true sampling rate or sampling interval was an integer
2698 value. No correction will be applied if this would change the sampling
2699 rate by more than 0.001%.
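
Example (sketch; the input is 0.01 s read back from single precision)::

    >>> fix_deltat_rounding_errors(0.009999999776482582)
    0.01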
2700 '''
2702 if deltat <= 1.0:
2703 deltat_new = 1.0 / round(1.0 / deltat)
2704 else:
2705 deltat_new = round(deltat)
2707 if abs(deltat_new - deltat) / deltat > 1e-5:
2708 deltat_new = deltat
2710 return deltat_new
2713def merge_codes(a, b, sep='-'):
2714 '''
2715 Merge network-station-location-channel codes of a pair of traces.
2716 '''
2718 o = []
2719 for xa, xb in zip(a.nslc_id, b.nslc_id):
2720 if xa == xb:
2721 o.append(xa)
2722 else:
2723 o.append(sep.join((xa, xb)))
2724 return o
2727class Taper(Object):
2728 '''
2729 Base class for tapers.
2731 Does nothing by default.
2732 '''
2734 def __call__(self, y, x0, dx):
2735 pass
2738class CosTaper(Taper):
2739 '''
2740 Cosine Taper.
2742 :param a: start of fading in
2743 :param b: end of fading in
2744 :param c: start of fading out
2745 :param d: end of fading out
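
Example (sketch; times in seconds, chosen arbitrarily; ``tr`` is an
assumed trace object)::

    taper = CosTaper(10., 15., 55., 60.)
    tr.taper(taper)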
2746 '''
2748 a = Float.T()
2749 b = Float.T()
2750 c = Float.T()
2751 d = Float.T()
2753 def __init__(self, a, b, c, d):
2754 Taper.__init__(self, a=a, b=b, c=c, d=d)
2756 def __call__(self, y, x0, dx):
2757 apply_costaper(self.a, self.b, self.c, self.d, y, x0, dx)
2759 def span(self, y, x0, dx):
2760 return span_costaper(self.a, self.b, self.c, self.d, y, x0, dx)
2762 def time_span(self):
2763 return self.a, self.d
2766class CosFader(Taper):
2767 '''
2768 Cosine Fader.
2770 :param xfade: fade in and fade out time in seconds (optional)
2771 :param xfrac: fade in and fade out as fraction between 0. and 1. (optional)
2773 Only one of the two arguments may be set; the other must be ``None``.
2774 '''
2776 xfade = Float.T(optional=True)
2777 xfrac = Float.T(optional=True)
2779 def __init__(self, xfade=None, xfrac=None):
2780 Taper.__init__(self, xfade=xfade, xfrac=xfrac)
2781 assert (xfade is None) != (xfrac is None)
2782 self._xfade = xfade
2783 self._xfrac = xfrac
2785 def __call__(self, y, x0, dx):
2787 xfade = self._xfade
2789 xlen = (y.size - 1)*dx
2790 if xfade is None:
2791 xfade = xlen * self._xfrac
2793 a = x0
2794 b = x0 + xfade
2795 c = x0 + xlen - xfade
2796 d = x0 + xlen
2798 apply_costaper(a, b, c, d, y, x0, dx)
2800 def span(self, y, x0, dx):
2801 return 0, y.size
2803 def time_span(self):
2804 return None, None
2807def none_min(li):
2808 if None in li:
2809 return None
2810 else:
2811 return min(x for x in li if x is not None)
2814def none_max(li):
2815 if None in li:
2816 return None
2817 else:
2818 return max(x for x in li if x is not None)
2821class MultiplyTaper(Taper):
2822 '''
2823 Multiplication of several tapers.
2824 '''
2826 tapers = List.T(Taper.T())
2828 def __init__(self, tapers=None):
2829 if tapers is None:
2830 tapers = []
2832 Taper.__init__(self, tapers=tapers)
2834 def __call__(self, y, x0, dx):
2835 for taper in self.tapers:
2836 taper(y, x0, dx)
2838 def span(self, y, x0, dx):
2839 spans = []
2840 for taper in self.tapers:
2841 spans.append(taper.span(y, x0, dx))
2843 mins, maxs = list(zip(*spans))
2844 return min(mins), max(maxs)
2846 def time_span(self):
2847 spans = []
2848 for taper in self.tapers:
2849 spans.append(taper.time_span())
2851 mins, maxs = list(zip(*spans))
2852 return none_min(mins), none_max(maxs)
2855class GaussTaper(Taper):
2856 '''
2857 Frequency domain Gaussian filter.
2858 '''
2860 alpha = Float.T()
2862 def __init__(self, alpha):
2863 Taper.__init__(self, alpha=alpha)
2864 self._alpha = alpha
2866 def __call__(self, y, x0, dx):
2867 f = x0 + num.arange(y.size)*dx
2868 y *= num.exp(-num.pi**2 / (self._alpha**2) * f**2)
2871cached_coefficients = {}
2874def _get_cached_filter_coefs(order, corners, btype):
2875 ck = (order, tuple(corners), btype)
2876 if ck not in cached_coefficients:
2877 if len(corners) == 1:
2878 cached_coefficients[ck] = signal.butter(
2879 order, corners[0], btype=btype)
2880 else:
2881 cached_coefficients[ck] = signal.butter(
2882 order, corners, btype=btype)
2884 return cached_coefficients[ck]
2887class _globals(object):
2888 _numpy_has_correlate_flip_bug = None
2891def _default_key(tr):
2892 return (tr.network, tr.station, tr.location, tr.channel)
2895def numpy_has_correlate_flip_bug():
2896 '''
2897 Check if NumPy's correlate function exhibits the old behaviour.
2898 '''
2900 if _globals._numpy_has_correlate_flip_bug is None:
2901 a = num.array([0, 0, 1, 0, 0, 0, 0])
2902 b = num.array([0, 0, 0, 0, 1, 0, 0, 0])
2903 ab = num.correlate(a, b, mode='same')
2904 ba = num.correlate(b, a, mode='same')
2905 _globals._numpy_has_correlate_flip_bug = num.all(ab == ba)
2907 return _globals._numpy_has_correlate_flip_bug
2910def numpy_correlate_fixed(a, b, mode='valid', use_fft=False):
2911 '''
2912 Call :py:func:`numpy.correlate` with fixes.
2914 c[k] = sum_i a[i+k] * conj(b[i])
2916 Note that the result produced by newer numpy.correlate is always flipped
2917 with respect to the formula given in its documentation (assuming
2918 ascending k for the output).
2919 '''
2921 if use_fft:
2922 if a.size < b.size:
2923 c = signal.fftconvolve(b[::-1], a, mode=mode)
2924 else:
2925 c = signal.fftconvolve(a, b[::-1], mode=mode)
2926 return c
2928 else:
2929 buggy = numpy_has_correlate_flip_bug()
2931 a = num.asarray(a)
2932 b = num.asarray(b)
2934 if buggy:
2935 b = num.conj(b)
2937 c = num.correlate(a, b, mode=mode)
2939 if buggy and a.size < b.size:
2940 return c[::-1]
2941 else:
2942 return c
2945def numpy_correlate_emulate(a, b, mode='valid'):
2946 '''
2947 Slow version of :py:func:`numpy.correlate` for comparison.
2948 '''
2950 a = num.asarray(a)
2951 b = num.asarray(b)
2954 kmin, kmax = numpy_correlate_lag_range(a, b, mode=mode)
2955 kmin = int(kmin)
2956 kmax = int(kmax)
2957 klen = kmax - kmin + 1
2958 c = num.zeros(klen, dtype=num.promote_types(b.dtype, a.dtype))
2959 for k in range(kmin, kmin+klen):
2960 imin = max(0, -k)
2961 ilen = min(b.size, a.size-k) - imin
2962 c[k-kmin] = num.sum(
2963 a[imin+k:imin+ilen+k] * num.conj(b[imin:imin+ilen]))
2965 return c
2968def numpy_correlate_lag_range(a, b, mode='valid', use_fft=False):
2969 '''
2970 Get range of lags for which :py:func:`numpy.correlate` produces values.
2971 '''
2973 a = num.asarray(a)
2974 b = num.asarray(b)
2976 kmin = -(b.size-1)
2977 if mode == 'full':
2978 klen = a.size-kmin
2979 elif mode == 'same':
2980 klen = max(a.size, b.size)
2981 kmin += (a.size+b.size-1 - max(a.size, b.size)) // 2 + \
2982 int(not use_fft and a.size % 2 == 0 and b.size > a.size)
2983 elif mode == 'valid':
2984 klen = abs(a.size - b.size) + 1
2985 kmin += min(a.size, b.size) - 1
2987 return kmin, kmin + klen - 1
2990def autocorr(x, nshifts):
2991 '''
2992 Compute biased estimate of the first autocorrelation coefficients.
2994 :param x: input array
2995 :param nshifts: number of coefficients to calculate
2996 '''
2998 mean = num.mean(x)
2999 std = num.std(x)
3000 n = x.size
3001 xdm = x - mean
3002 r = num.zeros(nshifts)
3003 for k in range(nshifts):
3004 r[k] = 1./((n-num.abs(k))*std) * num.sum(xdm[:n-k] * xdm[k:])
3006 return r
3009def yulewalker(x, order):
3010 '''
3011 Compute autoregression coefficients using Yule-Walker method.
3013 :param x: input array
3014 :param order: number of coefficients to produce
3016 A biased estimate of the autocorrelation is used. The Yule-Walker equations
3017 are solved by :py:func:`numpy.linalg.inv` instead of Levinson-Durbin
3018 recursion which is normally used.
3019 '''
3021 gamma = autocorr(x, order+1)
3022 d = gamma[1:1+order]
3023 a = num.zeros((order, order))
3024 gamma2 = num.concatenate((gamma[::-1], gamma[1:order]))
3025 for i in range(order):
3026 ioff = order-i
3027 a[i, :] = gamma2[ioff:ioff+order]
3029 return num.dot(num.linalg.inv(a), -d)
3032def moving_avg(x, n):
3033 n = int(n)
3034 cx = x.cumsum()
3035 nn = len(x)
3036 y = num.zeros(nn, dtype=cx.dtype)
3037 y[n//2:n//2+(nn-n)] = (cx[n:]-cx[:-n])/n
3038 y[:n//2] = y[n//2]
3039 y[n//2+(nn-n):] = y[n//2+(nn-n)-1]
3040 return y
3043def moving_sum(x, n, mode='valid'):
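'''
Running sum over ``n`` successive samples of ``x``.

Boundary handling follows the ``mode`` convention of
:py:func:`numpy.convolve` (``'valid'``, ``'full'`` or ``'same'``). A tiny
example::

    >>> moving_sum(num.array([1., 1., 1., 1.]), 2, mode='valid')
    array([2., 2., 2.])
'''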
3044 n = int(n)
3045 cx = x.cumsum()
3046 nn = len(x)
3048 if mode == 'valid':
3049 if nn-n+1 <= 0:
3050 return num.zeros(0, dtype=cx.dtype)
3051 y = num.zeros(nn-n+1, dtype=cx.dtype)
3052 y[0] = cx[n-1]
3053 y[1:nn-n+1] = cx[n:nn]-cx[0:nn-n]
3055 if mode == 'full':
3056 y = num.zeros(nn+n-1, dtype=cx.dtype)
3057 if n <= nn:
3058 y[0:n] = cx[0:n]
3059 y[n:nn] = cx[n:nn]-cx[0:nn-n]
3060 y[nn:nn+n-1] = cx[-1]-cx[nn-n:nn-1]
3061 else:
3062 y[0:nn] = cx[0:nn]
3063 y[nn:n] = cx[nn-1]
3064 y[n:nn+n-1] = cx[nn-1] - cx[0:nn-1]
3066 if mode == 'same':
3067 n1 = (n-1)//2
3068 y = num.zeros(nn, dtype=cx.dtype)
3069 if n <= nn:
3070 y[0:n-n1] = cx[n1:n]
3071 y[n-n1:nn-n1] = cx[n:nn]-cx[0:nn-n]
3072 y[nn-n1:nn] = cx[nn-1] - cx[nn-n:nn-n+n1]
3073 else:
3074 y[0:max(0, nn-n1)] = cx[min(n1, nn):nn]
3075 y[max(nn-n1, 0):min(n-n1, nn)] = cx[nn-1]
3076 y[min(n-n1, nn):nn] = cx[nn-1] - cx[0:max(0, nn-(n-n1))]
3078 return y
3081def nextpow2(i):
3082 return 2**int(math.ceil(math.log(i)/math.log(2.)))
3085def snapper_w_offset(nmax, offset, delta, snapfun=math.ceil):
3086 def snap(x):
3087 return max(0, min(int(snapfun((x-offset)/delta)), nmax))
3088 return snap
3091def snapper(nmax, delta, snapfun=math.ceil):
3092 def snap(x):
3093 return max(0, min(int(snapfun(x/delta)), nmax))
3094 return snap
3097def apply_costaper(a, b, c, d, y, x0, dx):
3098 hi = snapper_w_offset(y.size, x0, dx)
3099 y[:hi(a)] = 0.
3100 y[hi(a):hi(b)] *= 0.5 \
3101 - 0.5*num.cos((dx*num.arange(hi(a), hi(b))-(a-x0))/(b-a)*num.pi)
3102 y[hi(c):hi(d)] *= 0.5 \
3103 + 0.5*num.cos((dx*num.arange(hi(c), hi(d))-(c-x0))/(d-c)*num.pi)
3104 y[hi(d):] = 0.
3107def span_costaper(a, b, c, d, y, x0, dx):
3108 hi = snapper_w_offset(y.size, x0, dx)
3109 return hi(a), hi(d) - hi(a)
3112def costaper(a, b, c, d, nfreqs, deltaf):
3113 hi = snapper(nfreqs, deltaf)
3114 tap = num.zeros(nfreqs)
3115 tap[hi(a):hi(b)] = 0.5 \
3116 - 0.5*num.cos((deltaf*num.arange(hi(a), hi(b))-a)/(b-a)*num.pi)
3117 tap[hi(b):hi(c)] = 1.
3118 tap[hi(c):hi(d)] = 0.5 \
3119 + 0.5*num.cos((deltaf*num.arange(hi(c), hi(d))-c)/(d-c)*num.pi)
3121 return tap
3124def t2ind(t, tdelta, snap=round):
3125 return int(snap(t/tdelta))
3128def hilbert(x, N=None):
3129 '''
3130 Return the analytic signal of x of length N (its imaginary part is the Hilbert transform).
3132 (Adapted from scipy.signal, but changed to use fft and ifft from numpy.fft.)
3133 '''
3135 x = num.asarray(x)
3136 if N is None:
3137 N = len(x)
3138 if N <= 0:
3139 raise ValueError("N must be positive.")
3140 if num.iscomplexobj(x):
3141 logger.warning('imaginary part of x ignored.')
3142 x = num.real(x)
3144 Xf = num.fft.fft(x, N, axis=0)
3145 h = num.zeros(N)
3146 if N % 2 == 0:
3147 h[0] = h[N//2] = 1
3148 h[1:N//2] = 2
3149 else:
3150 h[0] = 1
3151 h[1:(N+1)//2] = 2
3153 if len(x.shape) > 1:
3154 h = h[:, num.newaxis]
3155 x = num.fft.ifft(Xf*h)
3156 return x
3159def near(a, b, eps):
3160 return abs(a-b) < eps
3163def coroutine(func):
3164 def wrapper(*args, **kwargs):
3165 gen = func(*args, **kwargs)
3166 next(gen)
3167 return gen
3169 wrapper.__name__ = func.__name__
3170 wrapper.__dict__ = func.__dict__
3171 wrapper.__doc__ = func.__doc__
3172 return wrapper
3175class States(object):
3176 '''
3177 Utility to store channel-specific state in coroutines.
3178 '''
3180 def __init__(self):
3181 self._states = {}
3183 def get(self, tr):
3184 k = tr.nslc_id
3185 if k in self._states:
3186 tmin, deltat, dtype, value = self._states[k]
3187 if (near(tmin, tr.tmin, deltat/100.)
3188 and near(deltat, tr.deltat, deltat/10000.)
3189 and dtype == tr.ydata.dtype):
3191 return value
3193 return None
3195 def set(self, tr, value):
3196 k = tr.nslc_id
3197 if k in self._states and self._states[k][-1] is not value:
3198 self.free(self._states[k][-1])
3200 self._states[k] = (tr.tmax+tr.deltat, tr.deltat, tr.ydata.dtype, value)
3202 def free(self, value):
3203 pass
3206@coroutine
3207def co_list_append(list):
3208 while True:
3209 list.append((yield))
3212class ScipyBug(Exception):
3213 pass
3216@coroutine
3217def co_lfilter(target, b, a):
3218 '''
3219 Successively filter broken continuous trace data (coroutine).
3221 Create coroutine which takes :py:class:`Trace` objects, filters their data
3222 through :py:func:`scipy.signal.lfilter` and sends new :py:class:`Trace`
3223 objects containing the filtered data to target. This is useful, if one
3224 wants to filter a long continuous time series, which is split into many
3225 successive traces without producing filter artifacts at trace boundaries.
3227 Filter states are kept *per channel*, specifically, for each (network,
3228 station, location, channel) combination occurring in the input traces, a
3229 separate state is created and maintained. This makes it possible to filter
3230 multichannel or multistation data with only one :py:func:`co_lfilter`
3231 instance.
3233 Filter state is reset when gaps occur.
3235 Use it like this::
3237 from pyrocko.trace import co_lfilter, co_list_append
3239 filtered_traces = []
3240 pipe = co_lfilter(co_list_append(filtered_traces), b, a)
3241 for trace in traces:
3242 pipe.send(trace)
3244 pipe.close()
3246 '''
3248 try:
3249 states = States()
3250 output = None
3251 while True:
3252 input = (yield)
3254 zi = states.get(input)
3255 if zi is None:
3256 zi = num.zeros(max(len(a), len(b))-1, dtype=float)
3258 output = input.copy(data=False)
3259 try:
3260 ydata, zf = signal.lfilter(b, a, input.get_ydata(), zi=zi)
3261 except ValueError:
3262 raise ScipyBug(
3263 'signal.lfilter failed: could be related to a bug '
3264 'in some older scipy versions, e.g. on opensuse42.1')
3266 output.set_ydata(ydata)
3267 states.set(input, zf)
3268 target.send(output)
3270 except GeneratorExit:
3271 target.close()
3274def co_antialias(target, q, n=None, ftype='fir'):
3275 b, a, n = util.decimate_coeffs(q, n, ftype)
3276 anti = co_lfilter(target, b, a)
3277 return anti
3280@coroutine
3281def co_dropsamples(target, q, nfir):
3282 try:
3283 states = States()
3284 while True:
3285 tr = (yield)
3286 newdeltat = q * tr.deltat
3287 ioffset = states.get(tr)
3288 if ioffset is None:
3289 # for fir filter, the first nfir samples are polluted by
3290 # boundary effects; cut them off.
3291 # for iir this may be (much) more; we do not correct for that.
3292 # place sample instances at times which are multiples of the
3293 # new sampling interval.
3294 newtmin_want = math.ceil(
3295 (tr.tmin+(nfir+1)*tr.deltat) / newdeltat) * newdeltat \
3296 - (nfir/2*tr.deltat)
3297 ioffset = int(round((newtmin_want - tr.tmin)/tr.deltat))
3298 if ioffset < 0:
3299 ioffset = ioffset % q
3301 newtmin_have = tr.tmin + ioffset * tr.deltat
3302 newtr = tr.copy(data=False)
3303 newtr.deltat = newdeltat
3304 # because the fir kernel shifts data by nfir/2 samples:
3305 newtr.tmin = newtmin_have - (nfir/2*tr.deltat)
3306 newtr.set_ydata(tr.get_ydata()[ioffset::q].copy())
3307 states.set(tr, (ioffset % q - tr.data_len() % q) % q)
3308 target.send(newtr)
3310 except GeneratorExit:
3311 target.close()
3314def co_downsample(target, q, n=None, ftype='fir'):
3315 '''
3316 Successively downsample broken continuous trace data (coroutine).
3318 Create coroutine which takes :py:class:`Trace` objects, downsamples their
3319 data and sends new :py:class:`Trace` objects containing the downsampled
3320 data to target. This is useful, if one wants to downsample a long
3321 continuous time series, which is split into many successive traces without
3322 producing filter artifacts and gaps at trace boundaries.
3324 Filter states are kept *per channel*, specifically, for each (network,
3325 station, location, channel) combination occurring in the input traces, a
3326 separate state is created and maintained. This makes it possible to filter
3327 multichannel or multistation data with only one :py:func:`co_lfilter`
3328 instance.
3330 Filter state is reset when gaps occur. The sampling instances are chosen
3331 so that they occur at (or as close as possible to) even multiples of the
3332 sampling interval of the downsampled trace (based on system time).
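
Sketch of intended use (cf. :py:func:`co_lfilter`)::

    from pyrocko.trace import co_downsample, co_list_append

    downsampled = []
    pipe = co_downsample(co_list_append(downsampled), q=5)
    for tr in traces:
        pipe.send(tr)

    pipe.close()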
3333 '''
3335 b, a, n = util.decimate_coeffs(q, n, ftype)
3336 return co_antialias(co_dropsamples(target, q, n), q, n, ftype)
3339@coroutine
3340def co_downsample_to(target, deltat):
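'''
Successively downsample broken continuous trace data to a given sampling
interval (coroutine).

Like :py:func:`co_downsample`, but the decimation stages are chosen
automatically from the ratio of each input trace's sampling interval to
``deltat``. Usage follows the same send/close pattern as
:py:func:`co_downsample`.
'''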
3342 decimators = {}
3343 try:
3344 while True:
3345 tr = (yield)
3346 ratio = deltat / tr.deltat
3347 rratio = round(ratio)
3348 if abs(rratio - ratio)/ratio > 0.0001:
3349 raise util.UnavailableDecimation('ratio = %g' % ratio)
3351 deci_seq = tuple(x for x in util.decitab(int(rratio)) if x != 1)
3352 if deci_seq not in decimators:
3353 pipe = target
3354 for q in deci_seq[::-1]:
3355 pipe = co_downsample(pipe, q)
3357 decimators[deci_seq] = pipe
3359 decimators[deci_seq].send(tr)
3361 except GeneratorExit:
3362 for g in decimators.values():
3363 g.close()
3366class DomainChoice(StringChoice):
3367 choices = [
3368 'time_domain',
3369 'frequency_domain',
3370 'envelope',
3371 'absolute',
3372 'cc_max_norm']
3375class MisfitSetup(Object):
3376 '''
3377 Contains misfit setup to be used in :py:meth:`Trace.misfit`.
3379 :param description: Description of the setup
3380 :param norm: L-norm classifier
3381 :param taper: Object of :py:class:`Taper`
3382 :param filter: Object of :py:class:`FrequencyResponse`
3383 :param domain: ['time_domain', 'frequency_domain', 'envelope', 'absolute',
3384 'cc_max_norm']
3386 Can be dumped to a yaml file.
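
Example (sketch; parameter values are arbitrary)::

    setup = MisfitSetup(
        description='time domain misfit, L2 norm',
        norm=2,
        taper=CosTaper(0., 1., 9., 10.),
        domain='time_domain')

    print(setup.dump())  # YAML serialization provided by guts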
3387 '''
3389 xmltagname = 'misfitsetup'
3390 description = String.T(optional=True)
3391 norm = Int.T(optional=False)
3392 taper = Taper.T(optional=False)
3393 filter = FrequencyResponse.T(optional=True)
3394 domain = DomainChoice.T(default='time_domain')
3397def equalize_sampling_rates(trace_1, trace_2):
3398 '''
3399 Equalize sampling rates of two traces (reduce higher sampling rate to
3400 lower).
3402 :param trace_1: :py:class:`Trace` object
3403 :param trace_2: :py:class:`Trace` object
3405 :returns: a pair of traces; if resampling was needed, the affected trace is replaced by a downsampled copy.
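
Example (sketch; ``tr1`` at 100 Hz and ``tr2`` at 20 Hz are assumed)::

    tr1_out, tr2_out = equalize_sampling_rates(tr1, tr2)
    # tr1_out is a downsampled copy at 20 Hz, tr2_out is tr2 unchanged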
3406 '''
3408 if same_sampling_rate(trace_1, trace_2):
3409 return trace_1, trace_2
3411 if trace_1.deltat < trace_2.deltat:
3412 t1_out = trace_1.copy()
3413 t1_out.downsample_to(deltat=trace_2.deltat, snap=True)
3414 logger.debug('Trace downsampled (return copy of trace): %s'
3415 % '.'.join(t1_out.nslc_id))
3416 return t1_out, trace_2
3418 elif trace_1.deltat > trace_2.deltat:
3419 t2_out = trace_2.copy()
3420 t2_out.downsample_to(deltat=trace_1.deltat, snap=True)
3421 logger.debug('Trace downsampled (return copy of trace): %s'
3422 % '.'.join(t2_out.nslc_id))
3423 return trace_1, t2_out
3426def Lx_norm(u, v, norm=2):
3427 '''
3428 Calculate the misfit value *m* and the normalization divisor *n*
3429 according to the given norm.
3431 The normalization divisor *n* is calculated from ``v``.
3433 :param u: :py:class:`numpy.ndarray`
3434 :param v: :py:class:`numpy.ndarray`
3435 :param norm: L-norm to use (default: 2)
3437 ``u`` and ``v`` must be of same size.
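
Example (sketch)::

    m, n = Lx_norm(u, v, norm=2)
    misfit = m / n  # normalized L2 misfit of u with respect to v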
3438 '''
3440 if norm == 1:
3441 return (
3442 num.sum(num.abs(v-u)),
3443 num.sum(num.abs(v)))
3445 elif norm == 2:
3446 return (
3447 num.sqrt(num.sum((v-u)**2)),
3448 num.sqrt(num.sum(v**2)))
3450 else:
3451 return (
3452 num.power(num.sum(num.abs(num.power(v - u, norm))), 1./norm),
3453 num.power(num.sum(num.abs(num.power(v, norm))), 1./norm))
3456def do_downsample(tr, deltat):
3457 if abs(tr.deltat - deltat) / tr.deltat > 1e-6:
3458 tr = tr.copy()
3459 tr.downsample_to(deltat, snap=True, demean=False)
3460 else:
3461 if tr.tmin/tr.deltat > 1e-6 or tr.tmax/tr.deltat > 1e-6:
3462 tr = tr.copy()
3463 tr.snap()
3464 return tr
3467def do_extend(tr, tmin, tmax):
3468 if tmin < tr.tmin or tmax > tr.tmax:
3469 tr = tr.copy()
3470 tr.extend(tmin=tmin, tmax=tmax, fillmethod='repeat')
3472 return tr
3475def do_pre_taper(tr, taper):
3476 return tr.taper(taper, inplace=False, chop=True)
3479def do_fft(tr, filter):
3480 if filter is None:
3481 return tr
3482 else:
3483 ndata = tr.ydata.size
3484 nfft = nextpow2(ndata)
3485 padded = num.zeros(nfft, dtype=float)
3486 padded[:ndata] = tr.ydata
3487 spectrum = num.fft.rfft(padded)
3488 df = 1.0 / (tr.deltat * nfft)
3489 frequencies = num.arange(spectrum.size)*df
3490 return [tr, frequencies, spectrum]
3493def do_filter(inp, filter):
3494 if filter is None:
3495 return inp
3496 else:
3497 tr, frequencies, spectrum = inp
3498 spectrum *= filter.evaluate(frequencies)
3499 return [tr, frequencies, spectrum]
3502def do_ifft(inp):
3503 if isinstance(inp, Trace):
3504 return inp
3505 else:
3506 tr, _, spectrum = inp
3507 ndata = tr.ydata.size
3508 tr = tr.copy(data=False)
3509 tr.set_ydata(num.fft.irfft(spectrum)[:ndata])
3510 return tr
3513def check_alignment(t1, t2):
3514 if abs(t1.tmin-t2.tmin) > t1.deltat * 1e-4 or \
3515 abs(t1.tmax - t2.tmax) > t1.deltat * 1e-4 or \
3516 t1.ydata.shape != t2.ydata.shape:
3517 raise MisalignedTraces(
3518 'Cannot calculate misfit of %s and %s due to misaligned '
3519 'traces.' % ('.'.join(t1.nslc_id), '.'.join(t2.nslc_id)))