# http://pyrocko.org - GPLv3
#
# The Pyrocko Developers, 21st Century
# ---|P------/S----------~Lg----------
'''
This module provides basic signal processing for seismic traces.
'''

from __future__ import division, absolute_import

import time
import math
import copy
import logging
import hashlib
from collections import defaultdict

import numpy as num
from scipy import signal

from pyrocko import util, orthodrome, pchain, model
from pyrocko.util import reuse
from pyrocko.guts import Object, Float, Int, String, List, \
    StringChoice, Timestamp
from pyrocko.guts_array import Array
from pyrocko.model import squirrel_content

# backward compatibility
from pyrocko.util import UnavailableDecimation  # noqa
from pyrocko.response import (  # noqa
    FrequencyResponse, Evalresp, InverseEvalresp, PoleZeroResponse,
    ButterworthResponse, SampledResponse, IntegrationResponse,
    DifferentiationResponse, MultiplyResponse)


guts_prefix = 'pf'

logger = logging.getLogger('pyrocko.trace')


@squirrel_content
class Trace(Object):

    '''
    Create new trace object.

    A ``Trace`` object represents a single continuous strip of evenly sampled
    time series data. It is built from a 1D NumPy array containing the data
    samples and some attributes describing its beginning and ending time, its
    sampling rate and four string identifiers (its network, station, location
    and channel code).

    :param network: network code
    :param station: station code
    :param location: location code
    :param channel: channel code
    :param extra: extra code
    :param tmin: system time of first sample in [s]
    :param tmax: system time of last sample in [s] (if set to ``None`` it is
        computed from length of ``ydata``)
    :param deltat: sampling interval in [s]
    :param ydata: 1D numpy array with data samples (can be ``None`` when
        ``tmax`` is not ``None``)
    :param mtime: optional modification time

    :param meta: additional meta information (not used, but maintained by the
        library)

    The length of the network, station, location and channel codes is not
    restricted by this software, but data formats like SAC, Mini-SEED or GSE
    have different limits on the lengths of these codes. The codes set here
    are silently truncated when the trace is stored.
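
    A minimal construction sketch (the station code and data values are
    arbitrary placeholders, not part of the original documentation)::

        from pyrocko import trace, util
        import numpy as num

        tr = trace.Trace(
            station='TEST', channel='Z', deltat=0.5,
            tmin=util.str_to_time('2010-01-01 00:00:00'),
            ydata=num.random.random(1000))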
    '''

    network = String.T(default='', help='Deployment/network code (1-8)')
    station = String.T(default='STA', help='Station code (1-5)')
    location = String.T(default='', help='Location code (0-2)')
    channel = String.T(default='', help='Channel code (3)')
    extra = String.T(default='', help='Extra/custom code')

    tmin = Timestamp.T(default=Timestamp.D('1970-01-01 00:00:00'))
    tmax = Timestamp.T()

    deltat = Float.T(default=1.0)
    ydata = Array.T(optional=True, shape=(None,), serialize_as='base64+meta')

    mtime = Timestamp.T(optional=True)

    cached_frequencies = {}

    def __init__(self, network='', station='STA', location='', channel='',
                 tmin=0., tmax=None, deltat=1., ydata=None, mtime=None,
                 meta=None, extra=''):

        Object.__init__(self, init_props=False)

        time_float = util.get_time_float()

        if not isinstance(tmin, time_float):
            tmin = Trace.tmin.regularize_extra(tmin)

        if tmax is not None and not isinstance(tmax, time_float):
            tmax = Trace.tmax.regularize_extra(tmax)

        if mtime is not None and not isinstance(mtime, time_float):
            mtime = Trace.mtime.regularize_extra(mtime)

        self._growbuffer = None

        tmin = time_float(tmin)
        if tmax is not None:
            tmax = time_float(tmax)

        if mtime is None:
            mtime = time_float(time.time())

        self.network, self.station, self.location, self.channel, \
            self.extra = [
                reuse(x) for x in (
                    network, station, location, channel, extra)]

        self.tmin = tmin
        self.deltat = deltat

        if tmax is None:
            if ydata is not None:
                self.tmax = self.tmin + (ydata.size-1)*self.deltat
            else:
                raise Exception(
                    'fixme: trace must be created with tmax or ydata')
        else:
            n = int(round((tmax - self.tmin) / self.deltat)) + 1
            self.tmax = self.tmin + (n - 1) * self.deltat

        self.meta = meta
        self.ydata = ydata
        self.mtime = mtime
        self._update_ids()
        self.file = None
        self._pchain = None

    def __str__(self):
        fmt = min(9, max(0, -int(math.floor(math.log10(self.deltat)))))
        s = 'Trace (%s, %s, %s, %s)\n' % self.nslc_id
        s += '  timerange: %s - %s\n' % (
            util.time_to_str(self.tmin, format=fmt),
            util.time_to_str(self.tmax, format=fmt))

        s += '  delta t: %g\n' % self.deltat
        if self.meta:
            for k in sorted(self.meta.keys()):
                s += '  %s: %s\n' % (k, self.meta[k])
        return s

    @property
    def codes(self):
        from pyrocko.squirrel import model
        return model.CodesNSLCE(
            self.network, self.station, self.location, self.channel,
            self.extra)

    @property
    def time_span(self):
        return self.tmin, self.tmax

    @property
    def summary(self):
        return '%s %-16s %s %g' % (
            self.__class__.__name__, self.str_codes, self.str_time_span,
            self.deltat)

    def __getstate__(self):
        return (self.network, self.station, self.location, self.channel,
                self.tmin, self.tmax, self.deltat, self.mtime,
                self.ydata, self.meta, self.extra)

    def __setstate__(self, state):
        if len(state) == 11:
            self.network, self.station, self.location, self.channel, \
                self.tmin, self.tmax, self.deltat, self.mtime, \
                self.ydata, self.meta, self.extra = state

        elif len(state) == 12:
            # backward compatibility 0
            self.network, self.station, self.location, self.channel, \
                self.tmin, self.tmax, self.deltat, self.mtime, \
                self.ydata, self.meta, _, self.extra = state

        elif len(state) == 10:
            # backward compatibility 1
            self.network, self.station, self.location, self.channel, \
                self.tmin, self.tmax, self.deltat, self.mtime, \
                self.ydata, self.meta = state

            self.extra = ''

        else:
            # backward compatibility 2
            self.network, self.station, self.location, self.channel, \
                self.tmin, self.tmax, self.deltat, self.mtime = state
            self.ydata = None
            self.meta = None
            self.extra = ''

        self._growbuffer = None
        self._update_ids()

    def hash(self, unsafe=False):
        sha1 = hashlib.sha1()
        for code in self.nslc_id:
            sha1.update(code.encode())
        sha1.update(self.tmin.hex().encode())
        sha1.update(self.tmax.hex().encode())
        sha1.update(self.deltat.hex().encode())

        if self.ydata is not None:
            if not unsafe:
                sha1.update(memoryview(self.ydata))
            else:
                sha1.update(memoryview(self.ydata[:32]))
                sha1.update(memoryview(self.ydata[-32:]))

        return sha1.hexdigest()

    def name(self):
        '''
        Get a short string description.
        '''

        s = '%s.%s.%s.%s, %s, %s' % (self.nslc_id + (
            util.time_to_str(self.tmin),
            util.time_to_str(self.tmax)))

        return s

    def __eq__(self, other):
        return (
            isinstance(other, Trace)
            and self.network == other.network
            and self.station == other.station
            and self.location == other.location
            and self.channel == other.channel
            and (abs(self.deltat - other.deltat)
                 < (self.deltat + other.deltat)*1e-6)
            and abs(self.tmin-other.tmin) < self.deltat*0.01
            and abs(self.tmax-other.tmax) < self.deltat*0.01
            and num.all(self.ydata == other.ydata))

    def almost_equal(self, other, rtol=1e-5, atol=0.0):
        return (
            self.network == other.network
            and self.station == other.station
            and self.location == other.location
            and self.channel == other.channel
            and (abs(self.deltat - other.deltat)
                 < (self.deltat + other.deltat)*1e-6)
            and abs(self.tmin-other.tmin) < self.deltat*0.01
            and abs(self.tmax-other.tmax) < self.deltat*0.01
            and num.allclose(self.ydata, other.ydata, rtol=rtol, atol=atol))

    def assert_almost_equal(self, other, rtol=1e-5, atol=0.0):

        assert self.network == other.network, \
            'network codes differ: %s, %s' % (self.network, other.network)
        assert self.station == other.station, \
            'station codes differ: %s, %s' % (self.station, other.station)
        assert self.location == other.location, \
            'location codes differ: %s, %s' % (self.location, other.location)
        assert self.channel == other.channel, 'channel codes differ'
        assert (abs(self.deltat - other.deltat)
                < (self.deltat + other.deltat)*1e-6), \
            'sampling intervals differ %g, %g' % (self.deltat, other.deltat)
        assert abs(self.tmin-other.tmin) < self.deltat*0.01, \
            'start times differ: %s, %s' % (
                util.time_to_str(self.tmin), util.time_to_str(other.tmin))
        assert abs(self.tmax-other.tmax) < self.deltat*0.01, \
            'end times differ: %s, %s' % (
                util.time_to_str(self.tmax), util.time_to_str(other.tmax))

        assert num.allclose(self.ydata, other.ydata, rtol=rtol, atol=atol), \
            'trace values differ'

    def __hash__(self):
        return id(self)

    def __call__(self, t, clip=False, snap=round):
        it = int(snap((t-self.tmin)/self.deltat))
        if clip:
            it = max(0, min(it, self.ydata.size-1))
        else:
            if it < 0 or self.ydata.size <= it:
                raise IndexError()

        return self.tmin+it*self.deltat, self.ydata[it]

    def interpolate(self, t, clip=False):
        '''
        Value of trace between supporting points through linear interpolation.

        :param t: time instant
        :param clip: whether to clip indices to trace ends
        '''

        t0, y0 = self(t, clip=clip, snap=math.floor)
        t1, y1 = self(t, clip=clip, snap=math.ceil)
        if t0 == t1:
            return y0
        else:
            return y0+(t-t0)/(t1-t0)*(y1-y0)

    def index_clip(self, i):
        '''
        Clip index to valid range.
        '''

        return min(max(0, i), self.ydata.size)

    def add(self, other, interpolate=True, left=0., right=0.):
        '''
        Add values of other trace (self += other).

        Add values of ``other`` trace to the values of ``self``, where it
        intersects with ``other``. This method does not change the extent of
        ``self``. If ``interpolate`` is ``True`` (the default), the values of
        ``other`` to be added are interpolated at sampling instants of
        ``self``. Linear interpolation is performed. In this case the
        sampling rate of ``other`` must be equal to or lower than that of
        ``self``. If ``interpolate`` is ``False``, the sampling rates of the
        two traces must match.
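
        A minimal sketch of stacking one trace onto another (the two trace
        objects are hypothetical placeholders, not from the original
        documentation)::

            base = Trace(tmin=0., deltat=0.1, ydata=num.zeros(100))
            wiggle = Trace(tmin=2., deltat=0.1, ydata=num.ones(10))

            base.add(wiggle)  # stacks where the two traces overlap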
        '''

        if interpolate:
            assert self.deltat <= other.deltat \
                or same_sampling_rate(self, other)

            other_xdata = other.get_xdata()
            xdata = self.get_xdata()
            self.ydata += num.interp(
                xdata, other_xdata, other.ydata, left=left, right=right)
        else:
            assert self.deltat == other.deltat
            ioff = int(round((other.tmin-self.tmin)/self.deltat))
            ibeg = max(0, ioff)
            iend = min(self.data_len(), ioff+other.data_len())
            self.ydata[ibeg:iend] += other.ydata[ibeg-ioff:iend-ioff]

    def mult(self, other, interpolate=True):
        '''
        Multiply with values of other trace ``(self *= other)``.

        Multiply the values of ``self`` by the values of ``other``, where it
        intersects with ``other``. This method does not change the extent of
        ``self``. If ``interpolate`` is ``True`` (the default), the values of
        ``other`` to be multiplied are interpolated at sampling instants of
        ``self``. Linear interpolation is performed. In this case the
        sampling rate of ``other`` must be equal to or lower than that of
        ``self``. If ``interpolate`` is ``False``, the sampling rates of the
        two traces must match.
        '''

        if interpolate:
            assert self.deltat <= other.deltat or \
                same_sampling_rate(self, other)

            other_xdata = other.get_xdata()
            xdata = self.get_xdata()
            self.ydata *= num.interp(
                xdata, other_xdata, other.ydata, left=0., right=0.)
        else:
            assert self.deltat == other.deltat
            ibeg1 = int(round((other.tmin-self.tmin)/self.deltat))
            ibeg2 = int(round((self.tmin-other.tmin)/self.deltat))
            iend1 = int(round((other.tmax-self.tmin)/self.deltat))+1
            iend2 = int(round((self.tmax-other.tmin)/self.deltat))+1

            ibeg1 = self.index_clip(ibeg1)
            iend1 = self.index_clip(iend1)
            ibeg2 = self.index_clip(ibeg2)
            iend2 = self.index_clip(iend2)

            self.ydata[ibeg1:iend1] *= other.ydata[ibeg2:iend2]

    def max(self):
        '''
        Get time and value of data maximum.
        '''

        i = num.argmax(self.ydata)
        return self.tmin + i*self.deltat, self.ydata[i]

    def min(self):
        '''
        Get time and value of data minimum.
        '''

        i = num.argmin(self.ydata)
        return self.tmin + i*self.deltat, self.ydata[i]

    def absmax(self):
        '''
        Get time and value of maximum of the absolute of data.
        '''

        tmi, mi = self.min()
        tma, ma = self.max()
        if abs(mi) > abs(ma):
            return tmi, abs(mi)
        else:
            return tma, abs(ma)

    def set_codes(
            self, network=None, station=None, location=None, channel=None,
            extra=None):

        '''
        Set network, station, location, and channel codes.
        '''

        if network is not None:
            self.network = network
        if station is not None:
            self.station = station
        if location is not None:
            self.location = location
        if channel is not None:
            self.channel = channel
        if extra is not None:
            self.extra = extra

        self._update_ids()

    def set_network(self, network):
        self.network = network
        self._update_ids()

    def set_station(self, station):
        self.station = station
        self._update_ids()

    def set_location(self, location):
        self.location = location
        self._update_ids()

    def set_channel(self, channel):
        self.channel = channel
        self._update_ids()

    def overlaps(self, tmin, tmax):
        '''
        Check if trace has overlap with a given time span.
        '''
        return not (tmax < self.tmin or self.tmax < tmin)

    def is_relevant(self, tmin, tmax, selector=None):
        '''
        Check if trace has overlap with a given time span and matches a
        condition callback. (internal use)
        '''

        return not (tmax <= self.tmin or self.tmax < tmin) \
            and (selector is None or selector(self))

    def _update_ids(self):
        '''
        Update dependent ids.
        '''

        self.full_id = (
            self.network, self.station, self.location, self.channel,
            self.tmin)
        self.nslc_id = reuse(
            (self.network, self.station, self.location, self.channel))

    def prune_from_reuse_cache(self):
        util.deuse(self.nslc_id)
        util.deuse(self.network)
        util.deuse(self.station)
        util.deuse(self.location)
        util.deuse(self.channel)

    def set_mtime(self, mtime):
        '''
        Set modification time of the trace.
        '''

        self.mtime = mtime

    def get_xdata(self):
        '''
        Create array for time axis.
        '''

        if self.ydata is None:
            raise NoData()

        return self.tmin \
            + num.arange(len(self.ydata), dtype=num.float64) * self.deltat

    def get_ydata(self):
        '''
        Get data array.
        '''

        if self.ydata is None:
            raise NoData()

        return self.ydata

    def set_ydata(self, new_ydata):
        '''
        Replace data array.
        '''

        self.drop_growbuffer()
        self.ydata = new_ydata
        self.tmax = self.tmin+(len(self.ydata)-1)*self.deltat

    def data_len(self):
        if self.ydata is not None:
            return self.ydata.size
        else:
            return int(round((self.tmax-self.tmin)/self.deltat)) + 1

    def drop_data(self):
        '''
        Forget data, make dataless trace.
        '''

        self.drop_growbuffer()
        self.ydata = None

    def drop_growbuffer(self):
        '''
        Detach the trace's grow buffer.
        '''

        self._growbuffer = None
        self._pchain = None

    def copy(self, data=True):
        '''
        Make a deep copy of the trace.
        '''

        tracecopy = copy.copy(self)
        tracecopy.drop_growbuffer()
        if data:
            tracecopy.ydata = self.ydata.copy()
        tracecopy.meta = copy.deepcopy(self.meta)
        return tracecopy

    def crop_zeros(self):
        '''
        Remove any zeros at beginning and end.
        '''

        indices = num.where(self.ydata != 0.0)[0]
        if indices.size == 0:
            raise NoData()

        ibeg = indices[0]
        iend = indices[-1]+1
        if ibeg == 0 and iend == self.ydata.size:
            return

        self.drop_growbuffer()
        self.ydata = self.ydata[ibeg:iend].copy()
        self.tmin = self.tmin+ibeg*self.deltat
        self.tmax = self.tmin+(len(self.ydata)-1)*self.deltat
        self._update_ids()

    def append(self, data):
        '''
        Append data to the end of the trace.

        To make this method efficient when successively only few or even
        single samples are appended, a larger grow buffer is allocated upon
        first invocation. The trace's data is then changed to be a view into
        the currently filled portion of the grow buffer array.
        '''

        assert self.ydata.dtype == data.dtype
        newlen = data.size + self.ydata.size
        if self._growbuffer is None or self._growbuffer.size < newlen:
            self._growbuffer = num.empty(newlen*2, dtype=self.ydata.dtype)
            self._growbuffer[:self.ydata.size] = self.ydata
        self._growbuffer[self.ydata.size:newlen] = data
        self.ydata = self._growbuffer[:newlen]
        self.tmax = self.tmin + (newlen-1)*self.deltat

    def chop(
            self, tmin, tmax, inplace=True, include_last=False,
            snap=(round, round), want_incomplete=True):

        '''
        Cut the trace to given time span.

        If the ``inplace`` argument is True (the default) the trace is cut in
        place, otherwise a new trace with the cut part is returned. By
        default, the indices where to start and end the trace data array are
        determined by rounding of ``tmin`` and ``tmax`` to sampling instances
        using Python's :py:func:`round` function. This behaviour can be
        changed with the ``snap`` argument, which takes a tuple of two
        functions (one for the lower and one for the upper end) to be used
        instead of :py:func:`round`. The last sample is by default not
        included unless ``include_last`` is set to True. If the given time
        span exceeds the available time span of the trace, the available
        part is returned, unless ``want_incomplete`` is set to False, in
        which case a :py:exc:`NoData` exception is raised. This exception is
        always raised when the requested time span does not overlap with the
        trace's time span.
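
        A minimal sketch, cutting out a sub-window as a new trace (``tr``
        stands for any data-carrying trace; this example is not from the
        original docstring)::

            sub = tr.chop(tr.tmin + 10., tr.tmin + 20., inplace=False)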
        '''

        if want_incomplete:
            if tmax <= self.tmin-self.deltat or self.tmax+self.deltat < tmin:
                raise NoData()
        else:
            if tmin < self.tmin or self.tmax < tmax:
                raise NoData()

        ibeg = max(0, t2ind(tmin-self.tmin, self.deltat, snap[0]))
        iplus = 0
        if include_last:
            iplus = 1

        iend = min(
            self.data_len(),
            t2ind(tmax-self.tmin, self.deltat, snap[1])+iplus)

        if ibeg >= iend:
            raise NoData()

        obj = self
        if not inplace:
            obj = self.copy(data=False)

        self.drop_growbuffer()
        if self.ydata is not None:
            obj.ydata = self.ydata[ibeg:iend].copy()
        else:
            obj.ydata = None

        obj.tmin = obj.tmin+ibeg*obj.deltat
        obj.tmax = obj.tmin+((iend-ibeg)-1)*obj.deltat

        obj._update_ids()

        return obj

    def downsample(self, ndecimate, snap=False, initials=None, demean=False,
                   ftype='fir-remez'):
        '''
        Downsample trace by a given integer factor.

        [figure: comparison of the available FIR filters ``fir`` and
        ``fir-remez``]

        :param ndecimate: decimation factor, avoid values larger than 8
        :param snap: whether to put the new sampling instances closest to
            multiples of the sampling rate.
        :param initials: ``None``, ``True``, or initial conditions for the
            anti-aliasing filter, obtained from a previous run. In the latter
            two cases the final state of the filter is returned instead of
            ``None``.
        :param demean: whether to demean the signal before filtering.
        :param ftype: which FIR filter to use, choose from ``fir``,
            ``fir-remez``. Default is ``fir-remez``.
        '''
        newdeltat = self.deltat*ndecimate
        if snap:
            ilag = int(round(
                (math.ceil(self.tmin / newdeltat) * newdeltat - self.tmin)
                / self.deltat))
        else:
            ilag = 0

        if snap and ilag > 0 and ilag < self.ydata.size:
            data = self.ydata.astype(num.float64)
            self.tmin += ilag*self.deltat
        else:
            data = self.ydata.astype(num.float64)

        if demean:
            data -= num.mean(data)

        if data.size != 0:
            result = util.decimate(
                data, ndecimate, ftype=ftype, zi=initials, ioff=ilag)
        else:
            result = data

        if initials is None:
            self.ydata, finals = result, None
        else:
            self.ydata, finals = result

        self.deltat = reuse(self.deltat*ndecimate)
        self.tmax = self.tmin+(len(self.ydata)-1)*self.deltat
        self._update_ids()

        return finals

    def downsample_to(self, deltat, snap=False, allow_upsample_max=1,
                      initials=None, demean=False, ftype='fir-remez'):

        '''
        Downsample to given sampling rate.

        Tries to downsample the trace to a target sampling interval of
        ``deltat``. This runs the :py:meth:`Trace.downsample` one or several
        times. If ``allow_upsample_max`` is set to a value larger than 1,
        intermediate upsampling steps are allowed, in order to increase the
        number of possible downsampling ratios.

        If the requested ratio is not supported, an exception of type
        :py:exc:`pyrocko.util.UnavailableDecimation` is raised.

        :param deltat: desired deltat in [s]
        :param allow_upsample_max: maximum upsampling of the signal to
            achieve the desired deltat. Default is ``1``.
        :param snap: whether to put the new sampling instances closest to
            multiples of the sampling rate.
        :param initials: ``None``, ``True``, or initial conditions for the
            anti-aliasing filter, obtained from a previous run. In the latter
            two cases the final state of the filter is returned instead of
            ``None``.
        :param demean: whether to demean the signal before filtering.
        :param ftype: which FIR filter to use, choose from ``fir``,
            ``fir-remez``. Default is ``fir-remez``.
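
        A minimal sketch, downsampling a 100 Hz trace (``deltat`` of 0.01 s)
        to a 1 s sampling interval (``tr`` is a hypothetical trace object)::

            tr.downsample_to(1.0)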
        '''

        ratio = deltat/self.deltat
        rratio = round(ratio)

        ok = False
        for upsratio in range(1, allow_upsample_max+1):
            dratio = (upsratio/self.deltat) / (1./deltat)
            if abs(dratio - round(dratio)) / dratio < 0.0001 and \
                    util.decitab(int(round(dratio))):

                ok = True
                break

        if not ok:
            raise util.UnavailableDecimation('ratio = %g' % ratio)

        if upsratio > 1:
            self.drop_growbuffer()
            ydata = self.ydata
            self.ydata = num.zeros(
                ydata.size*upsratio-(upsratio-1), ydata.dtype)
            self.ydata[::upsratio] = ydata
            for i in range(1, upsratio):
                # linear interpolation: the new sample at offset i lies a
                # fraction i/upsratio past the lower neighbouring sample
                self.ydata[i::upsratio] = \
                    float(upsratio-i)/upsratio * ydata[:-1] \
                    + float(i)/upsratio * ydata[1:]
            self.deltat = self.deltat/upsratio

            ratio = deltat/self.deltat
            rratio = round(ratio)

        deci_seq = util.decitab(int(rratio))
        finals = []
        for i, ndecimate in enumerate(deci_seq):
            if ndecimate != 1:
                xinitials = None
                if initials is not None:
                    xinitials = initials[i]
                finals.append(self.downsample(
                    ndecimate, snap=snap, initials=xinitials, demean=demean,
                    ftype=ftype))

        if initials is not None:
            return finals

    def resample(self, deltat):
        '''
        Resample to given sampling rate ``deltat``.

        Resampling is performed in the frequency domain.
        '''

        ndata = self.ydata.size
        ntrans = nextpow2(ndata)
        fntrans2 = ntrans * self.deltat/deltat
        ntrans2 = int(round(fntrans2))
        deltat2 = self.deltat * float(ntrans)/float(ntrans2)
        ndata2 = int(round(ndata*self.deltat/deltat2))
        if abs(fntrans2 - ntrans2) > 1e-7:
            logger.warning(
                'resample: requested deltat %g could not be matched exactly: '
                '%g' % (deltat, deltat2))

        data = self.ydata
        data_pad = num.zeros(ntrans, dtype=float)
        data_pad[:ndata] = data
        fdata = num.fft.rfft(data_pad)
        fdata2 = num.zeros((ntrans2+1)//2, dtype=fdata.dtype)
        n = min(fdata.size, fdata2.size)
        fdata2[:n] = fdata[:n]
        data2 = num.fft.irfft(fdata2)
        data2 = data2[:ndata2]
        data2 *= float(ntrans2) / float(ntrans)
        self.deltat = deltat2
        self.set_ydata(data2)

    def resample_simple(self, deltat):
        tyear = 3600*24*365.

        if deltat == self.deltat:
            return

        if abs(self.deltat - deltat) * tyear/deltat < deltat:
            logger.warning(
                'resample_simple: less than one sample would have to be '
                'inserted/deleted per year. Doing nothing.')
            return

        ninterval = int(round(deltat / (self.deltat - deltat)))
        if abs(ninterval) < 20:
            logger.error(
                'resample_simple: sample insertion/deletion interval less '
                'than 20. results would be erroneous.')
            raise ResamplingFailed()

        delete = False
        if ninterval < 0:
            ninterval = - ninterval
            delete = True

        tyearbegin = util.year_start(self.tmin)

        nmin = int(round((self.tmin - tyearbegin)/deltat))

        ibegin = (((nmin-1)//ninterval)+1) * ninterval - nmin
        nindices = (len(self.ydata) - ibegin - 1) // ninterval + 1
        if nindices > 0:
            indices = ibegin + num.arange(nindices) * ninterval
            data_split = num.split(self.ydata, indices)
            data = []
            for ln, h in zip(data_split[:-1], data_split[1:]):
                if delete:
                    ln = ln[:-1]

                data.append(ln)
                if not delete:
                    if ln.size == 0:
                        v = h[0]
                    else:
                        v = 0.5*(ln[-1] + h[0])
                    data.append(num.array([v], dtype=ln.dtype))

            data.append(data_split[-1])

            ydata_new = num.concatenate(data)

            self.tmin = tyearbegin + nmin * deltat
            self.deltat = deltat
            self.set_ydata(ydata_new)

    def stretch(self, tmin_new, tmax_new):
        '''
        Stretch signal while preserving sample rate using sinc interpolation.

        :param tmin_new: new time of first sample
        :param tmax_new: new time of last sample

        This method can be used to correct for a small linear time drift or
        to introduce sub-sample time shifts. The amount of stretching is
        limited to 10% by the implementation and is expected to be much
        smaller than that by the approximations used.
        '''

        from pyrocko import signal_ext

        i_control = num.array([0, self.ydata.size-1], dtype=num.int64)
        t_control = num.array([tmin_new, tmax_new], dtype=float)

        r = (tmax_new - tmin_new) / self.deltat + 1.0
        n_new = int(round(r))
        if abs(n_new - r) > 0.001:
            n_new = int(math.floor(r))

        assert n_new >= 2

        tmax_new = tmin_new + (n_new-1) * self.deltat

        ydata_new = num.empty(n_new, dtype=float)
        signal_ext.antidrift(i_control, t_control,
                             self.ydata.astype(float),
                             tmin_new, self.deltat, ydata_new)

        self.tmin = tmin_new
        self.set_ydata(ydata_new)
        self._update_ids()

    def nyquist_check(self, frequency, intro='Corner frequency', warn=True,
                      raise_exception=False):

        '''
        Check if a given frequency is above the Nyquist frequency of the
        trace.

        :param intro: string used to introduce the warning/error message
        :param warn: whether to emit a warning
        :param raise_exception: whether to raise an :py:exc:`AboveNyquist`
            exception.
        '''

        if frequency >= 0.5/self.deltat:
            message = '%s (%g Hz) is equal to or higher than Nyquist ' \
                'frequency (%g Hz). (Trace %s)' \
                % (intro, frequency, 0.5/self.deltat, self.name())
            if warn:
                logger.warning(message)
            if raise_exception:
                raise AboveNyquist(message)

    def lowpass(self, order, corner, nyquist_warn=True,
                nyquist_exception=False, demean=True):

        '''
        Apply Butterworth lowpass to the trace.

        :param order: order of the filter
        :param corner: corner frequency of the filter

        Mean is removed before filtering.
        '''

        self.nyquist_check(
            corner, 'Corner frequency of lowpass', nyquist_warn,
            nyquist_exception)

        (b, a) = _get_cached_filter_coefs(
            order, [corner*2.0*self.deltat], btype='low')

        if len(a) != order+1 or len(b) != order+1:
            logger.warning(
                'Erroneous filter coefficients returned by '
                'scipy.signal.butter(). You may need to downsample the '
                'signal before filtering.')

        data = self.ydata.astype(num.float64)
        if demean:
            data -= num.mean(data)
        self.drop_growbuffer()
        self.ydata = signal.lfilter(b, a, data)

    def highpass(self, order, corner, nyquist_warn=True,
                 nyquist_exception=False, demean=True):

        '''
        Apply Butterworth highpass to the trace.

        :param order: order of the filter
        :param corner: corner frequency of the filter

        Mean is removed before filtering.
        '''

        self.nyquist_check(
            corner, 'Corner frequency of highpass', nyquist_warn,
            nyquist_exception)

        (b, a) = _get_cached_filter_coefs(
            order, [corner*2.0*self.deltat], btype='high')

        data = self.ydata.astype(num.float64)
        if len(a) != order+1 or len(b) != order+1:
            logger.warning(
                'Erroneous filter coefficients returned by '
                'scipy.signal.butter(). You may need to downsample the '
                'signal before filtering.')
        if demean:
            data -= num.mean(data)
        self.drop_growbuffer()
        self.ydata = signal.lfilter(b, a, data)

    def bandpass(self, order, corner_hp, corner_lp, demean=True):
        '''
        Apply Butterworth bandpass to the trace.

        :param order: order of the filter
        :param corner_hp: lower corner frequency of the filter
        :param corner_lp: upper corner frequency of the filter

        Mean is removed before filtering.
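
        A minimal sketch, keeping the band between 0.1 and 2.0 Hz with a
        4th-order filter (``tr`` is a hypothetical trace object)::

            tr.bandpass(4, 0.1, 2.0)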
        '''

        self.nyquist_check(corner_hp, 'Lower corner frequency of bandpass')
        self.nyquist_check(corner_lp, 'Higher corner frequency of bandpass')
        (b, a) = _get_cached_filter_coefs(
            order,
            [corner*2.0*self.deltat for corner in (corner_hp, corner_lp)],
            btype='band')
        data = self.ydata.astype(num.float64)
        if demean:
            data -= num.mean(data)
        self.drop_growbuffer()
        self.ydata = signal.lfilter(b, a, data)

    def bandstop(self, order, corner_hp, corner_lp, demean=True):
        '''
        Apply bandstop (attenuates frequencies in band) to the trace.

        :param order: order of the filter
        :param corner_hp: lower corner frequency of the filter
        :param corner_lp: upper corner frequency of the filter

        Mean is removed before filtering.
        '''

        self.nyquist_check(corner_hp, 'Lower corner frequency of bandstop')
        self.nyquist_check(corner_lp, 'Higher corner frequency of bandstop')
        (b, a) = _get_cached_filter_coefs(
            order,
            [corner*2.0*self.deltat for corner in (corner_hp, corner_lp)],
            btype='bandstop')
        data = self.ydata.astype(num.float64)
        if demean:
            data -= num.mean(data)
        self.drop_growbuffer()
        self.ydata = signal.lfilter(b, a, data)

    def envelope(self, inplace=True):
        '''
        Calculate the envelope of the trace.

        :param inplace: calculate envelope in place

        The calculation follows:

        .. math::

            Y' = \\sqrt{Y^2+H(Y)^2}

        where H is the Hilbert-Transform of the signal Y.
        '''

        y = self.ydata.astype(float)
        env = num.abs(hilbert(y))
        if inplace:
            self.drop_growbuffer()
            self.ydata = env
        else:
            tr = self.copy(data=False)
            tr.ydata = env
            return tr

    def taper(self, taperer, inplace=True, chop=False):
        '''
        Apply a :py:class:`Taper` to the trace.

        :param taperer: instance of :py:class:`Taper` subclass
        :param inplace: apply taper inplace
        :param chop: if ``True``: exclude tapered parts from the resulting
            trace
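
        A minimal sketch, fading in and out over 5 s with a cosine taper
        (``tr`` is a hypothetical trace object)::

            tr.taper(CosFader(xfade=5.0))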
        '''

        if not inplace:
            tr = self.copy()
        else:
            tr = self

        if chop:
            i, n = taperer.span(tr.ydata, tr.tmin, tr.deltat)
            tr.shift(i*tr.deltat)
            tr.set_ydata(tr.ydata[i:i+n])

        taperer(tr.ydata, tr.tmin, tr.deltat)

        if not inplace:
            return tr

    def whiten(self, order=6):
        '''
        Whiten signal in time domain using autoregression and recursive
        filter.

        :param order: order of the autoregression process
        '''

        b, a = self.whitening_coefficients(order)
        self.drop_growbuffer()
        self.ydata = signal.lfilter(b, a, self.ydata)

    def whitening_coefficients(self, order=6):
        ar = yulewalker(self.ydata, order)
        b, a = [1.] + ar.tolist(), [1.]
        return b, a

    def ampspec_whiten(
            self,
            width,
            td_taper='auto',
            fd_taper='auto',
            pad_to_pow2=True,
            demean=True):

        '''
        Whiten signal via frequency domain using moving average on amplitude
        spectra.

        :param width: width of smoothing kernel [Hz]
        :param td_taper: time domain taper, object of type :py:class:`Taper`
            or ``None`` or ``'auto'``.
        :param fd_taper: frequency domain taper, object of type
            :py:class:`Taper` or ``None`` or ``'auto'``.
        :param pad_to_pow2: whether to pad the signal with zeros up to a
            length of 2^n
        :param demean: whether to demean the signal before tapering

        The signal is first demeaned and then tapered using ``td_taper``.
        Then, the spectrum is calculated and inversely weighted with a
        smoothed version of its amplitude spectrum. A moving average is used
        for the smoothing. The smoothed spectrum is then tapered using
        ``fd_taper``. Finally, the smoothed and tapered spectrum is
        back-transformed into the time domain.

        If ``td_taper`` is set to ``'auto'``, ``CosFader(1.0/width)`` is
        used. If ``fd_taper`` is set to ``'auto'``, ``CosFader(width)`` is
        used.
        '''

        ndata = self.data_len()

        if pad_to_pow2:
            ntrans = nextpow2(ndata)
        else:
            ntrans = ndata

        df = 1./(ntrans*self.deltat)
        nw = int(round(width/df))
        if ndata//2+1 <= nw:
            raise TraceTooShort(
                'Samples in trace: %s, samples needed: %s' % (ndata, nw))

        if td_taper == 'auto':
            td_taper = CosFader(1./width)

        if fd_taper == 'auto':
            fd_taper = CosFader(width)

        if td_taper:
            self.taper(td_taper)

        ydata = self.get_ydata().astype(float)
        if demean:
            ydata -= ydata.mean()

        spec = num.fft.rfft(ydata, ntrans)

        amp = num.abs(spec)
        nspec = amp.size
        csamp = num.cumsum(amp)
        amp_smoothed = num.empty(nspec, dtype=csamp.dtype)
        n1, n2 = nw//2, nw//2 + nspec - nw
        amp_smoothed[n1:n2] = (csamp[nw:] - csamp[:-nw]) / nw
        amp_smoothed[:n1] = amp_smoothed[n1]
        amp_smoothed[n2:] = amp_smoothed[n2-1]

        denom = amp_smoothed * amp
        numer = amp
        eps = num.mean(denom) * 1e-9
        if eps == 0.0:
            eps = 1e-9

        numer += eps
        denom += eps
        spec *= numer/denom

        if fd_taper:
            fd_taper(spec, 0., df)

        ydata = num.fft.irfft(spec)
        self.set_ydata(ydata[:ndata])

    def _get_cached_freqs(self, nf, deltaf):
        ck = (nf, deltaf)
        if ck not in Trace.cached_frequencies:
            Trace.cached_frequencies[ck] = deltaf * num.arange(
                nf, dtype=float)

        return Trace.cached_frequencies[ck]

    def bandpass_fft(self, corner_hp, corner_lp):
        '''
        Apply boxcar bandpass to trace (in spectral domain).
        '''

        n = len(self.ydata)
        n2 = nextpow2(n)
        data = num.zeros(n2, dtype=num.float64)
        data[:n] = self.ydata
        fdata = num.fft.rfft(data)
        freqs = self._get_cached_freqs(len(fdata), 1./(self.deltat*n2))
        fdata[0] = 0.0
        fdata *= num.logical_and(corner_hp < freqs, freqs < corner_lp)
        data = num.fft.irfft(fdata)
        self.drop_growbuffer()
        self.ydata = data[:n]

    def shift(self, tshift):
        '''
        Time shift the trace.
        '''

        self.tmin += tshift
        self.tmax += tshift
        self._update_ids()

    def snap(self, inplace=True, interpolate=False):
        '''
        Shift trace samples to nearest even multiples of the sampling rate.

        :param inplace: (boolean) snap traces inplace

        If ``inplace`` is ``False`` and the differences in ``tmin`` and
        ``tmax`` between the snapped and the original trace are both smaller
        than 0.01 x deltat, :py:func:`snap` returns the unsnapped instance
        of the original trace.
        '''

        tmin = round(self.tmin/self.deltat)*self.deltat
        tmax = tmin + (self.ydata.size-1)*self.deltat

        if inplace:
            xself = self
        else:
            if abs(self.tmin - tmin) < 1e-2*self.deltat and \
                    abs(self.tmax - tmax) < 1e-2*self.deltat:
                return self

            xself = self.copy()

        if interpolate:
            from pyrocko import signal_ext
            n = xself.data_len()
            ydata_new = num.empty(n, dtype=float)
            i_control = num.array([0, n-1], dtype=num.int64)
            tref = tmin
            t_control = num.array(
                [float(xself.tmin-tref), float(xself.tmax-tref)],
                dtype=float)

            signal_ext.antidrift(i_control, t_control,
                                 xself.ydata.astype(float),
                                 float(tmin-tref), xself.deltat, ydata_new)

            xself.ydata = ydata_new

        xself.tmin = tmin
        xself.tmax = tmax
        xself._update_ids()

        return xself

    def fix_deltat_rounding_errors(self):
        '''
        Try to undo sampling rate rounding errors.

        See :py:func:`fix_deltat_rounding_errors`.
        '''

        self.deltat = fix_deltat_rounding_errors(self.deltat)
        self.tmax = self.tmin + (self.data_len() - 1) * self.deltat

    def sta_lta_centered(self, tshort, tlong, quad=True, scalingmethod=1):
        '''
        Run special STA/LTA filter where the short time window is centered on
        the long time window.

        :param tshort: length of short time window in [s]
        :param tlong: length of long time window in [s]
        :param quad: whether to square the data prior to applying the STA/LTA
            filter
        :param scalingmethod: integer key to select how output values are
            scaled / normalized (``1``, ``2``, or ``3``)

        ============= ====================================== ===========
        Scalingmethod Implementation                         Range
        ============= ====================================== ===========
        ``1``         As/Al * Ts/Tl                          [0,1]
        ``2``         (As/Al - 1) / (Tl/Ts - 1)              [-Ts/Tl,1]
        ``3``         Like ``2`` but clipping range at zero  [0,1]
        ============= ====================================== ===========
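
        A minimal sketch with a 0.5 s short and a 5 s long window (``tr`` is
        a hypothetical trace object)::

            tr.sta_lta_centered(0.5, 5.0)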
        '''

        nshort = int(round(tshort/self.deltat))
        nlong = int(round(tlong/self.deltat))

        assert nshort < nlong
        if nlong > len(self.ydata):
            raise TraceTooShort(
                'Samples in trace: %s, samples needed: %s'
                % (len(self.ydata), nlong))

        if quad:
            sqrdata = self.ydata**2
        else:
            sqrdata = self.ydata

        mavg_short = moving_avg(sqrdata, nshort)
        mavg_long = moving_avg(sqrdata, nlong)

        self.drop_growbuffer()

        if scalingmethod not in (1, 2, 3):
            raise Exception('Invalid value for scalingmethod argument.')

        if scalingmethod == 1:
            self.ydata = mavg_short/mavg_long * float(nshort)/float(nlong)
        elif scalingmethod in (2, 3):
            self.ydata = (mavg_short/mavg_long - 1.) \
                / ((float(nlong)/float(nshort)) - 1)

            if scalingmethod == 3:
                self.ydata = num.maximum(self.ydata, 0.)

    def sta_lta_right(self, tshort, tlong, quad=True, scalingmethod=1):
        '''
        Run special STA/LTA filter where the short time window is overlapping
        with the last part of the long time window.

        :param tshort: length of short time window in [s]
        :param tlong: length of long time window in [s]
        :param quad: whether to square the data prior to applying the STA/LTA
            filter
        :param scalingmethod: integer key to select how output values are
            scaled / normalized (``1``, ``2``, or ``3``)

        ============= ====================================== ===========
        Scalingmethod Implementation                         Range
        ============= ====================================== ===========
        ``1``         As/Al * Ts/Tl                          [0,1]
        ``2``         (As/Al - 1) / (Tl/Ts - 1)              [-Ts/Tl,1]
        ``3``         Like ``2`` but clipping range at zero  [0,1]
        ============= ====================================== ===========

        With ``scalingmethod=1``, the values produced by this variant of the
        STA/LTA are equivalent to

        .. math::
            s_i = \\frac{s}{l} \\frac{\\frac{1}{s}\\sum_{j=i}^{i+s-1} f_j}
                               {\\frac{1}{l}\\sum_{j=i+s-l}^{i+s-1} f_j}

        where :math:`f_j` are the input samples, :math:`s` are the number of
        samples in the short time window and :math:`l` are the number of
        samples in the long time window.
        '''

        n = self.data_len()
        tmin = self.tmin

        nshort = max(1, int(round(tshort/self.deltat)))
        nlong = max(1, int(round(tlong/self.deltat)))

        assert nshort < nlong

        if nlong > len(self.ydata):
            raise TraceTooShort(
                'Samples in trace: %s, samples needed: %s'
                % (len(self.ydata), nlong))

        if quad:
            sqrdata = self.ydata**2
        else:
            sqrdata = self.ydata

        nshift = int(math.floor(0.5 * (nlong - nshort)))
        if nlong % 2 != 0 and nshort % 2 == 0:
            nshift += 1

        mavg_short = moving_avg(sqrdata, nshort)[nshift:]
        mavg_long = moving_avg(sqrdata, nlong)[:sqrdata.size-nshift]

        self.drop_growbuffer()

        if scalingmethod not in (1, 2, 3):
            raise Exception('Invalid value for scalingmethod argument.')

        if scalingmethod == 1:
            ydata = mavg_short/mavg_long * nshort/nlong
        elif scalingmethod in (2, 3):
            ydata = (mavg_short/mavg_long - 1.) \
                / ((float(nlong)/float(nshort)) - 1)

            if scalingmethod == 3:
                ydata = num.maximum(ydata, 0.)

        self.set_ydata(ydata)

        self.shift((math.ceil(0.5*nlong) - nshort + 1) * self.deltat)

        self.chop(
            tmin + (nlong - nshort) * self.deltat,
            tmin + (n - nshort) * self.deltat)

    def peaks(self, threshold, tsearch,
              deadtime=False,
              nblock_duration_detection=100):

        '''
        Detect peaks above a given threshold (method 1).

        From every instant where the signal rises above ``threshold``, a time
        length of ``tsearch`` seconds is searched for a maximum. A list with
        tuples (time, value) for each detected peak is returned. The
        ``deadtime`` argument turns on a special deadtime duration detection
        algorithm useful in combination with recursive STA/LTA filters.
        '''

        y = self.ydata
        above = num.where(y > threshold, 1, 0)
        deriv = num.zeros(y.size, dtype=num.int8)
        deriv[1:] = above[1:]-above[:-1]
        itrig_positions = num.nonzero(deriv > 0)[0]
        tpeaks = []
        apeaks = []
        tzeros = []
        tzero = self.tmin

        for itrig_pos in itrig_positions:
            ibeg = itrig_pos
            iend = min(
                len(self.ydata),
                itrig_pos + int(math.ceil(tsearch/self.deltat)))
            ipeak = num.argmax(y[ibeg:iend])
            tpeak = self.tmin + (ipeak+ibeg)*self.deltat
            apeak = y[ibeg+ipeak]

            if tpeak < tzero:
                continue

            if deadtime:
                ibeg = itrig_pos
                iblock = 0
                nblock = nblock_duration_detection
                totalsum = 0.
                while True:
                    if ibeg+iblock*nblock >= len(y):
                        tzero = self.tmin + (len(y)-1) * self.deltat
                        break

                    logy = num.log(
                        y[ibeg+iblock*nblock:ibeg+(iblock+1)*nblock])
                    logy[0] += totalsum
                    ysum = num.cumsum(logy)
                    totalsum = ysum[-1]
                    below = num.where(ysum <= 0., 1, 0)
                    deriv = num.zeros(ysum.size, dtype=num.int8)
                    deriv[1:] = below[1:]-below[:-1]
                    izero_positions = num.nonzero(deriv > 0)[0] \
                        + iblock*nblock
                    if len(izero_positions) > 0:
                        tzero = self.tmin + self.deltat * (
                            ibeg + izero_positions[0])
                        break
                    iblock += 1
            else:
                tzero = ibeg*self.deltat + self.tmin + tsearch

            tpeaks.append(tpeak)
            apeaks.append(apeak)
            tzeros.append(tzero)

        if deadtime:
            return tpeaks, apeaks, tzeros
        else:
            return tpeaks, apeaks

    def peaks2(self, threshold, tsearch):

        '''
        Detect peaks above a given threshold (method 2).

        This variant of peak detection is a bit more robust (and slower) than
        the one implemented in :py:meth:`Trace.peaks`. First all samples with
        ``a[i-1] < a[i] > a[i+1]`` are masked as potential peaks. From these,
        iteratively the one with the maximum amplitude ``a[j]`` and time
        ``t[j]`` is chosen and potential peaks within
        ``t[j] - tsearch, t[j] + tsearch`` are discarded. The algorithm stops
        when ``a[j] < threshold`` or when no more potential peaks are left.
        '''

        a = num.copy(self.ydata)

        amin = num.min(a)

        a[0] = amin
        # flatten samples with non-negative curvature to amin, so that only
        # potential peaks survive
        a[1: -1][num.diff(a, 2) >= 0.] = amin
        a[-1] = amin

        data = []
        while True:
            imax = num.argmax(a)
            amax = a[imax]

            if amax < threshold or amax == amin:
                break

            data.append((self.tmin + imax * self.deltat, amax))

            ntsearch = int(round(tsearch / self.deltat))
            a[max(imax-ntsearch//2, 0):min(imax+ntsearch//2, a.size)] = amin

        if data:
            data.sort()
            tpeaks, apeaks = list(zip(*data))
        else:
            tpeaks, apeaks = [], []

        return tpeaks, apeaks

    def extend(self, tmin=None, tmax=None, fillmethod='zeros'):
        '''
        Extend trace to given span.

        :param tmin: begin time of new span
        :param tmax: end time of new span
        :param fillmethod: ``'zeros'``, ``'repeat'``, ``'mean'``, or
            ``'median'``
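
        A minimal sketch, padding 10 s of repeated edge values to both sides
        (``tr`` is a hypothetical trace object)::

            tr.extend(tr.tmin - 10., tr.tmax + 10., fillmethod='repeat')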
        '''

        nold = self.ydata.size

        if tmin is not None:
            nl = min(0, int(round((tmin-self.tmin)/self.deltat)))
        else:
            nl = 0

        if tmax is not None:
            nh = max(nold - 1, int(round((tmax-self.tmin)/self.deltat)))
        else:
            nh = nold - 1

        n = nh - nl + 1
        data = num.zeros(n, dtype=self.ydata.dtype)
        data[-nl:-nl + nold] = self.ydata
        if self.ydata.size >= 1:
            if fillmethod == 'repeat':
                data[:-nl] = self.ydata[0]
                data[-nl + nold:] = self.ydata[-1]
            elif fillmethod == 'median':
                v = num.median(self.ydata)
                data[:-nl] = v
                data[-nl + nold:] = v
            elif fillmethod == 'mean':
                v = num.mean(self.ydata)
                data[:-nl] = v
                data[-nl + nold:] = v

        self.drop_growbuffer()
        self.ydata = data

        self.tmin += nl * self.deltat
        self.tmax = self.tmin + (self.ydata.size - 1) * self.deltat

        self._update_ids()

    def transfer(self,
                 tfade=0.,
                 freqlimits=None,
                 transfer_function=None,
                 cut_off_fading=True,
                 demean=True,
                 invert=False):

        '''
        Return new trace with transfer function applied (convolution).

        :param tfade: rise/fall time in seconds of taper applied in
            timedomain at both ends of trace.
        :param freqlimits: 4-tuple with corner frequencies in Hz.
        :param transfer_function: FrequencyResponse object; must provide a
            method 'evaluate(freqs)', which returns the transfer function
            coefficients at the frequencies 'freqs'.
        :param cut_off_fading: whether to cut off rise/fall interval in
            output trace.
        :param demean: remove mean before applying transfer function
        :param invert: set to True to do a deconvolution
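
        A minimal restitution sketch, deconvolving an instrument response
        between 0.02 and 5 Hz (``tr`` and the response object ``resp`` are
        hypothetical placeholders, not from the original docstring)::

            restituted = tr.transfer(
                tfade=10.,
                freqlimits=(0.01, 0.02, 5., 10.),
                transfer_function=resp,
                invert=True)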
        '''

        if transfer_function is None:
            transfer_function = FrequencyResponse()

        if self.tmax - self.tmin <= tfade*2.:
            raise TraceTooShort(
                'Trace %s.%s.%s.%s too short for fading length setting. '
                'trace length = %g, fading length = %g'
                % (self.nslc_id + (self.tmax-self.tmin, tfade)))

        if freqlimits is None and (
                transfer_function is None or transfer_function.is_scalar()):

            # special case for flat responses

            output = self.copy()
            data = self.ydata
            ndata = data.size

            if transfer_function is not None:
                c = num.abs(transfer_function.evaluate(num.ones(1))[0])

                if invert:
                    c = 1.0/c

                data *= c

            if tfade != 0.0:
                data *= costaper(
                    0., tfade, self.deltat*(ndata-1)-tfade, self.deltat*ndata,
                    ndata, self.deltat)

            output.ydata = data

        else:
            ndata = self.ydata.size
            ntrans = nextpow2(ndata*1.2)
            coefs = self._get_tapered_coefs(
                ntrans, freqlimits, transfer_function, invert=invert)

            data = self.ydata

            data_pad = num.zeros(ntrans, dtype=float)
            data_pad[:ndata] = data
            if demean:
                data_pad[:ndata] -= data.mean()

            if tfade != 0.0:
                data_pad[:ndata] *= costaper(
                    0., tfade, self.deltat*(ndata-1)-tfade, self.deltat*ndata,
                    ndata, self.deltat)

            fdata = num.fft.rfft(data_pad)
            fdata *= coefs
            ddata = num.fft.irfft(fdata)
            output = self.copy()
            output.ydata = ddata[:ndata]

        if cut_off_fading and tfade != 0.0:
            try:
                output.chop(output.tmin+tfade, output.tmax-tfade, inplace=True)
            except NoData:
                raise TraceTooShort(
                    'Trace %s.%s.%s.%s too short for fading length setting. '
                    'trace length = %g, fading length = %g'
                    % (self.nslc_id + (self.tmax-self.tmin, tfade)))
        else:
            output.ydata = output.ydata.copy()

        return output

    def differentiate(self, n=1, order=4, inplace=True):
        '''
        Approximate first or second derivative of the trace.

        :param n: 1 for first derivative, 2 for second
        :param order: order of the approximation; 2 and 4 are supported
        :param inplace: if ``True`` the trace is differentiated in place,
            otherwise a new trace object with the derivative is returned.

        Raises :py:exc:`ValueError` for unsupported ``n`` or ``order``.

        See :py:func:`~pyrocko.util.diff_fd` for implementation details.
        '''

        ddata = util.diff_fd(n, order, self.deltat, self.ydata)

        if inplace:
            self.ydata = ddata
        else:
            output = self.copy(data=False)
            output.set_ydata(ddata)
            return output

    def drop_chain_cache(self):
        if self._pchain:
            self._pchain.clear()

    def init_chain(self):
        self._pchain = pchain.Chain(
            do_downsample,
            do_extend,
            do_pre_taper,
            do_fft,
            do_filter,
            do_ifft)

    def run_chain(self, tmin, tmax, deltat, setup, nocache):
        if setup.domain == 'frequency_domain':
            _, _, data = self._pchain(
                (self, deltat),
                (tmin, tmax),
                (setup.taper,),
                (setup.filter,),
                (setup.filter,),
                nocache=nocache)

            return num.abs(data), num.abs(data)

        else:
            processed = self._pchain(
                (self, deltat),
                (tmin, tmax),
                (setup.taper,),
                (setup.filter,),
                (setup.filter,),
                (),
                nocache=nocache)

            if setup.domain == 'time_domain':
                data = processed.get_ydata()

            elif setup.domain == 'envelope':
                processed = processed.envelope(inplace=False)

            elif setup.domain == 'absolute':
                processed.set_ydata(num.abs(processed.get_ydata()))

            return processed.get_ydata(), processed

    def misfit(self, candidate, setup, nocache=False, debug=False):
        '''
        Calculate misfit and normalization factor against candidate trace.

        :param candidate: :py:class:`Trace` object
        :param setup: :py:class:`MisfitSetup` object
        :returns: tuple ``(m, n)``, where m is the misfit value and n is the
            normalization divisor

        If the sampling rates of ``self`` and ``candidate`` differ, the trace
        with the higher sampling rate will be downsampled.
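
        A minimal sketch, comparing two traces in the time domain (the trace
        objects and the parameter values are hypothetical placeholders, not
        from the original docstring)::

            setup = MisfitSetup(
                norm=2,
                taper=CosTaper(0., 1., 9., 10.),
                filter=ButterworthResponse(corner=2., order=4, type='low'),
                domain='time_domain')

            m, n = tr_obs.misfit(tr_syn, setup)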
        '''

        a = self
        b = candidate

        for tr in (a, b):
            if not tr._pchain:
                tr.init_chain()

        deltat = max(a.deltat, b.deltat)
        tmin = min(a.tmin, b.tmin) - deltat
        tmax = max(a.tmax, b.tmax) + deltat

        adata, aproc = a.run_chain(tmin, tmax, deltat, setup, nocache)
        bdata, bproc = b.run_chain(tmin, tmax, deltat, setup, nocache)

        if setup.domain != 'cc_max_norm':
            m, n = Lx_norm(bdata, adata, norm=setup.norm)
        else:
            ctr = correlate(aproc, bproc, mode='full', normalization='normal')
            ccmax = ctr.max()[1]
            m = 0.5 - 0.5 * ccmax
            n = 0.5

        if debug:
            return m, n, aproc, bproc
        else:
            return m, n

    def spectrum(self, pad_to_pow2=False, tfade=None):
        '''
        Get FFT spectrum of trace.

        :param pad_to_pow2: whether to zero-pad the data to next larger
            power-of-two length
        :param tfade: ``None`` or a time length in seconds, to apply cosine
            shaped tapers to both ends of the trace

        :returns: a tuple with (frequencies, values)
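
        A minimal sketch, plotting the amplitude spectrum of a hypothetical
        trace ``tr``::

            import matplotlib.pyplot as plt

            freqs, spec = tr.spectrum(pad_to_pow2=True)
            plt.loglog(freqs[1:], num.abs(spec[1:]))
            plt.show()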
        '''

        ndata = self.ydata.size

        if pad_to_pow2:
            ntrans = nextpow2(ndata)
        else:
            ntrans = ndata

        if tfade is None:
            ydata = self.ydata
        else:
            ydata = self.ydata * costaper(
                0., tfade, self.deltat*(ndata-1)-tfade, self.deltat*ndata,
                ndata, self.deltat)

        fydata = num.fft.rfft(ydata, ntrans)
        df = 1./(ntrans*self.deltat)
        fxdata = num.arange(len(fydata))*df
        return fxdata, fydata

    def multi_filter(self, filter_freqs, bandwidth):

        class Gauss(FrequencyResponse):
            f0 = Float.T()
            a = Float.T(default=1.0)

            def __init__(self, f0, a=1.0, **kwargs):
                FrequencyResponse.__init__(self, f0=f0, a=a, **kwargs)

            def evaluate(self, freqs):
                omega0 = 2.*math.pi*self.f0
                omega = 2.*math.pi*freqs
                return num.exp(-((omega-omega0)
                                 / (self.a*omega0))**2)

        freqs, coefs = self.spectrum()
        n = self.data_len()
        nfilt = len(filter_freqs)
        signal_tf = num.zeros((nfilt, n))
        centroid_freqs = num.zeros(nfilt)
        for ifilt, f0 in enumerate(filter_freqs):
            taper = Gauss(f0, a=bandwidth)
            weights = taper.evaluate(freqs)
            nhalf = freqs.size
            analytic_spec = num.zeros(n, dtype=complex)
            analytic_spec[:nhalf] = coefs*weights

            enorm = num.abs(analytic_spec[:nhalf])**2
            enorm /= num.sum(enorm)

            if n % 2 == 0:
                analytic_spec[1:nhalf-1] *= 2.
            else:
                analytic_spec[1:nhalf] *= 2.

            analytic = num.fft.ifft(analytic_spec)
            signal_tf[ifilt, :] = num.abs(analytic)

            enorm = num.abs(analytic_spec[:nhalf])**2
            enorm /= num.sum(enorm)
            centroid_freqs[ifilt] = num.sum(freqs*enorm)

        return centroid_freqs, signal_tf

    def _get_tapered_coefs(
            self, ntrans, freqlimits, transfer_function, invert=False):

        deltaf = 1./(self.deltat*ntrans)
        nfreqs = ntrans//2 + 1
        transfer = num.ones(nfreqs, dtype=complex)
        hi = snapper(nfreqs, deltaf)
        if freqlimits is not None:
            a, b, c, d = freqlimits
            freqs = num.arange(hi(d)-hi(a), dtype=float)*deltaf \
                + hi(a)*deltaf

            if invert:
                coeffs = transfer_function.evaluate(freqs)
                if num.any(coeffs == 0.0):
                    raise InfiniteResponse('%s.%s.%s.%s' % self.nslc_id)

                transfer[hi(a):hi(d)] = \
                    1.0 / transfer_function.evaluate(freqs)
            else:
                transfer[hi(a):hi(d)] = transfer_function.evaluate(freqs)

            tapered_transfer = costaper(a, b, c, d, nfreqs, deltaf)*transfer
        else:
            if invert:
                raise Exception(
                    'transfer: `freqlimits` must be given when `invert` is '
                    'set to `True`')

            freqs = num.arange(nfreqs) * deltaf
            tapered_transfer = transfer_function.evaluate(freqs)

        tapered_transfer[0] = 0.0  # don't introduce static offsets
        return tapered_transfer

    def fill_template(self, template, **additional):
        '''
        Fill string template with trace metadata.

        Uses normal python '%(placeholder)s' string templates. The following
        placeholders are considered: ``network``, ``station``, ``location``,
        ``channel``, ``tmin`` (time of first sample), ``tmax`` (time of last
        sample), ``tmin_ms``, ``tmax_ms``, ``tmin_us``, ``tmax_us``,
        ``tmin_year``, ``tmax_year``, ``tmin_month``, ``tmax_month``,
        ``tmin_day``, ``tmax_day``, ``julianday``. The variants ending with
        ``'_ms'`` include milliseconds, those with ``'_us'`` include
        microseconds, those with ``'_year'`` contain only the year.
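
        A minimal sketch, building an output file name for a hypothetical
        trace ``tr``::

            fn = tr.fill_template(
                'data/%(network)s.%(station)s_%(tmin_ms)s.mseed')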
1881 '''
1883 template = template.replace('%n', '%(network)s')\
1884 .replace('%s', '%(station)s')\
1885 .replace('%l', '%(location)s')\
1886 .replace('%c', '%(channel)s')\
1887 .replace('%b', '%(tmin)s')\
1888 .replace('%e', '%(tmax)s')\
1889 .replace('%j', '%(julianday)s')
1891 params = dict(
1892 zip(('network', 'station', 'location', 'channel'), self.nslc_id))
1893 params['tmin'] = util.time_to_str(
1894 self.tmin, format='%Y-%m-%d_%H-%M-%S')
1895 params['tmax'] = util.time_to_str(
1896 self.tmax, format='%Y-%m-%d_%H-%M-%S')
1897 params['tmin_ms'] = util.time_to_str(
1898 self.tmin, format='%Y-%m-%d_%H-%M-%S.3FRAC')
1899 params['tmax_ms'] = util.time_to_str(
1900 self.tmax, format='%Y-%m-%d_%H-%M-%S.3FRAC')
1901 params['tmin_us'] = util.time_to_str(
1902 self.tmin, format='%Y-%m-%d_%H-%M-%S.6FRAC')
1903 params['tmax_us'] = util.time_to_str(
1904 self.tmax, format='%Y-%m-%d_%H-%M-%S.6FRAC')
1905 params['tmin_year'], params['tmin_month'], params['tmin_day'] \
1906 = util.time_to_str(self.tmin, format='%Y-%m-%d').split('-')
1907 params['tmax_year'], params['tmax_month'], params['tmax_day'] \
1908 = util.time_to_str(self.tmax, format='%Y-%m-%d').split('-')
1909 params['julianday'] = util.julian_day_of_year(self.tmin)
1910 params.update(additional)
1911 return template % params
1913 def plot(self):
1914 '''
1915 Show trace with matplotlib.
1917 See also: :py:meth:`Trace.snuffle`.
1918 '''
1920 import pylab
1921 pylab.plot(self.get_xdata(), self.get_ydata())
1922 name = '%s %s %s - %s' % (
1923 self.channel,
1924 self.station,
1925 time.strftime("%d-%m-%y %H:%M:%S", time.gmtime(self.tmin)),
1926 time.strftime("%d-%m-%y %H:%M:%S", time.gmtime(self.tmax)))
1928 pylab.title(name)
1929 pylab.show()
1931 def snuffle(self, **kwargs):
1932 '''
1933 Show trace in a snuffler window.
1935 :param stations: list of `pyrocko.model.Station` objects or ``None``
1936 :param events: list of `pyrocko.model.Event` objects or ``None``
1937 :param markers: list of `pyrocko.gui.util.Marker` objects or ``None``
1938 :param ntracks: float, number of tracks to be shown initially (default:
1939 12)
1940 :param follow: time interval (in seconds) for real time follow mode or
1941 ``None``
1942 :param controls: bool, whether to show the main controls (default:
1943 ``True``)
1944 :param opengl: bool, whether to use opengl (default: ``False``)
1945 '''
1947 return snuffle([self], **kwargs)
1950def snuffle(traces, **kwargs):
1951 '''
1952 Show traces in a snuffler window.
1954 :param stations: list of `pyrocko.model.Station` objects or ``None``
1955 :param events: list of `pyrocko.model.Event` objects or ``None``
1956 :param markers: list of `pyrocko.gui.util.Marker` objects or ``None``
    :param ntracks: int, number of tracks to be shown initially (default: 12)
1958 :param follow: time interval (in seconds) for real time follow mode or
1959 ``None``
1960 :param controls: bool, whether to show the main controls (default:
1961 ``True``)
1962 :param opengl: bool, whether to use opengl (default: ``False``)
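
    Example (a minimal sketch; ``traces`` is assumed to be a list of
    :py:class:`Trace` objects)::

        from pyrocko import trace
        trace.snuffle(traces, ntracks=4)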
1963 '''
1965 from pyrocko import pile
1966 from pyrocko.gui import snuffler
1967 p = pile.Pile()
1968 if traces:
1969 trf = pile.MemTracesFile(None, traces)
1970 p.add_file(trf)
1971 return snuffler.snuffle(p, **kwargs)
1974class InfiniteResponse(Exception):
1975 '''
1976 This exception is raised by :py:class:`Trace` operations when deconvolution
1977 of a frequency response (instrument response transfer function) would
1978 result in a division by zero.
1979 '''
1982class MisalignedTraces(Exception):
1983 '''
1984 This exception is raised by some :py:class:`Trace` operations when tmin,
1985 tmax or number of samples do not match.
1986 '''
1988 pass
1991class NoData(Exception):
1992 '''
1993 This exception is raised by some :py:class:`Trace` operations when no or
1994 not enough data is available.
1995 '''
1997 pass
2000class AboveNyquist(Exception):
2001 '''
2002 This exception is raised by some :py:class:`Trace` operations when given
2003 frequencies are above the Nyquist frequency.
2004 '''
2006 pass
2009class TraceTooShort(Exception):
2010 '''
2011 This exception is raised by some :py:class:`Trace` operations when the
2012 trace is too short.
2013 '''
2015 pass
2018class ResamplingFailed(Exception):
2019 pass
2022def minmax(traces, key=None, mode='minmax', outer_mode='minmax'):
2024 '''
2025 Get data range given traces grouped by selected pattern.
2027 :param key: a callable which takes as single argument a trace and returns a
2028 key for the grouping of the results. If this is ``None``, the default,
2029 ``lambda tr: (tr.network, tr.station, tr.location, tr.channel)`` is
2030 used.
    :param mode: ``'minmax'`` or floating point number. If this is
        ``'minmax'``, minimum and maximum of the traces are used; if it is a
        number, mean +- standard deviation times ``mode`` is used.

    :param outer_mode: ``'minmax'`` to use the minimum and maximum of the
        single-trace ranges, or ``'robust'`` to use the 10th and 90th
        percentiles of the single-trace minima and maxima.
2039 :returns: a dict with the combined data ranges.
2041 Examples::
        ranges = minmax(traces, lambda tr: tr.channel)
        print(ranges['N'])  # min & max of all traces with channel == 'N'
        print(ranges['E'])  # min & max of all traces with channel == 'E'

        ranges = minmax(traces, lambda tr: (tr.network, tr.station))
        print(ranges['GR', 'HAM3'])  # min & max of all traces with
                                     # network == 'GR' and station == 'HAM3'

        ranges = minmax(traces, lambda tr: None)
        print(ranges[None])  # min & max of all traces
2053 '''
2055 if key is None:
2056 key = _default_key
2058 ranges = defaultdict(list)
2059 for trace in traces:
2060 if isinstance(mode, str) and mode == 'minmax':
2061 mi, ma = num.nanmin(trace.ydata), num.nanmax(trace.ydata)
2062 else:
2063 mean = trace.ydata.mean()
2064 std = trace.ydata.std()
2065 mi, ma = mean-std*mode, mean+std*mode
2067 k = key(trace)
2068 ranges[k].append((mi, ma))
2070 for k in ranges:
2071 mins, maxs = num.array(ranges[k]).T
2072 if outer_mode == 'minmax':
2073 ranges[k] = num.nanmin(mins), num.nanmax(maxs)
2074 elif outer_mode == 'robust':
2075 ranges[k] = num.percentile(mins, 10.), num.percentile(maxs, 90.)
2077 return ranges
2080def minmaxtime(traces, key=None):
2082 '''
    Get the time ranges of traces, grouped by a selected pattern.
2085 :param key: a callable which takes as single argument a trace and returns a
2086 key for the grouping of the results. If this is ``None``, the default,
2087 ``lambda tr: (tr.network, tr.station, tr.location, tr.channel)`` is
2088 used.
    :returns: a dict with the combined time ranges.
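
    Example (a minimal sketch)::

        tspans = minmaxtime(traces, key=lambda tr: tr.channel)
        tmin, tmax = tspans['Z']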
2091 '''
2093 if key is None:
2094 key = _default_key
2096 ranges = {}
2097 for trace in traces:
2098 mi, ma = trace.tmin, trace.tmax
2099 k = key(trace)
2100 if k not in ranges:
2101 ranges[k] = mi, ma
2102 else:
2103 tmi, tma = ranges[k]
2104 ranges[k] = min(tmi, mi), max(tma, ma)
2106 return ranges
2109def degapper(
2110 traces,
2111 maxgap=5,
2112 fillmethod='interpolate',
2113 deoverlap='use_second',
2114 maxlap=None):
2116 '''
2117 Try to connect traces and remove gaps.
    This function combines adjacent traces which match in their network,
    station, location and channel attributes. Overlapping parts are handled
    according to the ``deoverlap`` argument.
2123 :param traces: input traces, must be sorted by their full_id attribute.
2124 :param maxgap: maximum number of samples to interpolate.
2125 :param fillmethod: what to put into the gaps: 'interpolate' or 'zeros'.
2126 :param deoverlap: how to handle overlaps: 'use_second' to use data from
2127 second trace (default), 'use_first' to use data from first trace,
2128 'crossfade_cos' to crossfade with cosine taper, 'add' to add amplitude
2129 values.
    :param maxlap: maximum number of samples of overlap which are removed.
2132 :returns: list of traces
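
    Example (a minimal sketch; ``traces`` is assumed to be a list of
    :py:class:`Trace` objects)::

        traces.sort(key=lambda tr: tr.full_id)
        continuous = degapper(traces, maxgap=5, fillmethod='interpolate')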
2133 '''
2135 in_traces = traces
2136 out_traces = []
2137 if not in_traces:
2138 return out_traces
2139 out_traces.append(in_traces.pop(0))
2140 while in_traces:
2142 a = out_traces[-1]
2143 b = in_traces.pop(0)
2145 avirt, bvirt = a.ydata is None, b.ydata is None
2146 assert avirt == bvirt, \
2147 'traces given to degapper() must either all have data or have ' \
2148 'no data.'
2150 virtual = avirt and bvirt
2152 if (a.nslc_id == b.nslc_id and a.deltat == b.deltat
2153 and a.data_len() >= 1 and b.data_len() >= 1
2154 and (virtual or a.ydata.dtype == b.ydata.dtype)):
2156 dist = (b.tmin-(a.tmin+(a.data_len()-1)*a.deltat))/a.deltat
2157 idist = int(round(dist))
2158 if abs(dist - idist) > 0.05 and idist <= maxgap:
2159 # logger.warning('Cannot degap traces with displaced sampling '
2160 # '(%s, %s, %s, %s)' % a.nslc_id)
2161 pass
2162 else:
2163 if 1 < idist <= maxgap:
2164 if not virtual:
2165 if fillmethod == 'interpolate':
2166 filler = a.ydata[-1] + (
2167 ((1.0 + num.arange(idist-1, dtype=float))
2168 / idist) * (b.ydata[0]-a.ydata[-1])
2169 ).astype(a.ydata.dtype)
2170 elif fillmethod == 'zeros':
2171 filler = num.zeros(idist-1, dtype=a.ydata.dtype)
2172 a.ydata = num.concatenate((a.ydata, filler, b.ydata))
2173 a.tmax = b.tmax
2174 if a.mtime and b.mtime:
2175 a.mtime = max(a.mtime, b.mtime)
2176 continue
2178 elif idist == 1:
2179 if not virtual:
2180 a.ydata = num.concatenate((a.ydata, b.ydata))
2181 a.tmax = b.tmax
2182 if a.mtime and b.mtime:
2183 a.mtime = max(a.mtime, b.mtime)
2184 continue
2186 elif idist <= 0 and (maxlap is None or -maxlap < idist):
2187 if b.tmax > a.tmax:
2188 if not virtual:
2189 na = a.ydata.size
2190 n = -idist+1
2191 if deoverlap == 'use_second':
2192 a.ydata = num.concatenate(
2193 (a.ydata[:-n], b.ydata))
2194 elif deoverlap in ('use_first', 'crossfade_cos'):
2195 a.ydata = num.concatenate(
2196 (a.ydata, b.ydata[n:]))
2197 elif deoverlap == 'add':
2198 a.ydata[-n:] += b.ydata[:n]
2199 a.ydata = num.concatenate(
2200 (a.ydata, b.ydata[n:]))
2201 else:
2202 assert False, 'unknown deoverlap method'
2204 if deoverlap == 'crossfade_cos':
2205 n = -idist+1
2206 taper = 0.5-0.5*num.cos(
2207 (1.+num.arange(n))/(1.+n)*num.pi)
2208 a.ydata[na-n:na] *= 1.-taper
2209 a.ydata[na-n:na] += b.ydata[:n] * taper
2211 a.tmax = b.tmax
2212 if a.mtime and b.mtime:
2213 a.mtime = max(a.mtime, b.mtime)
2214 continue
2215 else:
2216 # make short second trace vanish
2217 continue
2219 if b.data_len() >= 1:
2220 out_traces.append(b)
2222 for tr in out_traces:
2223 tr._update_ids()
2225 return out_traces
2228def rotate(traces, azimuth, in_channels, out_channels):
2229 '''
2230 2D rotation of traces.
2232 :param traces: list of input traces
2233 :param azimuth: difference of the azimuths of the component directions
2234 (azimuth of out_channels[0]) - (azimuth of in_channels[0])
2235 :param in_channels: names of the input channels (e.g. 'N', 'E')
2236 :param out_channels: names of the output channels (e.g. 'R', 'T')
2237 :returns: list of rotated traces
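
    Example (a minimal sketch; ``tr_n`` and ``tr_e`` are assumed to be the
    north and east component traces of one station)::

        tr_r, tr_t = rotate(
            [tr_n, tr_e], azimuth=30.,
            in_channels=('N', 'E'), out_channels=('R', 'T'))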
2238 '''
2240 phi = azimuth/180.*math.pi
2241 cphi = math.cos(phi)
2242 sphi = math.sin(phi)
2243 rotated = []
2244 in_channels = tuple(_channels_to_names(in_channels))
2245 out_channels = tuple(_channels_to_names(out_channels))
2246 for a in traces:
2247 for b in traces:
2248 if ((a.channel, b.channel) == in_channels and
2249 a.nslc_id[:3] == b.nslc_id[:3] and
2250 abs(a.deltat-b.deltat) < a.deltat*0.001):
2251 tmin = max(a.tmin, b.tmin)
2252 tmax = min(a.tmax, b.tmax)
2254 if tmin < tmax:
2255 ac = a.chop(tmin, tmax, inplace=False, include_last=True)
2256 bc = b.chop(tmin, tmax, inplace=False, include_last=True)
2257 if abs(ac.tmin - bc.tmin) > ac.deltat*0.01:
2258 logger.warning(
2259 'Cannot rotate traces with displaced sampling '
2260 '(%s, %s, %s, %s)' % a.nslc_id)
2261 continue
2263 acydata = ac.get_ydata()*cphi+bc.get_ydata()*sphi
2264 bcydata = -ac.get_ydata()*sphi+bc.get_ydata()*cphi
2265 ac.set_ydata(acydata)
2266 bc.set_ydata(bcydata)
2268 ac.set_codes(channel=out_channels[0])
2269 bc.set_codes(channel=out_channels[1])
2270 rotated.append(ac)
2271 rotated.append(bc)
2273 return rotated
2276def rotate_to_rt(n, e, source, receiver, out_channels=('R', 'T')):
2277 '''
2278 Rotate traces from NE to RT system.
2280 :param n:
2281 North trace.
2282 :type n:
2283 :py:class:`~pyrocko.trace.Trace`
2285 :param e:
2286 East trace.
2287 :type e:
2288 :py:class:`~pyrocko.trace.Trace`
2290 :param source:
2291 Source of the recorded signal.
2292 :type source:
2293 :py:class:`pyrocko.gf.seismosizer.Source`
2295 :param receiver:
2296 Receiver of the recorded signal.
2297 :type receiver:
2298 :py:class:`pyrocko.model.location.Location`
2300 :param out_channels:
2301 Channel codes of the output channels (radial, transversal).
        Default is ``('R', 'T')``.
    :type out_channels:
        optional, tuple[str, str]
2307 :returns:
2308 Rotated traces (radial, transversal).
2309 :rtype:
2310 tuple[
2311 :py:class:`~pyrocko.trace.Trace`,
2312 :py:class:`~pyrocko.trace.Trace`]
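
    Example (a minimal sketch; ``source`` and ``receiver`` are assumed to be
    objects with geographical coordinates, e.g.
    :py:class:`~pyrocko.model.location.Location`)::

        r, t = rotate_to_rt(tr_n, tr_e, source, receiver)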
2313 '''
2314 azimuth = orthodrome.azimuth(receiver, source) + 180.
2315 in_channels = n.channel, e.channel
2316 out = rotate(
2317 [n, e], azimuth,
2318 in_channels=in_channels,
2319 out_channels=out_channels)
2321 assert len(out) == 2
2322 for tr in out:
2323 if tr.channel == out_channels[0]:
2324 r = tr
2325 elif tr.channel == out_channels[1]:
2326 t = tr
2327 else:
2328 assert False
2330 return r, t
2333def rotate_to_lqt(traces, backazimuth, incidence, in_channels,
2334 out_channels=('L', 'Q', 'T')):
2335 '''
2336 Rotate traces from ZNE to LQT system.
2338 :param traces: list of traces in arbitrary order
2339 :param backazimuth: backazimuth in degrees clockwise from north
2340 :param incidence: incidence angle in degrees from vertical
2341 :param in_channels: input channel names
2342 :param out_channels: output channel names (default: ('L', 'Q', 'T'))
2343 :returns: list of transformed traces
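
    Example (a minimal sketch)::

        lqt_traces = rotate_to_lqt(
            [tr_z, tr_n, tr_e], backazimuth=45., incidence=10.,
            in_channels=('Z', 'N', 'E'))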
2344 '''
2345 i = incidence/180.*num.pi
2346 b = backazimuth/180.*num.pi
2348 ci = num.cos(i)
2349 cb = num.cos(b)
2350 si = num.sin(i)
2351 sb = num.sin(b)
2353 rotmat = num.array(
2354 [[ci, -cb*si, -sb*si], [si, cb*ci, sb*ci], [0., sb, -cb]])
2355 return project(traces, rotmat, in_channels, out_channels)
2358def _decompose(a):
2359 '''
2360 Decompose matrix into independent submatrices.
2361 '''
2363 def depends(iout, a):
2364 row = a[iout, :]
2365 return set(num.arange(row.size).compress(row != 0.0))
2367 def provides(iin, a):
2368 col = a[:, iin]
2369 return set(num.arange(col.size).compress(col != 0.0))
2371 a = num.asarray(a)
2372 outs = set(range(a.shape[0]))
2373 systems = []
2374 while outs:
2375 iout = outs.pop()
2377 gout = set()
2378 for iin in depends(iout, a):
2379 gout.update(provides(iin, a))
2381 if not gout:
2382 continue
2384 gin = set()
2385 for iout2 in gout:
2386 gin.update(depends(iout2, a))
2388 if not gin:
2389 continue
2391 for iout2 in gout:
2392 if iout2 in outs:
2393 outs.remove(iout2)
2395 gin = list(gin)
2396 gin.sort()
2397 gout = list(gout)
2398 gout.sort()
2400 systems.append((gin, gout, a[gout, :][:, gin]))
2402 return systems
2405def _channels_to_names(channels):
2406 names = []
2407 for ch in channels:
2408 if isinstance(ch, model.Channel):
2409 names.append(ch.name)
2410 else:
2411 names.append(ch)
2412 return names
2415def project(traces, matrix, in_channels, out_channels):
2416 '''
2417 Affine transform of three-component traces.
2419 Compute matrix-vector product of three-component traces, to e.g. rotate
2420 traces into a different basis. The traces are distinguished and ordered by
    their channel attribute. The transform is applied to overlapping parts of
    any appropriate combinations of the input traces. This should make the
    function robust in the presence of data gaps. It also tries to apply the
    transformation to subsets of the channels, if this is possible, so that,
    if for example a vertical component is missing, the horizontal components
    can still be rotated.
2428 :param traces: list of traces in arbitrary order
    :param matrix: transformation matrix
2430 :param in_channels: input channel names
2431 :param out_channels: output channel names
2432 :returns: list of transformed traces
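
    Example (a minimal sketch; exchange the north and east components)::

        matrix = num.array(
            [[1., 0., 0.],
             [0., 0., 1.],
             [0., 1., 0.]])

        projected = project(
            traces, matrix,
            in_channels=('Z', 'N', 'E'), out_channels=('Z', 'E', 'N'))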
2433 '''
2435 in_channels = tuple(_channels_to_names(in_channels))
2436 out_channels = tuple(_channels_to_names(out_channels))
2437 systems = _decompose(matrix)
    # fall back to the full matrix if any submatrix is not square
2440 for iins, iouts, submatrix in systems:
2441 if submatrix.shape[0] != submatrix.shape[1]:
2442 return _project3(traces, matrix, in_channels, out_channels)
2444 projected = []
2445 for iins, iouts, submatrix in systems:
2446 in_cha = tuple([in_channels[iin] for iin in iins])
2447 out_cha = tuple([out_channels[iout] for iout in iouts])
2448 if submatrix.shape[0] == 1:
2449 projected.extend(_project1(traces, submatrix, in_cha, out_cha))
2450 elif submatrix.shape[1] == 2:
2451 projected.extend(_project2(traces, submatrix, in_cha, out_cha))
2452 else:
2453 projected.extend(_project3(traces, submatrix, in_cha, out_cha))
2455 return projected
2458def project_dependencies(matrix, in_channels, out_channels):
2459 '''
2460 Figure out what dependencies project() would produce.
2461 '''
2463 in_channels = tuple(_channels_to_names(in_channels))
2464 out_channels = tuple(_channels_to_names(out_channels))
2465 systems = _decompose(matrix)
2467 subpro = []
2468 for iins, iouts, submatrix in systems:
2469 if submatrix.shape[0] != submatrix.shape[1]:
2470 subpro.append((matrix, in_channels, out_channels))
2472 if not subpro:
2473 for iins, iouts, submatrix in systems:
2474 in_cha = tuple([in_channels[iin] for iin in iins])
2475 out_cha = tuple([out_channels[iout] for iout in iouts])
2476 subpro.append((submatrix, in_cha, out_cha))
2478 deps = {}
2479 for mat, in_cha, out_cha in subpro:
2480 for oc in out_cha:
2481 if oc not in deps:
2482 deps[oc] = []
2484 for ic in in_cha:
2485 deps[oc].append(ic)
2487 return deps
2490def _project1(traces, matrix, in_channels, out_channels):
2491 assert len(in_channels) == 1
2492 assert len(out_channels) == 1
2493 assert matrix.shape == (1, 1)
2495 projected = []
2496 for a in traces:
2497 if not (a.channel,) == in_channels:
2498 continue
2500 ac = a.copy()
2501 ac.set_ydata(matrix[0, 0]*a.get_ydata())
2502 ac.set_codes(channel=out_channels[0])
2503 projected.append(ac)
2505 return projected
2508def _project2(traces, matrix, in_channels, out_channels):
2509 assert len(in_channels) == 2
2510 assert len(out_channels) == 2
2511 assert matrix.shape == (2, 2)
2512 projected = []
2513 for a in traces:
2514 for b in traces:
2515 if not ((a.channel, b.channel) == in_channels and
2516 a.nslc_id[:3] == b.nslc_id[:3] and
2517 abs(a.deltat-b.deltat) < a.deltat*0.001):
2518 continue
2520 tmin = max(a.tmin, b.tmin)
2521 tmax = min(a.tmax, b.tmax)
2523 if tmin > tmax:
2524 continue
2526 ac = a.chop(tmin, tmax, inplace=False, include_last=True)
2527 bc = b.chop(tmin, tmax, inplace=False, include_last=True)
2528 if abs(ac.tmin - bc.tmin) > ac.deltat*0.01:
2529 logger.warning(
2530 'Cannot project traces with displaced sampling '
2531 '(%s, %s, %s, %s)' % a.nslc_id)
2532 continue
2534 acydata = num.dot(matrix[0], (ac.get_ydata(), bc.get_ydata()))
2535 bcydata = num.dot(matrix[1], (ac.get_ydata(), bc.get_ydata()))
2537 ac.set_ydata(acydata)
2538 bc.set_ydata(bcydata)
2540 ac.set_codes(channel=out_channels[0])
2541 bc.set_codes(channel=out_channels[1])
2543 projected.append(ac)
2544 projected.append(bc)
2546 return projected
2549def _project3(traces, matrix, in_channels, out_channels):
2550 assert len(in_channels) == 3
2551 assert len(out_channels) == 3
2552 assert matrix.shape == (3, 3)
2553 projected = []
2554 for a in traces:
2555 for b in traces:
2556 for c in traces:
2557 if not ((a.channel, b.channel, c.channel) == in_channels
2558 and a.nslc_id[:3] == b.nslc_id[:3]
2559 and b.nslc_id[:3] == c.nslc_id[:3]
2560 and abs(a.deltat-b.deltat) < a.deltat*0.001
2561 and abs(b.deltat-c.deltat) < b.deltat*0.001):
2563 continue
2565 tmin = max(a.tmin, b.tmin, c.tmin)
2566 tmax = min(a.tmax, b.tmax, c.tmax)
2568 if tmin >= tmax:
2569 continue
2571 ac = a.chop(tmin, tmax, inplace=False, include_last=True)
2572 bc = b.chop(tmin, tmax, inplace=False, include_last=True)
2573 cc = c.chop(tmin, tmax, inplace=False, include_last=True)
2574 if (abs(ac.tmin - bc.tmin) > ac.deltat*0.01
2575 or abs(bc.tmin - cc.tmin) > bc.deltat*0.01):
2577 logger.warning(
2578 'Cannot project traces with displaced sampling '
2579 '(%s, %s, %s, %s)' % a.nslc_id)
2580 continue
2582 acydata = num.dot(
2583 matrix[0],
2584 (ac.get_ydata(), bc.get_ydata(), cc.get_ydata()))
2585 bcydata = num.dot(
2586 matrix[1],
2587 (ac.get_ydata(), bc.get_ydata(), cc.get_ydata()))
2588 ccydata = num.dot(
2589 matrix[2],
2590 (ac.get_ydata(), bc.get_ydata(), cc.get_ydata()))
2592 ac.set_ydata(acydata)
2593 bc.set_ydata(bcydata)
2594 cc.set_ydata(ccydata)
2596 ac.set_codes(channel=out_channels[0])
2597 bc.set_codes(channel=out_channels[1])
2598 cc.set_codes(channel=out_channels[2])
2600 projected.append(ac)
2601 projected.append(bc)
2602 projected.append(cc)
2604 return projected
2607def correlate(a, b, mode='valid', normalization=None, use_fft=False):
2608 '''
2609 Cross correlation of two traces.
2611 :param a,b: input traces
2612 :param mode: ``'valid'``, ``'full'``, or ``'same'``
2613 :param normalization: ``'normal'``, ``'gliding'``, or ``None``
2614 :param use_fft: bool, whether to do cross correlation in spectral domain
2616 :returns: trace containing cross correlation coefficients
2618 This function computes the cross correlation between two traces. It
2619 evaluates the discrete equivalent of
2621 .. math::
2623 c(t) = \\int_{-\\infty}^{\\infty} a^{\\ast}(\\tau) b(t+\\tau) d\\tau
    where the star denotes the complex conjugate. Note that the arguments here
    are swapped compared with the :py:func:`numpy.correlate` function, which
    is called internally. This function should be safe even with older
    versions of NumPy, where the correlate function has some problems.
2630 A trace containing the cross correlation coefficients is returned. The time
2631 information of the output trace is set so that the returned cross
2632 correlation can be viewed directly as a function of time lag.
2634 Example::
2636 # align two traces a and b containing a time shifted similar signal:
        c = pyrocko.trace.correlate(a, b)
2638 t, coef = c.max() # get time and value of maximum
2639 b.shift(-t) # align b with a
2641 '''
2643 assert_same_sampling_rate(a, b)
2645 ya, yb = a.ydata, b.ydata
2647 # need reversed order here:
2648 yc = numpy_correlate_fixed(yb, ya, mode=mode, use_fft=use_fft)
2649 kmin, kmax = numpy_correlate_lag_range(yb, ya, mode=mode, use_fft=use_fft)
2651 if normalization == 'normal':
2652 normfac = num.sqrt(num.sum(ya**2))*num.sqrt(num.sum(yb**2))
2653 yc = yc/normfac
2655 elif normalization == 'gliding':
2656 if mode != 'valid':
2657 assert False, 'gliding normalization currently only available ' \
2658 'with "valid" mode.'
2660 if ya.size < yb.size:
2661 yshort, ylong = ya, yb
2662 else:
2663 yshort, ylong = yb, ya
2665 epsilon = 0.00001
2666 normfac_short = num.sqrt(num.sum(yshort**2))
2667 normfac = normfac_short * num.sqrt(
2668 moving_sum(ylong**2, yshort.size, mode='valid')) \
2669 + normfac_short*epsilon
2671 if yb.size <= ya.size:
2672 normfac = normfac[::-1]
2674 yc /= normfac
2676 c = a.copy()
2677 c.set_ydata(yc)
2678 c.set_codes(*merge_codes(a, b, '~'))
2679 c.shift(-c.tmin + b.tmin-a.tmin + kmin * c.deltat)
2681 return c
2684def deconvolve(
2685 a, b, waterlevel,
2686 tshift=0.,
2687 pad=0.5,
2688 fd_taper=None,
2689 pad_to_pow2=True):
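    '''
    Waterlevel deconvolution of two traces.

    Spectral division of trace ``a`` by trace ``b``, with the denominator
    stabilized at a waterlevel relative to the maximum of the power spectrum
    of ``b``.

    :param a,b: input traces (must have matching sampling rates and onsets)
    :param waterlevel: waterlevel, as a fraction of the maximum of the power
        spectrum of ``b``
    :param tshift: time shift applied to the result [s]
    :param pad: amount of zero padding, as a fraction of the data length of
        ``a``
    :param fd_taper: taper applied to the quotient spectrum in the frequency
        domain, or ``None``
    :param pad_to_pow2: whether to pad to the next power of two for a faster
        FFT
    :returns: trace containing the deconvolution result
    '''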
    assert_same_sampling_rate(a, b)
2692 assert abs(a.tmin - b.tmin) < a.deltat * 0.001
2693 deltat = a.deltat
2694 npad = int(round(a.data_len()*pad + tshift / deltat))
2696 ndata = max(a.data_len(), b.data_len())
2697 ndata_pad = ndata + npad
2699 if pad_to_pow2:
2700 ntrans = nextpow2(ndata_pad)
2701 else:
2702 ntrans = ndata
2704 aspec = num.fft.rfft(a.ydata, ntrans)
2705 bspec = num.fft.rfft(b.ydata, ntrans)
2707 out = aspec * num.conj(bspec)
2709 bautocorr = bspec*num.conj(bspec)
2710 denom = num.maximum(bautocorr, waterlevel * bautocorr.max())
2712 out /= denom
2713 df = 1/(ntrans*deltat)
2715 if fd_taper is not None:
2716 fd_taper(out, 0.0, df)
2718 ydata = num.roll(num.fft.irfft(out), int(round(tshift/deltat)))
2719 c = a.copy(data=False)
2720 c.set_ydata(ydata[:ndata])
2721 c.set_codes(*merge_codes(a, b, '/'))
2722 return c
2725def assert_same_sampling_rate(a, b, eps=1.0e-6):
2726 assert same_sampling_rate(a, b, eps), \
2727 'Sampling rates differ: %g != %g' % (a.deltat, b.deltat)
2730def same_sampling_rate(a, b, eps=1.0e-6):
2731 '''
2732 Check if two traces have the same sampling rate.
2734 :param a,b: input traces
2735 :param eps: relative tolerance
2736 '''
2738 return abs(a.deltat - b.deltat) < (a.deltat + b.deltat)*eps
2741def fix_deltat_rounding_errors(deltat):
2742 '''
2743 Try to undo sampling rate rounding errors.
2745 Fix rounding errors of sampling intervals when these are read from single
2746 precision floating point values.
2748 Assumes that the true sampling rate or sampling interval was an integer
2749 value. No correction will be applied if this would change the sampling
2750 rate by more than 0.001%.
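
    Example (a minimal sketch)::

        tr.deltat = fix_deltat_rounding_errors(tr.deltat)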
2751 '''
2753 if deltat <= 1.0:
2754 deltat_new = 1.0 / round(1.0 / deltat)
2755 else:
2756 deltat_new = round(deltat)
2758 if abs(deltat_new - deltat) / deltat > 1e-5:
2759 deltat_new = deltat
2761 return deltat_new
2764def merge_codes(a, b, sep='-'):
2765 '''
2766 Merge network-station-location-channel codes of a pair of traces.
2767 '''
2769 o = []
2770 for xa, xb in zip(a.nslc_id, b.nslc_id):
2771 if xa == xb:
2772 o.append(xa)
2773 else:
2774 o.append(sep.join((xa, xb)))
2775 return o
2778class Taper(Object):
2779 '''
2780 Base class for tapers.
2782 Does nothing by default.
2783 '''
2785 def __call__(self, y, x0, dx):
2786 pass
2789class CosTaper(Taper):
2790 '''
2791 Cosine Taper.
2793 :param a: start of fading in
2794 :param b: end of fading in
2795 :param c: start of fading out
2796 :param d: end of fading out
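
    Example (a minimal sketch; fade in and out over the first and last 10 s
    of a trace ``tr``)::

        taper = CosTaper(tr.tmin, tr.tmin+10., tr.tmax-10., tr.tmax)
        tr.taper(taper)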
2797 '''
2799 a = Float.T()
2800 b = Float.T()
2801 c = Float.T()
2802 d = Float.T()
2804 def __init__(self, a, b, c, d):
2805 Taper.__init__(self, a=a, b=b, c=c, d=d)
2807 def __call__(self, y, x0, dx):
2808 apply_costaper(self.a, self.b, self.c, self.d, y, x0, dx)
2810 def span(self, y, x0, dx):
2811 return span_costaper(self.a, self.b, self.c, self.d, y, x0, dx)
2813 def time_span(self):
2814 return self.a, self.d
2817class CosFader(Taper):
2818 '''
2819 Cosine Fader.
2821 :param xfade: fade in and fade out time in seconds (optional)
2822 :param xfrac: fade in and fade out as fraction between 0. and 1. (optional)
    Only one of the two arguments may be set; the other has to be ``None``.
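
    Example (a minimal sketch; fade over 10% of the trace length at either
    end)::

        fader = CosFader(xfrac=0.1)
        tr.taper(fader)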
2825 '''
2827 xfade = Float.T(optional=True)
2828 xfrac = Float.T(optional=True)
2830 def __init__(self, xfade=None, xfrac=None):
2831 Taper.__init__(self, xfade=xfade, xfrac=xfrac)
2832 assert (xfade is None) != (xfrac is None)
2833 self._xfade = xfade
2834 self._xfrac = xfrac
2836 def __call__(self, y, x0, dx):
2838 xfade = self._xfade
2840 xlen = (y.size - 1)*dx
2841 if xfade is None:
2842 xfade = xlen * self._xfrac
2844 a = x0
2845 b = x0 + xfade
2846 c = x0 + xlen - xfade
2847 d = x0 + xlen
2849 apply_costaper(a, b, c, d, y, x0, dx)
2851 def span(self, y, x0, dx):
2852 return 0, y.size
2854 def time_span(self):
2855 return None, None
2858def none_min(li):
2859 if None in li:
2860 return None
2861 else:
2862 return min(x for x in li if x is not None)
2865def none_max(li):
2866 if None in li:
2867 return None
2868 else:
2869 return max(x for x in li if x is not None)
2872class MultiplyTaper(Taper):
2873 '''
2874 Multiplication of several tapers.
2875 '''
2877 tapers = List.T(Taper.T())
2879 def __init__(self, tapers=None):
2880 if tapers is None:
2881 tapers = []
2883 Taper.__init__(self, tapers=tapers)
2885 def __call__(self, y, x0, dx):
2886 for taper in self.tapers:
2887 taper(y, x0, dx)
2889 def span(self, y, x0, dx):
2890 spans = []
2891 for taper in self.tapers:
2892 spans.append(taper.span(y, x0, dx))
2894 mins, maxs = list(zip(*spans))
2895 return min(mins), max(maxs)
2897 def time_span(self):
2898 spans = []
2899 for taper in self.tapers:
2900 spans.append(taper.time_span())
2902 mins, maxs = list(zip(*spans))
2903 return none_min(mins), none_max(maxs)
2906class GaussTaper(Taper):
2907 '''
2908 Frequency domain Gaussian filter.
2909 '''
2911 alpha = Float.T()
2913 def __init__(self, alpha):
2914 Taper.__init__(self, alpha=alpha)
2915 self._alpha = alpha
2917 def __call__(self, y, x0, dx):
2918 f = x0 + num.arange(y.size)*dx
2919 y *= num.exp(-num.pi**2 / (self._alpha**2) * f**2)
2922cached_coefficients = {}
2925def _get_cached_filter_coefs(order, corners, btype):
2926 ck = (order, tuple(corners), btype)
2927 if ck not in cached_coefficients:
        if len(corners) == 1:
            cached_coefficients[ck] = signal.butter(
                order, corners[0], btype=btype)
2931 else:
2932 cached_coefficients[ck] = signal.butter(
2933 order, corners, btype=btype)
2935 return cached_coefficients[ck]
2938class _globals(object):
2939 _numpy_has_correlate_flip_bug = None
2942def _default_key(tr):
2943 return (tr.network, tr.station, tr.location, tr.channel)
2946def numpy_has_correlate_flip_bug():
2947 '''
    Check if NumPy's correlate function exhibits the old, flipped behaviour.
2949 '''
2951 if _globals._numpy_has_correlate_flip_bug is None:
2952 a = num.array([0, 0, 1, 0, 0, 0, 0])
2953 b = num.array([0, 0, 0, 0, 1, 0, 0, 0])
2954 ab = num.correlate(a, b, mode='same')
2955 ba = num.correlate(b, a, mode='same')
2956 _globals._numpy_has_correlate_flip_bug = num.all(ab == ba)
2958 return _globals._numpy_has_correlate_flip_bug
2961def numpy_correlate_fixed(a, b, mode='valid', use_fft=False):
2962 '''
2963 Call :py:func:`numpy.correlate` with fixes.

    .. math::

        c_k = \\sum_i a_{i+k} b^{\\ast}_i

    Note that the result produced by newer versions of
    :py:func:`numpy.correlate` is always flipped with respect to the formula
    given in its documentation (assuming ascending ``k`` for the output).
2970 '''
2972 if use_fft:
2973 if a.size < b.size:
2974 c = signal.fftconvolve(b[::-1], a, mode=mode)
2975 else:
2976 c = signal.fftconvolve(a, b[::-1], mode=mode)
2977 return c
2979 else:
2980 buggy = numpy_has_correlate_flip_bug()
2982 a = num.asarray(a)
2983 b = num.asarray(b)
2985 if buggy:
2986 b = num.conj(b)
2988 c = num.correlate(a, b, mode=mode)
2990 if buggy and a.size < b.size:
2991 return c[::-1]
2992 else:
2993 return c
2996def numpy_correlate_emulate(a, b, mode='valid'):
2997 '''
2998 Slow version of :py:func:`numpy.correlate` for comparison.
2999 '''
3001 a = num.asarray(a)
3002 b = num.asarray(b)
3005 kmin, kmax = numpy_correlate_lag_range(a, b, mode=mode)
3006 kmin = int(kmin)
3007 kmax = int(kmax)
3008 klen = kmax - kmin + 1
3009 c = num.zeros(klen, dtype=num.find_common_type((b.dtype, a.dtype), ()))
3010 for k in range(kmin, kmin+klen):
3011 imin = max(0, -k)
3012 ilen = min(b.size, a.size-k) - imin
3013 c[k-kmin] = num.sum(
3014 a[imin+k:imin+ilen+k] * num.conj(b[imin:imin+ilen]))
3016 return c
3019def numpy_correlate_lag_range(a, b, mode='valid', use_fft=False):
3020 '''
3021 Get range of lags for which :py:func:`numpy.correlate` produces values.
3022 '''
3024 a = num.asarray(a)
3025 b = num.asarray(b)
3027 kmin = -(b.size-1)
3028 if mode == 'full':
3029 klen = a.size-kmin
3030 elif mode == 'same':
3031 klen = max(a.size, b.size)
3032 kmin += (a.size+b.size-1 - max(a.size, b.size)) // 2 + \
3033 int(not use_fft and a.size % 2 == 0 and b.size > a.size)
3034 elif mode == 'valid':
3035 klen = abs(a.size - b.size) + 1
3036 kmin += min(a.size, b.size) - 1
3038 return kmin, kmin + klen - 1
3041def autocorr(x, nshifts):
3042 '''
3043 Compute biased estimate of the first autocorrelation coefficients.
3045 :param x: input array
3046 :param nshifts: number of coefficients to calculate
3047 '''
3049 mean = num.mean(x)
3050 std = num.std(x)
3051 n = x.size
3052 xdm = x - mean
3053 r = num.zeros(nshifts)
3054 for k in range(nshifts):
3055 r[k] = 1./((n-num.abs(k))*std) * num.sum(xdm[:n-k] * xdm[k:])
3057 return r
3060def yulewalker(x, order):
3061 '''
3062 Compute autoregression coefficients using Yule-Walker method.
3064 :param x: input array
3065 :param order: number of coefficients to produce
3067 A biased estimate of the autocorrelation is used. The Yule-Walker equations
3068 are solved by :py:func:`numpy.linalg.inv` instead of Levinson-Durbin
3069 recursion which is normally used.
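
    Example (a minimal sketch; fit an AR(2) model to random data)::

        x = num.random.normal(size=1000)
        coefs = yulewalker(x, order=2)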
3070 '''
3072 gamma = autocorr(x, order+1)
3073 d = gamma[1:1+order]
3074 a = num.zeros((order, order))
3075 gamma2 = num.concatenate((gamma[::-1], gamma[1:order]))
3076 for i in range(order):
3077 ioff = order-i
3078 a[i, :] = gamma2[ioff:ioff+order]
3080 return num.dot(num.linalg.inv(a), -d)
3083def moving_avg(x, n):
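    '''
    Moving average of an array, computed via cumulative sums.

    :param x: input array
    :param n: window size in samples

    The output has the same length as the input; the edges are padded with
    the first and last averaged values.
    '''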
3084 n = int(n)
3085 cx = x.cumsum()
3086 nn = len(x)
3087 y = num.zeros(nn, dtype=cx.dtype)
3088 y[n//2:n//2+(nn-n)] = (cx[n:]-cx[:-n])/n
3089 y[:n//2] = y[n//2]
3090 y[n//2+(nn-n):] = y[n//2+(nn-n)-1]
3091 return y
3094def moving_sum(x, n, mode='valid'):
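    '''
    Moving window sum of an array, computed via cumulative sums.

    :param x: input array
    :param n: window size in samples
    :param mode: ``'valid'``, ``'full'``, or ``'same'``, analogous to the
        modes of :py:func:`numpy.convolve`
    '''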
3095 n = int(n)
3096 cx = x.cumsum()
3097 nn = len(x)
3099 if mode == 'valid':
3100 if nn-n+1 <= 0:
3101 return num.zeros(0, dtype=cx.dtype)
3102 y = num.zeros(nn-n+1, dtype=cx.dtype)
3103 y[0] = cx[n-1]
3104 y[1:nn-n+1] = cx[n:nn]-cx[0:nn-n]
3106 if mode == 'full':
3107 y = num.zeros(nn+n-1, dtype=cx.dtype)
3108 if n <= nn:
3109 y[0:n] = cx[0:n]
3110 y[n:nn] = cx[n:nn]-cx[0:nn-n]
3111 y[nn:nn+n-1] = cx[-1]-cx[nn-n:nn-1]
3112 else:
3113 y[0:nn] = cx[0:nn]
3114 y[nn:n] = cx[nn-1]
3115 y[n:nn+n-1] = cx[nn-1] - cx[0:nn-1]
3117 if mode == 'same':
3118 n1 = (n-1)//2
3119 y = num.zeros(nn, dtype=cx.dtype)
3120 if n <= nn:
3121 y[0:n-n1] = cx[n1:n]
3122 y[n-n1:nn-n1] = cx[n:nn]-cx[0:nn-n]
3123 y[nn-n1:nn] = cx[nn-1] - cx[nn-n:nn-n+n1]
3124 else:
3125 y[0:max(0, nn-n1)] = cx[min(n1, nn):nn]
3126 y[max(nn-n1, 0):min(n-n1, nn)] = cx[nn-1]
3127 y[min(n-n1, nn):nn] = cx[nn-1] - cx[0:max(0, nn-(n-n1))]
3129 return y
3132def nextpow2(i):
3133 return 2**int(math.ceil(math.log(i)/math.log(2.)))
3136def snapper_w_offset(nmax, offset, delta, snapfun=math.ceil):
3137 def snap(x):
3138 return max(0, min(int(snapfun((x-offset)/delta)), nmax))
3139 return snap
3142def snapper(nmax, delta, snapfun=math.ceil):
3143 def snap(x):
3144 return max(0, min(int(snapfun(x/delta)), nmax))
3145 return snap
3148def apply_costaper(a, b, c, d, y, x0, dx):
3149 hi = snapper_w_offset(y.size, x0, dx)
3150 y[:hi(a)] = 0.
3151 y[hi(a):hi(b)] *= 0.5 \
3152 - 0.5*num.cos((dx*num.arange(hi(a), hi(b))-(a-x0))/(b-a)*num.pi)
3153 y[hi(c):hi(d)] *= 0.5 \
3154 + 0.5*num.cos((dx*num.arange(hi(c), hi(d))-(c-x0))/(d-c)*num.pi)
3155 y[hi(d):] = 0.
3158def span_costaper(a, b, c, d, y, x0, dx):
3159 hi = snapper_w_offset(y.size, x0, dx)
3160 return hi(a), hi(d) - hi(a)
3163def costaper(a, b, c, d, nfreqs, deltaf):
3164 hi = snapper(nfreqs, deltaf)
3165 tap = num.zeros(nfreqs)
3166 tap[hi(a):hi(b)] = 0.5 \
3167 - 0.5*num.cos((deltaf*num.arange(hi(a), hi(b))-a)/(b-a)*num.pi)
3168 tap[hi(b):hi(c)] = 1.
3169 tap[hi(c):hi(d)] = 0.5 \
3170 + 0.5*num.cos((deltaf*num.arange(hi(c), hi(d))-c)/(d-c)*num.pi)
3172 return tap
3175def t2ind(t, tdelta, snap=round):
3176 return int(snap(t/tdelta))
3179def hilbert(x, N=None):
3180 '''
    Return the analytic signal of ``x``, of length ``N``; its imaginary part
    is the Hilbert transform of ``x``.

    (From :py:mod:`scipy.signal`, but changed to use fft and ifft from
    :py:mod:`numpy.fft`.)
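
    Example (a minimal sketch; compute the envelope of an array ``y``)::

        envelope = num.abs(hilbert(y))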
3184 '''
3186 x = num.asarray(x)
3187 if N is None:
3188 N = len(x)
3189 if N <= 0:
3190 raise ValueError("N must be positive.")
3191 if num.iscomplexobj(x):
3192 logger.warning('imaginary part of x ignored.')
3193 x = num.real(x)
3195 Xf = num.fft.fft(x, N, axis=0)
3196 h = num.zeros(N)
3197 if N % 2 == 0:
3198 h[0] = h[N//2] = 1
3199 h[1:N//2] = 2
3200 else:
3201 h[0] = 1
3202 h[1:(N+1)//2] = 2
3204 if len(x.shape) > 1:
3205 h = h[:, num.newaxis]
3206 x = num.fft.ifft(Xf*h)
3207 return x
3210def near(a, b, eps):
3211 return abs(a-b) < eps
3214def coroutine(func):
3215 def wrapper(*args, **kwargs):
3216 gen = func(*args, **kwargs)
3217 next(gen)
3218 return gen
3220 wrapper.__name__ = func.__name__
3221 wrapper.__dict__ = func.__dict__
3222 wrapper.__doc__ = func.__doc__
3223 return wrapper
3226class States(object):
3227 '''
3228 Utility to store channel-specific state in coroutines.
3229 '''
3231 def __init__(self):
3232 self._states = {}
3234 def get(self, tr):
3235 k = tr.nslc_id
3236 if k in self._states:
3237 tmin, deltat, dtype, value = self._states[k]
3238 if (near(tmin, tr.tmin, deltat/100.)
3239 and near(deltat, tr.deltat, deltat/10000.)
3240 and dtype == tr.ydata.dtype):
3242 return value
3244 return None
3246 def set(self, tr, value):
3247 k = tr.nslc_id
3248 if k in self._states and self._states[k][-1] is not value:
3249 self.free(self._states[k][-1])
3251 self._states[k] = (tr.tmax+tr.deltat, tr.deltat, tr.ydata.dtype, value)
3253 def free(self, value):
3254 pass
3257@coroutine
3258def co_list_append(list):
3259 while True:
3260 list.append((yield))
3263class ScipyBug(Exception):
3264 pass
3267@coroutine
3268def co_lfilter(target, b, a):
3269 '''
3270 Successively filter broken continuous trace data (coroutine).
3272 Create coroutine which takes :py:class:`Trace` objects, filters their data
3273 through :py:func:`scipy.signal.lfilter` and sends new :py:class:`Trace`
3274 objects containing the filtered data to target. This is useful, if one
3275 wants to filter a long continuous time series, which is split into many
3276 successive traces without producing filter artifacts at trace boundaries.
3278 Filter states are kept *per channel*, specifically, for each (network,
    station, location, channel) combination occurring in the input traces, a
3280 separate state is created and maintained. This makes it possible to filter
3281 multichannel or multistation data with only one :py:func:`co_lfilter`
3282 instance.
    Filter state is reset when gaps occur.
3286 Use it like this::
3288 from pyrocko.trace import co_lfilter, co_list_append
3290 filtered_traces = []
        pipe = co_lfilter(co_list_append(filtered_traces), b, a)
3292 for trace in traces:
3293 pipe.send(trace)
3295 pipe.close()
3297 '''
3299 try:
3300 states = States()
3301 output = None
3302 while True:
3303 input = (yield)
3305 zi = states.get(input)
3306 if zi is None:
3307 zi = num.zeros(max(len(a), len(b))-1, dtype=float)
3309 output = input.copy(data=False)
3310 try:
3311 ydata, zf = signal.lfilter(b, a, input.get_ydata(), zi=zi)
3312 except ValueError:
3313 raise ScipyBug(
3314 'signal.lfilter failed: could be related to a bug '
3315 'in some older scipy versions, e.g. on opensuse42.1')
3317 output.set_ydata(ydata)
3318 states.set(input, zf)
3319 target.send(output)
3321 except GeneratorExit:
3322 target.close()
3325def co_antialias(target, q, n=None, ftype='fir'):
3326 b, a, n = util.decimate_coeffs(q, n, ftype)
3327 anti = co_lfilter(target, b, a)
3328 return anti
3331@coroutine
3332def co_dropsamples(target, q, nfir):
3333 try:
3334 states = States()
3335 while True:
3336 tr = (yield)
3337 newdeltat = q * tr.deltat
3338 ioffset = states.get(tr)
3339 if ioffset is None:
                # for fir filter, the first nfir samples are polluted by
                # boundary effects; cut them off.
3342 # for iir this may be (much) more, we do not correct for that.
3343 # put sample instances to a time which is a multiple of the
3344 # new sampling interval.
3345 newtmin_want = math.ceil(
3346 (tr.tmin+(nfir+1)*tr.deltat) / newdeltat) * newdeltat \
3347 - (nfir/2*tr.deltat)
3348 ioffset = int(round((newtmin_want - tr.tmin)/tr.deltat))
3349 if ioffset < 0:
3350 ioffset = ioffset % q
3352 newtmin_have = tr.tmin + ioffset * tr.deltat
3353 newtr = tr.copy(data=False)
3354 newtr.deltat = newdeltat
3355 # because the fir kernel shifts data by nfir/2 samples:
3356 newtr.tmin = newtmin_have - (nfir/2*tr.deltat)
3357 newtr.set_ydata(tr.get_ydata()[ioffset::q].copy())
3358 states.set(tr, (ioffset % q - tr.data_len() % q) % q)
3359 target.send(newtr)
3361 except GeneratorExit:
3362 target.close()
3365def co_downsample(target, q, n=None, ftype='fir'):
3366 '''
3367 Successively downsample broken continuous trace data (coroutine).
3369 Create coroutine which takes :py:class:`Trace` objects, downsamples their
3370 data and sends new :py:class:`Trace` objects containing the downsampled
3371 data to target. This is useful, if one wants to downsample a long
3372 continuous time series, which is split into many successive traces without
3373 producing filter artifacts and gaps at trace boundaries.
    Filter states are kept *per channel*, specifically, for each (network,
    station, location, channel) combination occurring in the input traces, a
    separate state is created and maintained. This makes it possible to
    downsample multichannel or multistation data with only one
    :py:func:`co_downsample` instance.

    Filter state is reset when gaps occur. The sampling instances are chosen
    so that they occur at (or as close as possible to) integer multiples of
    the sampling interval of the downsampled trace (based on system time).
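
    Use it like this::

        from pyrocko.trace import co_downsample, co_list_append

        downsampled = []
        pipe = co_downsample(co_list_append(downsampled), q=2)
        for tr in traces:
            pipe.send(tr)

        pipe.close()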
3384 '''
3386 b, a, n = util.decimate_coeffs(q, n, ftype)
3387 return co_antialias(co_dropsamples(target, q, n), q, n, ftype)
3390@coroutine
3391def co_downsample_to(target, deltat):
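    '''
    Successively downsample broken continuous trace data to a fixed sampling
    interval (coroutine).

    Like :py:func:`co_downsample`, but takes the target sampling interval
    ``deltat`` [s] and chains as many decimation stages as needed for each
    incoming trace. Raises :py:exc:`pyrocko.util.UnavailableDecimation` if
    the required decimation ratio is not an integer.
    '''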
3393 decimators = {}
3394 try:
3395 while True:
3396 tr = (yield)
3397 ratio = deltat / tr.deltat
3398 rratio = round(ratio)
3399 if abs(rratio - ratio)/ratio > 0.0001:
3400 raise util.UnavailableDecimation('ratio = %g' % ratio)
3402 deci_seq = tuple(x for x in util.decitab(int(rratio)) if x != 1)
3403 if deci_seq not in decimators:
3404 pipe = target
3405 for q in deci_seq[::-1]:
3406 pipe = co_downsample(pipe, q)
3408 decimators[deci_seq] = pipe
3410 decimators[deci_seq].send(tr)
3412 except GeneratorExit:
3413 for g in decimators.values():
3414 g.close()
3417class DomainChoice(StringChoice):
3418 choices = [
3419 'time_domain',
3420 'frequency_domain',
3421 'envelope',
3422 'absolute',
3423 'cc_max_norm']
3426class MisfitSetup(Object):
3427 '''
    Contains misfit setup to be used in :py:func:`trace.misfit`.
3430 :param description: Description of the setup
3431 :param norm: L-norm classifier
3432 :param taper: Object of :py:class:`Taper`
3433 :param filter: Object of :py:class:`FrequencyResponse`
3434 :param domain: ['time_domain', 'frequency_domain', 'envelope', 'absolute',
3435 'cc_max_norm']
    Can be dumped to a YAML file.
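
    Example (a minimal sketch; the taper times and output file name are
    assumed)::

        setup = MisfitSetup(
            norm=2,
            taper=CosTaper(0., 1., 9., 10.),
            domain='time_domain')

        setup.dump(filename='misfit_setup.conf')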
3438 '''
3440 xmltagname = 'misfitsetup'
3441 description = String.T(optional=True)
3442 norm = Int.T(optional=False)
3443 taper = Taper.T(optional=False)
3444 filter = FrequencyResponse.T(optional=True)
3445 domain = DomainChoice.T(default='time_domain')
3448def equalize_sampling_rates(trace_1, trace_2):
3449 '''
3450 Equalize sampling rates of two traces (reduce higher sampling rate to
3451 lower).
3453 :param trace_1: :py:class:`Trace` object
3454 :param trace_2: :py:class:`Trace` object
    Returns the two traces; if resampling was needed, the affected trace is
    replaced by a downsampled copy.
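
    Example (a minimal sketch)::

        tr1, tr2 = equalize_sampling_rates(tr1, tr2)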
3457 '''
3459 if same_sampling_rate(trace_1, trace_2):
3460 return trace_1, trace_2
3462 if trace_1.deltat < trace_2.deltat:
3463 t1_out = trace_1.copy()
3464 t1_out.downsample_to(deltat=trace_2.deltat, snap=True)
3465 logger.debug('Trace downsampled (return copy of trace): %s'
3466 % '.'.join(t1_out.nslc_id))
3467 return t1_out, trace_2
3469 elif trace_1.deltat > trace_2.deltat:
3470 t2_out = trace_2.copy()
3471 t2_out.downsample_to(deltat=trace_1.deltat, snap=True)
3472 logger.debug('Trace downsampled (return copy of trace): %s'
3473 % '.'.join(t2_out.nslc_id))
3474 return trace_1, t2_out
3477def Lx_norm(u, v, norm=2):
3478 '''
    Calculate the misfit value *m* and the normalization divisor *n*
    according to the given norm.
3482 The normalization divisor *n* is calculated from ``v``.
3484 :param u: :py:class:`numpy.array`
3485 :param v: :py:class:`numpy.array`
3486 :param norm: (default = 2)
3488 ``u`` and ``v`` must be of same size.
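
    For a general ``norm`` :math:`x` this is:

    .. math::

        m = \\left( \\sum_i |v_i - u_i|^{x} \\right)^{1/x}, \\qquad
        n = \\left( \\sum_i |v_i|^{x} \\right)^{1/x}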
3489 '''
3491 if norm == 1:
3492 return (
3493 num.sum(num.abs(v-u)),
3494 num.sum(num.abs(v)))
3496 elif norm == 2:
3497 return (
3498 num.sqrt(num.sum((v-u)**2)),
3499 num.sqrt(num.sum(v**2)))
3501 else:
3502 return (
3503 num.power(num.sum(num.abs(num.power(v - u, norm))), 1./norm),
3504 num.power(num.sum(num.abs(num.power(v, norm))), 1./norm))
3507def do_downsample(tr, deltat):
3508 if abs(tr.deltat - deltat) / tr.deltat > 1e-6:
3509 tr = tr.copy()
3510 tr.downsample_to(deltat, snap=True, demean=False)
3511 else:
3512 if tr.tmin/tr.deltat > 1e-6 or tr.tmax/tr.deltat > 1e-6:
3513 tr = tr.copy()
3514 tr.snap()
3515 return tr
3518def do_extend(tr, tmin, tmax):
3519 if tmin < tr.tmin or tmax > tr.tmax:
3520 tr = tr.copy()
3521 tr.extend(tmin=tmin, tmax=tmax, fillmethod='repeat')
3523 return tr
3526def do_pre_taper(tr, taper):
3527 return tr.taper(taper, inplace=False, chop=True)
3530def do_fft(tr, filter):
3531 if filter is None:
3532 return tr
3533 else:
3534 ndata = tr.ydata.size
3535 nfft = nextpow2(ndata)
3536 padded = num.zeros(nfft, dtype=float)
3537 padded[:ndata] = tr.ydata
3538 spectrum = num.fft.rfft(padded)
3539 df = 1.0 / (tr.deltat * nfft)
3540 frequencies = num.arange(spectrum.size)*df
3541 return [tr, frequencies, spectrum]
3544def do_filter(inp, filter):
3545 if filter is None:
3546 return inp
3547 else:
3548 tr, frequencies, spectrum = inp
3549 spectrum *= filter.evaluate(frequencies)
3550 return [tr, frequencies, spectrum]
3553def do_ifft(inp):
3554 if isinstance(inp, Trace):
3555 return inp
3556 else:
3557 tr, _, spectrum = inp
3558 ndata = tr.ydata.size
3559 tr = tr.copy(data=False)
3560 tr.set_ydata(num.fft.irfft(spectrum)[:ndata])
3561 return tr
3564def check_alignment(t1, t2):
3565 if abs(t1.tmin-t2.tmin) > t1.deltat * 1e-4 or \
3566 abs(t1.tmax - t2.tmax) > t1.deltat * 1e-4 or \
3567 t1.ydata.shape != t2.ydata.shape:
3568 raise MisalignedTraces(
3569 'Cannot calculate misfit of %s and %s due to misaligned '
3570 'traces.' % ('.'.join(t1.nslc_id), '.'.join(t2.nslc_id)))