# http://pyrocko.org - GPLv3
#
# The Pyrocko Developers, 21st Century
# ---|P------/S----------~Lg----------
'''
This module provides basic signal processing for seismic traces.
'''

from __future__ import division, absolute_import

import time
import math
import copy
import logging
import hashlib
from collections import defaultdict

import numpy as num
from scipy import signal

from pyrocko import util, orthodrome, pchain, model
from pyrocko.util import reuse
from pyrocko.guts import Object, Float, Int, String, List, \
    StringChoice, Timestamp
from pyrocko.guts_array import Array
from pyrocko.model import squirrel_content

# backward compatibility
from pyrocko.util import UnavailableDecimation  # noqa
from pyrocko.response import (  # noqa
    FrequencyResponse, Evalresp, InverseEvalresp, PoleZeroResponse,
    ButterworthResponse, SampledResponse, IntegrationResponse,
    DifferentiationResponse, MultiplyResponse)


guts_prefix = 'pf'

logger = logging.getLogger('pyrocko.trace')


@squirrel_content
class Trace(Object):

    '''
    Create new trace object.

    A ``Trace`` object represents a single continuous strip of evenly sampled
    time series data. It is built from a 1D NumPy array containing the data
    samples and some attributes describing its beginning and ending time, its
    sampling rate and four string identifiers (its network, station, location
    and channel code).

    :param network: network code
    :param station: station code
    :param location: location code
    :param channel: channel code
    :param extra: extra code
    :param tmin: system time of first sample in [s]
    :param tmax: system time of last sample in [s] (if set to ``None`` it is
        computed from length of ``ydata``)
    :param deltat: sampling interval in [s]
    :param ydata: 1D numpy array with data samples (can be ``None`` when
        ``tmax`` is not ``None``)
    :param mtime: optional modification time

    :param meta: additional meta information (not used, but maintained by the
        library)

    The length of the network, station, location and channel codes is not
    restricted by this software, but data formats like SAC, Mini-SEED or GSE
    have different limits on the lengths of these codes. The codes set here
    are silently truncated when the trace is stored.
    '''

    network = String.T(default='', help='Deployment/network code (1-8)')
    station = String.T(default='STA', help='Station code (1-5)')
    location = String.T(default='', help='Location code (0-2)')
    channel = String.T(default='', help='Channel code (3)')
    extra = String.T(default='', help='Extra/custom code')

    tmin = Timestamp.T(default=Timestamp.D('1970-01-01 00:00:00'))
    tmax = Timestamp.T()

    deltat = Float.T(default=1.0)
    ydata = Array.T(optional=True, shape=(None,), serialize_as='base64+meta')

    mtime = Timestamp.T(optional=True)

    cached_frequencies = {}

    def __init__(self, network='', station='STA', location='', channel='',
                 tmin=0., tmax=None, deltat=1., ydata=None, mtime=None,
                 meta=None, extra=''):

        Object.__init__(self, init_props=False)

        time_float = util.get_time_float()

        if not isinstance(tmin, time_float):
            tmin = Trace.tmin.regularize_extra(tmin)

        if tmax is not None and not isinstance(tmax, time_float):
            tmax = Trace.tmax.regularize_extra(tmax)

        if mtime is not None and not isinstance(mtime, time_float):
            mtime = Trace.mtime.regularize_extra(mtime)

        self._growbuffer = None

        tmin = time_float(tmin)
        if tmax is not None:
            tmax = time_float(tmax)

        if mtime is None:
            mtime = time_float(time.time())

        self.network, self.station, self.location, self.channel, \
            self.extra = [
                reuse(x) for x in (
                    network, station, location, channel, extra)]

        self.tmin = tmin
        self.deltat = deltat

        if tmax is None:
            if ydata is not None:
                self.tmax = self.tmin + (ydata.size-1)*self.deltat
            else:
                raise Exception(
                    'fixme: trace must be created with tmax or ydata')
        else:
            n = int(round((tmax - self.tmin) / self.deltat)) + 1
            self.tmax = self.tmin + (n - 1) * self.deltat

        self.meta = meta
        self.ydata = ydata
        self.mtime = mtime
        self._update_ids()
        self.file = None
        self._pchain = None
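
    # Usage sketch (values illustrative): create a trace holding one second
    # of 100 Hz random noise. `tmax` is derived from `ydata` when not given.
    #
    #   import numpy as num
    #   from pyrocko import trace
    #
    #   tr = trace.Trace(
    #       network='XX', station='TEST', channel='BHZ',
    #       tmin=0.0, deltat=0.01, ydata=num.random.normal(size=100))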

    def __str__(self):
        fmt = min(9, max(0, -int(math.floor(math.log10(self.deltat)))))
        s = 'Trace (%s, %s, %s, %s)\n' % self.nslc_id
        s += '  timerange: %s - %s\n' % (
            util.time_to_str(self.tmin, format=fmt),
            util.time_to_str(self.tmax, format=fmt))

        s += '  delta t: %g\n' % self.deltat
        if self.meta:
            for k in sorted(self.meta.keys()):
                s += '  %s: %s\n' % (k, self.meta[k])
        return s

    @property
    def codes(self):
        from pyrocko.squirrel import model
        return model.CodesNSLCE(
            self.network, self.station, self.location, self.channel,
            self.extra)

    @property
    def time_span(self):
        return self.tmin, self.tmax

    @property
    def summary(self):
        return '%s %-16s %s %g' % (
            self.__class__.__name__, self.str_codes, self.str_time_span,
            self.deltat)

    def __getstate__(self):
        return (self.network, self.station, self.location, self.channel,
                self.tmin, self.tmax, self.deltat, self.mtime,
                self.ydata, self.meta, self.extra)

    def __setstate__(self, state):
        if len(state) == 11:
            self.network, self.station, self.location, self.channel, \
                self.tmin, self.tmax, self.deltat, self.mtime, \
                self.ydata, self.meta, self.extra = state

        elif len(state) == 12:
            # backward compatibility 0
            self.network, self.station, self.location, self.channel, \
                self.tmin, self.tmax, self.deltat, self.mtime, \
                self.ydata, self.meta, _, self.extra = state

        elif len(state) == 10:
            # backward compatibility 1
            self.network, self.station, self.location, self.channel, \
                self.tmin, self.tmax, self.deltat, self.mtime, \
                self.ydata, self.meta = state

            self.extra = ''

        else:
            # backward compatibility 2
            self.network, self.station, self.location, self.channel, \
                self.tmin, self.tmax, self.deltat, self.mtime = state

            self.ydata = None
            self.meta = None
            self.extra = ''

        self._growbuffer = None
        self._update_ids()

    def hash(self, unsafe=False):
        sha1 = hashlib.sha1()
        for code in self.nslc_id:
            sha1.update(code.encode())
        sha1.update(self.tmin.hex().encode())
        sha1.update(self.tmax.hex().encode())
        sha1.update(self.deltat.hex().encode())

        if self.ydata is not None:
            if not unsafe:
                sha1.update(memoryview(self.ydata))
            else:
                sha1.update(memoryview(self.ydata[:32]))
                sha1.update(memoryview(self.ydata[-32:]))

        return sha1.hexdigest()

    def name(self):
        '''
        Get a short string description.
        '''

        s = '%s.%s.%s.%s, %s, %s' % (self.nslc_id + (
            util.time_to_str(self.tmin),
            util.time_to_str(self.tmax)))

        return s

    def __eq__(self, other):
        return (
            isinstance(other, Trace)
            and self.network == other.network
            and self.station == other.station
            and self.location == other.location
            and self.channel == other.channel
            and (abs(self.deltat - other.deltat)
                 < (self.deltat + other.deltat)*1e-6)
            and abs(self.tmin-other.tmin) < self.deltat*0.01
            and abs(self.tmax-other.tmax) < self.deltat*0.01
            and num.all(self.ydata == other.ydata))

    def almost_equal(self, other, rtol=1e-5, atol=0.0):
        return (
            self.network == other.network
            and self.station == other.station
            and self.location == other.location
            and self.channel == other.channel
            and (abs(self.deltat - other.deltat)
                 < (self.deltat + other.deltat)*1e-6)
            and abs(self.tmin-other.tmin) < self.deltat*0.01
            and abs(self.tmax-other.tmax) < self.deltat*0.01
            and num.allclose(self.ydata, other.ydata, rtol=rtol, atol=atol))

    def assert_almost_equal(self, other, rtol=1e-5, atol=0.0):

        assert self.network == other.network, \
            'network codes differ: %s, %s' % (self.network, other.network)
        assert self.station == other.station, \
            'station codes differ: %s, %s' % (self.station, other.station)
        assert self.location == other.location, \
            'location codes differ: %s, %s' % (self.location, other.location)
        assert self.channel == other.channel, 'channel codes differ'
        assert (abs(self.deltat - other.deltat)
                < (self.deltat + other.deltat)*1e-6), \
            'sampling intervals differ %g, %g' % (self.deltat, other.deltat)
        assert abs(self.tmin-other.tmin) < self.deltat*0.01, \
            'start times differ: %s, %s' % (
                util.time_to_str(self.tmin), util.time_to_str(other.tmin))
        assert abs(self.tmax-other.tmax) < self.deltat*0.01, \
            'end times differ: %s, %s' % (
                util.time_to_str(self.tmax), util.time_to_str(other.tmax))

        assert num.allclose(self.ydata, other.ydata, rtol=rtol, atol=atol), \
            'trace values differ'

    def __hash__(self):
        return id(self)

    def __call__(self, t, clip=False, snap=round):
        it = int(snap((t-self.tmin)/self.deltat))
        if clip:
            it = max(0, min(it, self.ydata.size-1))
        else:
            if it < 0 or self.ydata.size <= it:
                raise IndexError()

        return self.tmin+it*self.deltat, self.ydata[it]

    def interpolate(self, t, clip=False):
        '''
        Value of trace between supporting points through linear interpolation.

        :param t: time instant
        :param clip: whether to clip indices to trace ends
        '''

        t0, y0 = self(t, clip=clip, snap=math.floor)
        t1, y1 = self(t, clip=clip, snap=math.ceil)
        if t0 == t1:
            return y0
        else:
            return y0+(t-t0)/(t1-t0)*(y1-y0)
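
    # Usage sketch: with samples [0., 1., 2.] at deltat=0.5 and tmin=0., a
    # value between the first two sampling instants is interpolated linearly
    # (values illustrative):
    #
    #   tr = Trace(tmin=0., deltat=0.5, ydata=num.array([0., 1., 2.]))
    #   tr.interpolate(0.25)  # -> 0.5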

    def index_clip(self, i):
        '''
        Clip index to valid range.
        '''

        return min(max(0, i), self.ydata.size)

    def add(self, other, interpolate=True, left=0., right=0.):
        '''
        Add values of other trace (self += other).

        Add values of ``other`` trace to the values of ``self``, where it
        intersects with ``other``. This method does not change the extent of
        ``self``. If ``interpolate`` is ``True`` (the default), the values of
        ``other`` to be added are interpolated at sampling instants of
        ``self``. Linear interpolation is performed. In this case the
        sampling rate of ``other`` must be equal to or lower than that of
        ``self``. If ``interpolate`` is ``False``, the sampling rates of the
        two traces must match.
        '''

        if interpolate:
            assert self.deltat <= other.deltat \
                or same_sampling_rate(self, other)

            other_xdata = other.get_xdata()
            xdata = self.get_xdata()
            self.ydata += num.interp(
                xdata, other_xdata, other.ydata, left=left, right=right)
        else:
            assert self.deltat == other.deltat
            ioff = int(round((other.tmin-self.tmin)/self.deltat))
            ibeg = max(0, ioff)
            iend = min(self.data_len(), ioff+other.data_len())
            self.ydata[ibeg:iend] += other.ydata[ibeg-ioff:iend-ioff]
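
    # Usage sketch: sum a shorter trace into a longer one with matching
    # sampling rates; the extent of `a` is unchanged (values illustrative):
    #
    #   a = Trace(tmin=0., deltat=1., ydata=num.ones(10))
    #   b = Trace(tmin=3., deltat=1., ydata=num.ones(4))
    #   a.add(b, interpolate=False)  # a.ydata[3:7] is now 2.0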

    def mult(self, other, interpolate=True):
        '''
        Multiply with values of other trace ``(self *= other)``.

        Multiply the values of ``self`` by the values of ``other``, where it
        intersects with ``other``. This method does not change the extent of
        ``self``. If ``interpolate`` is ``True`` (the default), the values of
        ``other`` to be multiplied are interpolated at sampling instants of
        ``self``. Linear interpolation is performed. In this case the
        sampling rate of ``other`` must be equal to or lower than that of
        ``self``. If ``interpolate`` is ``False``, the sampling rates of the
        two traces must match.
        '''

        if interpolate:
            assert self.deltat <= other.deltat or \
                same_sampling_rate(self, other)

            other_xdata = other.get_xdata()
            xdata = self.get_xdata()
            self.ydata *= num.interp(
                xdata, other_xdata, other.ydata, left=0., right=0.)
        else:
            assert self.deltat == other.deltat
            ibeg1 = int(round((other.tmin-self.tmin)/self.deltat))
            ibeg2 = int(round((self.tmin-other.tmin)/self.deltat))
            iend1 = int(round((other.tmax-self.tmin)/self.deltat))+1
            iend2 = int(round((self.tmax-other.tmin)/self.deltat))+1

            ibeg1 = self.index_clip(ibeg1)
            iend1 = self.index_clip(iend1)
            ibeg2 = self.index_clip(ibeg2)
            iend2 = self.index_clip(iend2)

            self.ydata[ibeg1:iend1] *= other.ydata[ibeg2:iend2]

    def max(self):
        '''
        Get time and value of data maximum.
        '''

        i = num.argmax(self.ydata)
        return self.tmin + i*self.deltat, self.ydata[i]

    def min(self):
        '''
        Get time and value of data minimum.
        '''

        i = num.argmin(self.ydata)
        return self.tmin + i*self.deltat, self.ydata[i]

    def absmax(self):
        '''
        Get time and value of maximum of the absolute of data.
        '''

        tmi, mi = self.min()
        tma, ma = self.max()
        if abs(mi) > abs(ma):
            return tmi, abs(mi)
        else:
            return tma, abs(ma)

    def set_codes(
            self, network=None, station=None, location=None, channel=None,
            extra=None):

        '''
        Set network, station, location, and channel codes.
        '''

        if network is not None:
            self.network = network
        if station is not None:
            self.station = station
        if location is not None:
            self.location = location
        if channel is not None:
            self.channel = channel
        if extra is not None:
            self.extra = extra

        self._update_ids()

    def set_network(self, network):
        self.network = network
        self._update_ids()

    def set_station(self, station):
        self.station = station
        self._update_ids()

    def set_location(self, location):
        self.location = location
        self._update_ids()

    def set_channel(self, channel):
        self.channel = channel
        self._update_ids()

    def overlaps(self, tmin, tmax):
        '''
        Check if trace has overlap with a given time span.
        '''
        return not (tmax < self.tmin or self.tmax < tmin)

    def is_relevant(self, tmin, tmax, selector=None):
        '''
        Check if trace has overlap with a given time span and matches a
        condition callback. (internal use)
        '''

        return not (tmax <= self.tmin or self.tmax < tmin) \
            and (selector is None or selector(self))

    def _update_ids(self):
        '''
        Update dependent ids.
        '''

        self.full_id = (
            self.network, self.station, self.location, self.channel,
            self.tmin)
        self.nslc_id = reuse(
            (self.network, self.station, self.location, self.channel))

    def prune_from_reuse_cache(self):
        util.deuse(self.nslc_id)
        util.deuse(self.network)
        util.deuse(self.station)
        util.deuse(self.location)
        util.deuse(self.channel)

    def set_mtime(self, mtime):
        '''
        Set modification time of the trace.
        '''

        self.mtime = mtime

    def get_xdata(self):
        '''
        Create array for time axis.
        '''

        if self.ydata is None:
            raise NoData()

        return self.tmin \
            + num.arange(len(self.ydata), dtype=num.float64) * self.deltat

    def get_ydata(self):
        '''
        Get data array.
        '''

        if self.ydata is None:
            raise NoData()

        return self.ydata

    def set_ydata(self, new_ydata):
        '''
        Replace data array.
        '''

        self.drop_growbuffer()
        self.ydata = new_ydata
        self.tmax = self.tmin+(len(self.ydata)-1)*self.deltat

    def data_len(self):
        if self.ydata is not None:
            return self.ydata.size
        else:
            return int(round((self.tmax-self.tmin)/self.deltat)) + 1

    def drop_data(self):
        '''
        Forget data, make dataless trace.
        '''

        self.drop_growbuffer()
        self.ydata = None

    def drop_growbuffer(self):
        '''
        Detach the trace's grow buffer.
        '''

        self._growbuffer = None
        self._pchain = None

    def copy(self, data=True):
        '''
        Make a deep copy of the trace.
        '''

        tracecopy = copy.copy(self)
        tracecopy.drop_growbuffer()
        if data:
            tracecopy.ydata = self.ydata.copy()
        tracecopy.meta = copy.deepcopy(self.meta)
        return tracecopy

    def crop_zeros(self):
        '''
        Remove any zeros at beginning and end.
        '''

        indices = num.where(self.ydata != 0.0)[0]
        if indices.size == 0:
            raise NoData()

        ibeg = indices[0]
        iend = indices[-1]+1
        if ibeg == 0 and iend == self.ydata.size:
            return

        self.drop_growbuffer()
        self.ydata = self.ydata[ibeg:iend].copy()
        self.tmin = self.tmin+ibeg*self.deltat
        self.tmax = self.tmin+(len(self.ydata)-1)*self.deltat
        self._update_ids()

    def append(self, data):
        '''
        Append data to the end of the trace.

        To make this method efficient when successively very few or even
        single samples are appended, a larger grow buffer is allocated upon
        first invocation. The trace's data is then changed to be a view into
        the currently filled portion of the grow buffer array.
        '''

        assert self.ydata.dtype == data.dtype
        newlen = data.size + self.ydata.size
        if self._growbuffer is None or self._growbuffer.size < newlen:
            self._growbuffer = num.empty(newlen*2, dtype=self.ydata.dtype)
            self._growbuffer[:self.ydata.size] = self.ydata
        self._growbuffer[self.ydata.size:newlen] = data
        self.ydata = self._growbuffer[:newlen]
        self.tmax = self.tmin + (newlen-1)*self.deltat

    def chop(
            self, tmin, tmax, inplace=True, include_last=False,
            snap=(round, round), want_incomplete=True):

        '''
        Cut the trace to given time span.

        If the ``inplace`` argument is True (the default) the trace is cut in
        place, otherwise a new trace with the cut part is returned. By
        default, the indices where to start and end the trace data array are
        determined by rounding of ``tmin`` and ``tmax`` to sampling instances
        using Python's :py:func:`round` function. This behaviour can be
        changed with the ``snap`` argument, which takes a tuple of two
        functions (one for the lower and one for the upper end) to be used
        instead of :py:func:`round`. The last sample is by default not
        included unless ``include_last`` is set to True. If the given time
        span exceeds the available time span of the trace, the available
        part is returned, unless ``want_incomplete`` is set to False - in
        that case, a :py:exc:`NoData` exception is raised. This exception is
        always raised, when the requested time span does not overlap with
        the trace's time span.
        '''

        if want_incomplete:
            if tmax <= self.tmin-self.deltat or self.tmax+self.deltat < tmin:
                raise NoData()
        else:
            if tmin < self.tmin or self.tmax < tmax:
                raise NoData()

        ibeg = max(0, t2ind(tmin-self.tmin, self.deltat, snap[0]))
        iplus = 0
        if include_last:
            iplus = 1

        iend = min(
            self.data_len(),
            t2ind(tmax-self.tmin, self.deltat, snap[1])+iplus)

        if ibeg >= iend:
            raise NoData()

        obj = self
        if not inplace:
            obj = self.copy(data=False)

        self.drop_growbuffer()
        if self.ydata is not None:
            obj.ydata = self.ydata[ibeg:iend].copy()
        else:
            obj.ydata = None

        obj.tmin = obj.tmin+ibeg*obj.deltat
        obj.tmax = obj.tmin+((iend-ibeg)-1)*obj.deltat

        obj._update_ids()

        return obj
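
    # Usage sketch: cut out a copy of the central part of a trace, leaving
    # the original untouched (values illustrative):
    #
    #   tr = Trace(tmin=0., deltat=1., ydata=num.arange(10.))
    #   part = tr.chop(3., 6., inplace=False)
    #   # part.tmin == 3.0, part.data_len() == 3 (last sample excluded)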

    def downsample(self, ndecimate, snap=False, initials=None, demean=False,
                   ftype='fir-remez'):
        '''
        Downsample trace by a given integer factor.

        :param ndecimate: decimation factor, avoid values larger than 8
        :param snap: whether to put the new sampling instances closest to
            multiples of the sampling rate.
        :param initials: ``None``, ``True``, or initial conditions for the
            anti-aliasing filter, obtained from a previous run. In the latter
            two cases the final state of the filter is returned instead of
            ``None``.
        :param demean: whether to demean the signal before filtering.
        :param ftype: which FIR filter to use, choose from ``fir`` or
            ``fir-remez``. Default is ``fir-remez``.
        '''
        newdeltat = self.deltat*ndecimate
        if snap:
            ilag = int(round(
                (math.ceil(self.tmin / newdeltat) * newdeltat - self.tmin)
                / self.deltat))
        else:
            ilag = 0

        if snap and ilag > 0 and ilag < self.ydata.size:
            data = self.ydata.astype(num.float64)
            self.tmin += ilag*self.deltat
        else:
            data = self.ydata.astype(num.float64)

        if demean:
            data -= num.mean(data)

        if data.size != 0:
            result = util.decimate(
                data, ndecimate, ftype=ftype, zi=initials, ioff=ilag)
        else:
            result = data

        if initials is None:
            self.ydata, finals = result, None
        else:
            self.ydata, finals = result

        self.deltat = reuse(self.deltat*ndecimate)
        self.tmax = self.tmin+(len(self.ydata)-1)*self.deltat
        self._update_ids()

        return finals

    def downsample_to(self, deltat, snap=False, allow_upsample_max=1,
                      initials=None, demean=False, ftype='fir-remez'):

        '''
        Downsample to given sampling rate.

        Tries to downsample the trace to a target sampling interval of
        ``deltat``. This runs the :py:meth:`Trace.downsample` one or several
        times. If allow_upsample_max is set to a value larger than 1,
        intermediate upsampling steps are allowed, in order to increase the
        number of possible downsampling ratios.

        If the requested ratio is not supported, an exception of type
        :py:exc:`pyrocko.util.UnavailableDecimation` is raised.

        :param deltat: desired deltat in [s]
        :param allow_upsample_max: maximum upsampling of the signal to
            achieve the desired deltat. Default is ``1``.
        :param snap: whether to put the new sampling instances closest to
            multiples of the sampling rate.
        :param initials: ``None``, ``True``, or initial conditions for the
            anti-aliasing filter, obtained from a previous run. In the latter
            two cases the final state of the filter is returned instead of
            ``None``.
        :param demean: whether to demean the signal before filtering.
        :param ftype: which FIR filter to use, choose from ``fir`` or
            ``fir-remez``. Default is ``fir-remez``.
        '''

        ratio = deltat/self.deltat
        rratio = round(ratio)

        ok = False
        for upsratio in range(1, allow_upsample_max+1):
            dratio = (upsratio/self.deltat) / (1./deltat)
            if abs(dratio - round(dratio)) / dratio < 0.0001 and \
                    util.decitab(int(round(dratio))):

                ok = True
                break

        if not ok:
            raise util.UnavailableDecimation('ratio = %g' % ratio)

        if upsratio > 1:
            self.drop_growbuffer()
            ydata = self.ydata
            self.ydata = num.zeros(
                ydata.size*upsratio-(upsratio-1), ydata.dtype)
            self.ydata[::upsratio] = ydata
            for i in range(1, upsratio):
                self.ydata[i::upsratio] = \
                    float(i)/upsratio * ydata[:-1] \
                    + float(upsratio-i)/upsratio * ydata[1:]
            self.deltat = self.deltat/upsratio

            ratio = deltat/self.deltat
            rratio = round(ratio)

        deci_seq = util.decitab(int(rratio))
        finals = []
        for i, ndecimate in enumerate(deci_seq):
            if ndecimate != 1:
                xinitials = None
                if initials is not None:
                    xinitials = initials[i]
                finals.append(self.downsample(
                    ndecimate, snap=snap, initials=xinitials, demean=demean,
                    ftype=ftype))

        if initials is not None:
            return finals
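
    # Usage sketch: bring a 100 Hz trace down to 1 Hz; the overall factor of
    # 100 is split internally into a sequence of supported decimation steps
    # (values illustrative):
    #
    #   tr.downsample_to(1.0)  # assuming tr.deltat == 0.01 before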

    def resample(self, deltat):
        '''
        Resample to given sampling rate ``deltat``.

        Resampling is performed in the frequency domain.
        '''

        ndata = self.ydata.size
        ntrans = nextpow2(ndata)
        fntrans2 = ntrans * self.deltat/deltat
        ntrans2 = int(round(fntrans2))
        deltat2 = self.deltat * float(ntrans)/float(ntrans2)
        ndata2 = int(round(ndata*self.deltat/deltat2))
        if abs(fntrans2 - ntrans2) > 1e-7:
            logger.warning(
                'resample: requested deltat %g could not be matched exactly: '
                '%g' % (deltat, deltat2))

        data = self.ydata
        data_pad = num.zeros(ntrans, dtype=float)
        data_pad[:ndata] = data
        fdata = num.fft.rfft(data_pad)
        fdata2 = num.zeros((ntrans2+1)//2, dtype=fdata.dtype)
        n = min(fdata.size, fdata2.size)
        fdata2[:n] = fdata[:n]
        data2 = num.fft.irfft(fdata2)
        data2 = data2[:ndata2]
        data2 *= float(ntrans2) / float(ntrans)
        self.deltat = deltat2
        self.set_ydata(data2)

    def resample_simple(self, deltat):
        tyear = 3600*24*365.

        if deltat == self.deltat:
            return

        if abs(self.deltat - deltat) * tyear/deltat < deltat:
            logger.warning(
                'resample_simple: less than one sample would have to be '
                'inserted/deleted per year. Doing nothing.')
            return

        ninterval = int(round(deltat / (self.deltat - deltat)))
        if abs(ninterval) < 20:
            logger.error(
                'resample_simple: sample insertion/deletion interval less '
                'than 20. Results would be erroneous.')
            raise ResamplingFailed()

        delete = False
        if ninterval < 0:
            ninterval = - ninterval
            delete = True

        tyearbegin = util.year_start(self.tmin)

        nmin = int(round((self.tmin - tyearbegin)/deltat))

        ibegin = (((nmin-1)//ninterval)+1) * ninterval - nmin
        nindices = (len(self.ydata) - ibegin - 1) // ninterval + 1
        if nindices > 0:
            indices = ibegin + num.arange(nindices) * ninterval
            data_split = num.split(self.ydata, indices)
            data = []
            for ln, h in zip(data_split[:-1], data_split[1:]):
                if delete:
                    ln = ln[:-1]

                data.append(ln)
                if not delete:
                    if ln.size == 0:
                        v = h[0]
                    else:
                        v = 0.5*(ln[-1] + h[0])
                    data.append(num.array([v], dtype=ln.dtype))

            data.append(data_split[-1])

            ydata_new = num.concatenate(data)

        self.tmin = tyearbegin + nmin * deltat
        self.deltat = deltat
        self.set_ydata(ydata_new)

    def stretch(self, tmin_new, tmax_new):
        '''
        Stretch signal while preserving sample rate using sinc interpolation.

        :param tmin_new: new time of first sample
        :param tmax_new: new time of last sample

        This method can be used to correct for a small linear time drift or
        to introduce sub-sample time shifts. The amount of stretching is
        limited to 10% by the implementation and is expected to be much
        smaller than that by the approximations used.
        '''

        from pyrocko import signal_ext

        i_control = num.array([0, self.ydata.size-1], dtype=num.int64)
        t_control = num.array([tmin_new, tmax_new], dtype=float)

        r = (tmax_new - tmin_new) / self.deltat + 1.0
        n_new = int(round(r))
        if abs(n_new - r) > 0.001:
            n_new = int(math.floor(r))

        assert n_new >= 2

        tmax_new = tmin_new + (n_new-1) * self.deltat

        ydata_new = num.empty(n_new, dtype=float)
        signal_ext.antidrift(i_control, t_control,
                             self.ydata.astype(float),
                             tmin_new, self.deltat, ydata_new)

        self.tmin = tmin_new
        self.set_ydata(ydata_new)
        self._update_ids()
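
    # Usage sketch: introduce a small sub-sample time shift while keeping
    # the sampling rate (values illustrative):
    #
    #   tshift = 0.2 * tr.deltat
    #   tr.stretch(tr.tmin + tshift, tr.tmax + tshift)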

    def nyquist_check(self, frequency, intro='Corner frequency', warn=True,
                      raise_exception=False):

        '''
        Check if a given frequency is above the Nyquist frequency of the
        trace.

        :param intro: string used to introduce the warning/error message
        :param warn: whether to emit a warning
        :param raise_exception: whether to raise an :py:exc:`AboveNyquist`
            exception.
        '''

        if frequency >= 0.5/self.deltat:
            message = '%s (%g Hz) is equal to or higher than Nyquist ' \
                'frequency (%g Hz). (Trace %s)' \
                % (intro, frequency, 0.5/self.deltat, self.name())
            if warn:
                logger.warning(message)
            if raise_exception:
                raise AboveNyquist(message)

    def lowpass(self, order, corner, nyquist_warn=True,
                nyquist_exception=False, demean=True):

        '''
        Apply Butterworth lowpass to the trace.

        :param order: order of the filter
        :param corner: corner frequency of the filter

        Mean is removed before filtering.
        '''

        self.nyquist_check(
            corner, 'Corner frequency of lowpass', nyquist_warn,
            nyquist_exception)

        (b, a) = _get_cached_filter_coefs(
            order, [corner*2.0*self.deltat], btype='low')

        if len(a) != order+1 or len(b) != order+1:
            logger.warning(
                'Erroneous filter coefficients returned by '
                'scipy.signal.butter(). You may need to downsample the '
                'signal before filtering.')

        data = self.ydata.astype(num.float64)
        if demean:
            data -= num.mean(data)
        self.drop_growbuffer()
        self.ydata = signal.lfilter(b, a, data)
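
    # Usage sketch: 4th-order Butterworth lowpass with a 2 Hz corner,
    # applied in place (values illustrative):
    #
    #   tr.lowpass(4, 2.0)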

    def highpass(self, order, corner, nyquist_warn=True,
                 nyquist_exception=False, demean=True):

        '''
        Apply Butterworth highpass to the trace.

        :param order: order of the filter
        :param corner: corner frequency of the filter

        Mean is removed before filtering.
        '''

        self.nyquist_check(
            corner, 'Corner frequency of highpass', nyquist_warn,
            nyquist_exception)

        (b, a) = _get_cached_filter_coefs(
            order, [corner*2.0*self.deltat], btype='high')

        data = self.ydata.astype(num.float64)
        if len(a) != order+1 or len(b) != order+1:
            logger.warning(
                'Erroneous filter coefficients returned by '
                'scipy.signal.butter(). You may need to downsample the '
                'signal before filtering.')

        if demean:
            data -= num.mean(data)
        self.drop_growbuffer()
        self.ydata = signal.lfilter(b, a, data)

    def bandpass(self, order, corner_hp, corner_lp, demean=True):
        '''
        Apply Butterworth bandpass to the trace.

        :param order: order of the filter
        :param corner_hp: lower corner frequency of the filter
        :param corner_lp: upper corner frequency of the filter

        Mean is removed before filtering.
        '''

        self.nyquist_check(corner_hp, 'Lower corner frequency of bandpass')
        self.nyquist_check(corner_lp, 'Higher corner frequency of bandpass')
        (b, a) = _get_cached_filter_coefs(
            order,
            [corner*2.0*self.deltat for corner in (corner_hp, corner_lp)],
            btype='band')
        data = self.ydata.astype(num.float64)
        if demean:
            data -= num.mean(data)
        self.drop_growbuffer()
        self.ydata = signal.lfilter(b, a, data)

    def bandstop(self, order, corner_hp, corner_lp, demean=True):
        '''
        Apply Butterworth bandstop (attenuates frequencies in band) to the
        trace.

        :param order: order of the filter
        :param corner_hp: lower corner frequency of the filter
        :param corner_lp: upper corner frequency of the filter

        Mean is removed before filtering.
        '''

        self.nyquist_check(corner_hp, 'Lower corner frequency of bandstop')
        self.nyquist_check(corner_lp, 'Higher corner frequency of bandstop')
        (b, a) = _get_cached_filter_coefs(
            order,
            [corner*2.0*self.deltat for corner in (corner_hp, corner_lp)],
            btype='bandstop')
        data = self.ydata.astype(num.float64)
        if demean:
            data -= num.mean(data)
        self.drop_growbuffer()
        self.ydata = signal.lfilter(b, a, data)

    def envelope(self, inplace=True):
        '''
        Calculate the envelope of the trace.

        :param inplace: calculate envelope in place

        The calculation follows:

        .. math::

            Y' = \\sqrt{Y^2+H(Y)^2}

        where H is the Hilbert-Transform of the signal Y.
        '''

        y = self.ydata.astype(float)
        env = num.abs(hilbert(y))
        if inplace:
            self.drop_growbuffer()
            self.ydata = env
        else:
            tr = self.copy(data=False)
            tr.ydata = env
            return tr
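
    # Usage sketch: compute the envelope on a copy, keeping the original
    # waveform intact:
    #
    #   env = tr.envelope(inplace=False)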

    def taper(self, taperer, inplace=True, chop=False):
        '''
        Apply a :py:class:`Taper` to the trace.

        :param taperer: instance of :py:class:`Taper` subclass
        :param inplace: apply taper inplace
        :param chop: if ``True``: exclude tapered parts from the resulting
            trace
        '''

        if not inplace:
            tr = self.copy()
        else:
            tr = self

        if chop:
            i, n = taperer.span(tr.ydata, tr.tmin, tr.deltat)
            tr.shift(i*tr.deltat)
            tr.set_ydata(tr.ydata[i:i+n])

        taperer(tr.ydata, tr.tmin, tr.deltat)

        if not inplace:
            return tr

    def whiten(self, order=6):
        '''
        Whiten signal in time domain using autoregression and recursive
        filter.

        :param order: order of the autoregression process
        '''

        b, a = self.whitening_coefficients(order)
        self.drop_growbuffer()
        self.ydata = signal.lfilter(b, a, self.ydata)

    def whitening_coefficients(self, order=6):
        ar = yulewalker(self.ydata, order)
        b, a = [1.] + ar.tolist(), [1.]
        return b, a

    def ampspec_whiten(
            self,
            width,
            td_taper='auto',
            fd_taper='auto',
            pad_to_pow2=True,
            demean=True):

        '''
        Whiten signal via frequency domain using moving average on amplitude
        spectra.

        :param width: width of smoothing kernel [Hz]
        :param td_taper: time domain taper, object of type :py:class:`Taper`
            or ``None`` or ``'auto'``.
        :param fd_taper: frequency domain taper, object of type
            :py:class:`Taper` or ``None`` or ``'auto'``.
        :param pad_to_pow2: whether to pad the signal with zeros up to a
            length of 2^n
        :param demean: whether to demean the signal before tapering

        The signal is first demeaned and then tapered using ``td_taper``.
        Then, the spectrum is calculated and inversely weighted with a
        smoothed version of its amplitude spectrum. A moving average is used
        for the smoothing. The smoothed spectrum is then tapered using
        ``fd_taper``. Finally, the smoothed and tapered spectrum is
        back-transformed into the time domain.

        If ``td_taper`` is set to ``'auto'``, ``CosFader(1.0/width)`` is
        used. If ``fd_taper`` is set to ``'auto'``, ``CosFader(width)`` is
        used.
        '''

        ndata = self.data_len()

        if pad_to_pow2:
            ntrans = nextpow2(ndata)
        else:
            ntrans = ndata

        df = 1./(ntrans*self.deltat)
        nw = int(round(width/df))
        if ndata//2+1 <= nw:
            raise TraceTooShort(
                'Samples in trace: %s, samples needed: %s' % (ndata, nw))

        if td_taper == 'auto':
            td_taper = CosFader(1./width)

        if fd_taper == 'auto':
            fd_taper = CosFader(width)

        if td_taper:
            self.taper(td_taper)

        ydata = self.get_ydata().astype(float)
        if demean:
            ydata -= ydata.mean()

        spec = num.fft.rfft(ydata, ntrans)

        amp = num.abs(spec)
        nspec = amp.size
        csamp = num.cumsum(amp)
        amp_smoothed = num.empty(nspec, dtype=csamp.dtype)
        n1, n2 = nw//2, nw//2 + nspec - nw
        amp_smoothed[n1:n2] = (csamp[nw:] - csamp[:-nw]) / nw
        amp_smoothed[:n1] = amp_smoothed[n1]
        amp_smoothed[n2:] = amp_smoothed[n2-1]

        denom = amp_smoothed * amp
        numer = amp
        eps = num.mean(denom) * 1e-9
        if eps == 0.0:
            eps = 1e-9

        numer += eps
        denom += eps
        spec *= numer/denom

        if fd_taper:
            fd_taper(spec, 0., df)

        ydata = num.fft.irfft(spec)
        self.set_ydata(ydata[:ndata])

    def _get_cached_freqs(self, nf, deltaf):
        ck = (nf, deltaf)
        if ck not in Trace.cached_frequencies:
            Trace.cached_frequencies[ck] = deltaf * num.arange(
                nf, dtype=float)

        return Trace.cached_frequencies[ck]

    def bandpass_fft(self, corner_hp, corner_lp):
        '''
        Apply boxcar bandpass to trace (in spectral domain).
        '''

        n = len(self.ydata)
        n2 = nextpow2(n)
        data = num.zeros(n2, dtype=num.float64)
        data[:n] = self.ydata
        fdata = num.fft.rfft(data)
        freqs = self._get_cached_freqs(len(fdata), 1./(self.deltat*n2))
        fdata[0] = 0.0
        fdata *= num.logical_and(corner_hp < freqs, freqs < corner_lp)
        data = num.fft.irfft(fdata)
        self.drop_growbuffer()
        self.ydata = data[:n]

    def shift(self, tshift):
        '''
        Time shift the trace.
        '''

        self.tmin += tshift
        self.tmax += tshift
        self._update_ids()

    def snap(self, inplace=True, interpolate=False):
        '''
        Shift trace samples to nearest even multiples of the sampling rate.

        :param inplace: (boolean) snap traces inplace

        If ``inplace`` is ``False`` and ``tmin`` and ``tmax`` of the snapped
        trace differ from those of the original trace by less than
        0.01 x deltat, :py:func:`snap` returns the unsnapped instance of the
        original trace.
        '''

        tmin = round(self.tmin/self.deltat)*self.deltat
        tmax = tmin + (self.ydata.size-1)*self.deltat

        if inplace:
            xself = self
        else:
            if abs(self.tmin - tmin) < 1e-2*self.deltat and \
                    abs(self.tmax - tmax) < 1e-2*self.deltat:
                return self

            xself = self.copy()

        if interpolate:
            from pyrocko import signal_ext
            n = xself.data_len()
            ydata_new = num.empty(n, dtype=float)
            i_control = num.array([0, n-1], dtype=num.int64)
            tref = tmin
            t_control = num.array(
                [float(xself.tmin-tref), float(xself.tmax-tref)],
                dtype=float)

            signal_ext.antidrift(i_control, t_control,
                                 xself.ydata.astype(float),
                                 float(tmin-tref), xself.deltat, ydata_new)

            xself.ydata = ydata_new

        xself.tmin = tmin
        xself.tmax = tmax
        xself._update_ids()

        return xself

    def fix_deltat_rounding_errors(self):
        '''
        Try to undo sampling rate rounding errors.

        See :py:func:`fix_deltat_rounding_errors`.
        '''

        self.deltat = fix_deltat_rounding_errors(self.deltat)
        self.tmax = self.tmin + (self.data_len() - 1) * self.deltat

    def sta_lta_centered(self, tshort, tlong, quad=True, scalingmethod=1):
        '''
        Run special STA/LTA filter where the short time window is centered
        on the long time window.

        :param tshort: length of short time window in [s]
        :param tlong: length of long time window in [s]
        :param quad: whether to square the data prior to applying the
            STA/LTA filter
        :param scalingmethod: integer key to select how output values are
            scaled / normalized (``1``, ``2``, or ``3``)

        ============= ====================================== ===========
        Scalingmethod Implementation                         Range
        ============= ====================================== ===========
        ``1``         As/Al* Ts/Tl                           [0,1]
        ``2``         (As/Al - 1) / (Tl/Ts - 1)              [-Ts/Tl,1]
        ``3``         Like ``2`` but clipping range at zero  [0,1]
        ============= ====================================== ===========

        '''

        nshort = int(round(tshort/self.deltat))
        nlong = int(round(tlong/self.deltat))

        assert nshort < nlong
        if nlong > len(self.ydata):
            raise TraceTooShort(
                'Samples in trace: %s, samples needed: %s'
                % (len(self.ydata), nlong))

        if quad:
            sqrdata = self.ydata**2
        else:
            sqrdata = self.ydata

        mavg_short = moving_avg(sqrdata, nshort)
        mavg_long = moving_avg(sqrdata, nlong)

        self.drop_growbuffer()

        if scalingmethod not in (1, 2, 3):
            raise Exception('Invalid argument for scalingmethod.')

        if scalingmethod == 1:
            self.ydata = mavg_short/mavg_long * float(nshort)/float(nlong)
        elif scalingmethod in (2, 3):
            self.ydata = (mavg_short/mavg_long - 1.) \
                / ((float(nlong)/float(nshort)) - 1)

            if scalingmethod == 3:
                self.ydata = num.maximum(self.ydata, 0.)

    def sta_lta_right(self, tshort, tlong, quad=True, scalingmethod=1):
        '''
        Run special STA/LTA filter where the short time window is
        overlapping with the last part of the long time window.

        :param tshort: length of short time window in [s]
        :param tlong: length of long time window in [s]
        :param quad: whether to square the data prior to applying the
            STA/LTA filter
        :param scalingmethod: integer key to select how output values are
            scaled / normalized (``1``, ``2``, or ``3``)

        ============= ====================================== ===========
        Scalingmethod Implementation                         Range
        ============= ====================================== ===========
        ``1``         As/Al* Ts/Tl                           [0,1]
        ``2``         (As/Al - 1) / (Tl/Ts - 1)              [-Ts/Tl,1]
        ``3``         Like ``2`` but clipping range at zero  [0,1]
        ============= ====================================== ===========

        With ``scalingmethod=1``, the values produced by this variant of the
        STA/LTA are equivalent to

        .. math::
            s_i = \\frac{s}{l} \\frac{\\frac{1}{s}\\sum_{j=i}^{i+s-1} f_j}
            {\\frac{1}{l}\\sum_{j=i+s-l}^{i+s-1} f_j}

        where :math:`f_j` are the input samples, :math:`s` is the number of
        samples in the short time window and :math:`l` is the number of
        samples in the long time window.
        '''

        n = self.data_len()
        tmin = self.tmin

        nshort = max(1, int(round(tshort/self.deltat)))
        nlong = max(1, int(round(tlong/self.deltat)))

        assert nshort < nlong

        if nlong > len(self.ydata):
            raise TraceTooShort(
                'Samples in trace: %s, samples needed: %s'
                % (len(self.ydata), nlong))

        if quad:
            sqrdata = self.ydata**2
        else:
            sqrdata = self.ydata

        nshift = int(math.floor(0.5 * (nlong - nshort)))
        if nlong % 2 != 0 and nshort % 2 == 0:
            nshift += 1

        mavg_short = moving_avg(sqrdata, nshort)[nshift:]
        mavg_long = moving_avg(sqrdata, nlong)[:sqrdata.size-nshift]

        self.drop_growbuffer()

        if scalingmethod not in (1, 2, 3):
            raise Exception('Invalid argument for scalingmethod.')

        if scalingmethod == 1:
            ydata = mavg_short/mavg_long * nshort/nlong
        elif scalingmethod in (2, 3):
            ydata = (mavg_short/mavg_long - 1.) \
                / ((float(nlong)/float(nshort)) - 1)

            if scalingmethod == 3:
                ydata = num.maximum(ydata, 0.)

        self.set_ydata(ydata)

        self.shift((math.ceil(0.5*nlong) - nshort + 1) * self.deltat)

        self.chop(
            tmin + (nlong - nshort) * self.deltat,
            tmin + (n - nshort) * self.deltat)
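
    # Usage sketch: STA/LTA trigger with a 0.5 s short and 10 s long window,
    # followed by peak detection on the characteristic function (threshold
    # and search window are illustrative):
    #
    #   tr.sta_lta_right(0.5, 10.)
    #   tpeaks, apeaks = tr.peaks(threshold=3., tsearch=5.)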

    def peaks(self, threshold, tsearch,
              deadtime=False,
              nblock_duration_detection=100):

        '''
        Detect peaks above a given threshold (method 1).

        From every instant, where the signal rises above ``threshold``, a
        time length of ``tsearch`` seconds is searched for a maximum. A list
        with tuples (time, value) for each detected peak is returned. The
        ``deadtime`` argument turns on a special deadtime duration detection
        algorithm useful in combination with recursive STA/LTA filters.
        '''

        y = self.ydata
        above = num.where(y > threshold, 1, 0)
        deriv = num.zeros(y.size, dtype=num.int8)
        deriv[1:] = above[1:]-above[:-1]
        itrig_positions = num.nonzero(deriv > 0)[0]
        tpeaks = []
        apeaks = []
        tzeros = []
        tzero = self.tmin

        for itrig_pos in itrig_positions:
            ibeg = itrig_pos
            iend = min(
                len(self.ydata),
                itrig_pos + int(math.ceil(tsearch/self.deltat)))
            ipeak = num.argmax(y[ibeg:iend])
            tpeak = self.tmin + (ipeak+ibeg)*self.deltat
            apeak = y[ibeg+ipeak]

            if tpeak < tzero:
                continue

            if deadtime:
                ibeg = itrig_pos
                iblock = 0
                nblock = nblock_duration_detection
                totalsum = 0.
                while True:
                    if ibeg+iblock*nblock >= len(y):
                        tzero = self.tmin + (len(y)-1) * self.deltat
                        break

                    logy = num.log(
                        y[ibeg+iblock*nblock:ibeg+(iblock+1)*nblock])
                    logy[0] += totalsum
                    ysum = num.cumsum(logy)
                    totalsum = ysum[-1]
                    below = num.where(ysum <= 0., 1, 0)
                    deriv = num.zeros(ysum.size, dtype=num.int8)
                    deriv[1:] = below[1:]-below[:-1]
                    izero_positions = num.nonzero(deriv > 0)[0] \
                        + iblock*nblock
                    if len(izero_positions) > 0:
                        tzero = self.tmin + self.deltat * (
                            ibeg + izero_positions[0])
                        break

                    iblock += 1
            else:
                tzero = ibeg*self.deltat + self.tmin + tsearch

            tpeaks.append(tpeak)
            apeaks.append(apeak)
            tzeros.append(tzero)

        if deadtime:
            return tpeaks, apeaks, tzeros
        else:
            return tpeaks, apeaks

    def peaks2(self, threshold, tsearch):

        '''
        Detect peaks above a given threshold (method 2).

        This variant of peak detection is a bit more robust (and slower) than
        the one implemented in :py:meth:`Trace.peaks`. First all samples with
        ``a[i-1] < a[i] > a[i+1]`` are masked as potential peaks. From these,
        iteratively the one with the maximum amplitude ``a[j]`` and time
        ``t[j]`` is chosen and potential peaks within
        ``t[j] - tsearch, t[j] + tsearch``
        are discarded. The algorithm stops when ``a[j] < threshold`` or when
        no more potential peaks are left.
        '''

        a = num.copy(self.ydata)

        amin = num.min(a)

        a[0] = amin
        a[1: -1][num.diff(a, 2) <= 0.] = amin
        a[-1] = amin

        data = []
        while True:
            imax = num.argmax(a)
            amax = a[imax]

            if amax < threshold or amax == amin:
                break

            data.append((self.tmin + imax * self.deltat, amax))

            ntsearch = int(round(tsearch / self.deltat))
            a[max(imax-ntsearch//2, 0):min(imax+ntsearch//2, a.size)] = amin

        if data:
            data.sort()
            tpeaks, apeaks = list(zip(*data))
        else:
            tpeaks, apeaks = [], []

        return tpeaks, apeaks

    def extend(self, tmin=None, tmax=None, fillmethod='zeros'):
        '''
        Extend trace to given span.

        :param tmin: begin time of new span
        :param tmax: end time of new span
        :param fillmethod: ``'zeros'``, ``'repeat'``, ``'mean'``, or
            ``'median'``
        '''

        nold = self.ydata.size

        if tmin is not None:
            nl = min(0, int(round((tmin-self.tmin)/self.deltat)))
        else:
            nl = 0

        if tmax is not None:
            nh = max(nold - 1, int(round((tmax-self.tmin)/self.deltat)))
        else:
            nh = nold - 1

        n = nh - nl + 1
        data = num.zeros(n, dtype=self.ydata.dtype)
        data[-nl:-nl + nold] = self.ydata
        if self.ydata.size >= 1:
            if fillmethod == 'repeat':
                data[:-nl] = self.ydata[0]
                data[-nl + nold:] = self.ydata[-1]
            elif fillmethod == 'median':
                v = num.median(self.ydata)
                data[:-nl] = v
                data[-nl + nold:] = v
            elif fillmethod == 'mean':
                v = num.mean(self.ydata)
                data[:-nl] = v
                data[-nl + nold:] = v

        self.drop_growbuffer()
        self.ydata = data

        self.tmin += nl * self.deltat
        self.tmax = self.tmin + (self.ydata.size - 1) * self.deltat

        self._update_ids()
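
    # Usage sketch: pad a trace by 10 s on either side, repeating the edge
    # samples instead of inserting zeros (values illustrative):
    #
    #   tr.extend(tr.tmin - 10., tr.tmax + 10., fillmethod='repeat')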

    def transfer(self,
                 tfade=0.,
                 freqlimits=None,
                 transfer_function=None,
                 cut_off_fading=True,
                 demean=True,
                 invert=False):

        '''
        Return new trace with transfer function applied (convolution).

        :param tfade: rise/fall time in seconds of taper applied in time
            domain at both ends of trace.
        :param freqlimits: 4-tuple with corner frequencies in Hz.
        :param transfer_function: FrequencyResponse object; must provide a
            method 'evaluate(freqs)', which returns the transfer function
            coefficients at the frequencies 'freqs'.
        :param cut_off_fading: whether to cut off rise/fall interval in
            output trace.
        :param demean: remove mean before applying transfer function
        :param invert: set to True to do a deconvolution
        '''

        if transfer_function is None:
            transfer_function = FrequencyResponse()

        if self.tmax - self.tmin <= tfade*2.:
            raise TraceTooShort(
                'Trace %s.%s.%s.%s too short for fading length setting. '
                'trace length = %g, fading length = %g'
                % (self.nslc_id + (self.tmax-self.tmin, tfade)))

        if freqlimits is None and (
                transfer_function is None or transfer_function.is_scalar()):

            # special case for flat responses

            output = self.copy()
            data = self.ydata
            ndata = data.size

            if transfer_function is not None:
                c = num.abs(transfer_function.evaluate(num.ones(1))[0])

                if invert:
                    c = 1.0/c

                data *= c

            if tfade != 0.0:
                data *= costaper(
                    0., tfade, self.deltat*(ndata-1)-tfade, self.deltat*ndata,
                    ndata, self.deltat)

            output.ydata = data

        else:
            ndata = self.ydata.size
            ntrans = nextpow2(ndata*1.2)
            coefs = self._get_tapered_coefs(
                ntrans, freqlimits, transfer_function, invert=invert)

            data = self.ydata

            data_pad = num.zeros(ntrans, dtype=float)
            data_pad[:ndata] = data
            if demean:
                data_pad[:ndata] -= data.mean()

            if tfade != 0.0:
                data_pad[:ndata] *= costaper(
                    0., tfade, self.deltat*(ndata-1)-tfade, self.deltat*ndata,
                    ndata, self.deltat)

            fdata = num.fft.rfft(data_pad)
            fdata *= coefs
            ddata = num.fft.irfft(fdata)
            output = self.copy()
            output.ydata = ddata[:ndata]

        if cut_off_fading and tfade != 0.0:
            try:
                output.chop(output.tmin+tfade, output.tmax-tfade,
                            inplace=True)
            except NoData:
                raise TraceTooShort(
                    'Trace %s.%s.%s.%s too short for fading length setting. '
                    'trace length = %g, fading length = %g'
                    % (self.nslc_id + (self.tmax-self.tmin, tfade)))
        else:
            output.ydata = output.ydata.copy()

        return output
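
    # Usage sketch: deconvolve an instrument response `resp` (a
    # FrequencyResponse object obtained elsewhere), restricting the result
    # to a stable frequency band (corner frequencies illustrative):
    #
    #   restituted = tr.transfer(
    #       tfade=10.,
    #       freqlimits=(0.01, 0.02, 5., 10.),
    #       transfer_function=resp,
    #       invert=True)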

    def differentiate(self, n=1, order=4, inplace=True):
        '''
        Approximate first or second derivative of the trace.

        :param n: 1 for first derivative, 2 for second
        :param order: order of the approximation; 2 and 4 are supported
        :param inplace: if ``True`` the trace is differentiated in place,
            otherwise a new trace object with the derivative is returned.

        Raises :py:exc:`ValueError` for unsupported `n` or `order`.

        See :py:func:`~pyrocko.util.diff_fd` for implementation details.
        '''

        ddata = util.diff_fd(n, order, self.deltat, self.ydata)

        if inplace:
            self.ydata = ddata
        else:
            output = self.copy(data=False)
            output.set_ydata(ddata)
            return output

    def drop_chain_cache(self):
        if self._pchain:
            self._pchain.clear()

    def init_chain(self):
        self._pchain = pchain.Chain(
            do_downsample,
            do_extend,
            do_pre_taper,
            do_fft,
            do_filter,
            do_ifft)

    def run_chain(self, tmin, tmax, deltat, setup, nocache):
        if setup.domain == 'frequency_domain':
            _, _, data = self._pchain(
                (self, deltat),
                (tmin, tmax),
                (setup.taper,),
                (setup.filter,),
                (setup.filter,),
                nocache=nocache)

            return num.abs(data), num.abs(data)

        else:
            processed = self._pchain(
                (self, deltat),
                (tmin, tmax),
                (setup.taper,),
                (setup.filter,),
                (setup.filter,),
                (),
                nocache=nocache)

            if setup.domain == 'time_domain':
                data = processed.get_ydata()

            elif setup.domain == 'envelope':
                processed = processed.envelope(inplace=False)

            elif setup.domain == 'absolute':
                processed.set_ydata(num.abs(processed.get_ydata()))

            return processed.get_ydata(), processed

    def misfit(self, candidate, setup, nocache=False, debug=False):
        '''
        Calculate misfit and normalization factor against candidate trace.

        :param candidate: :py:class:`Trace` object
        :param setup: :py:class:`MisfitSetup` object
        :returns: tuple ``(m, n)``, where m is the misfit value and n is the
            normalization divisor

        If the sampling rates of ``self`` and ``candidate`` differ, the trace
        with the higher sampling rate will be downsampled.
        '''

        a = self
        b = candidate

        for tr in (a, b):
            if not tr._pchain:
                tr.init_chain()

        deltat = max(a.deltat, b.deltat)
        tmin = min(a.tmin, b.tmin) - deltat
        tmax = max(a.tmax, b.tmax) + deltat

        adata, aproc = a.run_chain(tmin, tmax, deltat, setup, nocache)
        bdata, bproc = b.run_chain(tmin, tmax, deltat, setup, nocache)

        if setup.domain != 'cc_max_norm':
            m, n = Lx_norm(bdata, adata, norm=setup.norm)
        else:
            ctr = correlate(aproc, bproc, mode='full', normalization='normal')
            ccmax = ctr.max()[1]
            m = 0.5 - 0.5 * ccmax
            n = 0.5

        if debug:
            return m, n, aproc, bproc
        else:
            return m, n
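
    # Usage sketch: time-domain misfit between observed and synthetic
    # traces, assuming a MisfitSetup instance `setup` (with norm, taper and
    # domain configured) created elsewhere:
    #
    #   m, n = tr_obs.misfit(tr_syn, setup)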

    def spectrum(self, pad_to_pow2=False, tfade=None):
        '''
        Get FFT spectrum of trace.

        :param pad_to_pow2: whether to zero-pad the data to next larger
            power-of-two length
        :param tfade: ``None`` or a time length in seconds, to apply cosine
            shaped tapers to both ends of the trace

        :returns: a tuple with (frequencies, values)
        '''

        ndata = self.ydata.size

        if pad_to_pow2:
            ntrans = nextpow2(ndata)
        else:
            ntrans = ndata

        if tfade is None:
            ydata = self.ydata
        else:
            ydata = self.ydata * costaper(
                0., tfade, self.deltat*(ndata-1)-tfade, self.deltat*ndata,
                ndata, self.deltat)

        fydata = num.fft.rfft(ydata, ntrans)
        df = 1./(ntrans*self.deltat)
        fxdata = num.arange(len(fydata))*df
        return fxdata, fydata

    def multi_filter(self, filter_freqs, bandwidth):

        class Gauss(FrequencyResponse):
            f0 = Float.T()
            a = Float.T(default=1.0)

            def __init__(self, f0, a=1.0, **kwargs):
                FrequencyResponse.__init__(self, f0=f0, a=a, **kwargs)

            def evaluate(self, freqs):
                omega0 = 2.*math.pi*self.f0
                omega = 2.*math.pi*freqs
                return num.exp(-((omega-omega0)
                                 / (self.a*omega0))**2)

        freqs, coefs = self.spectrum()
        n = self.data_len()
        nfilt = len(filter_freqs)
        signal_tf = num.zeros((nfilt, n))
        centroid_freqs = num.zeros(nfilt)
        for ifilt, f0 in enumerate(filter_freqs):
            taper = Gauss(f0, a=bandwidth)
            weights = taper.evaluate(freqs)
            nhalf = freqs.size
            analytic_spec = num.zeros(n, dtype=complex)
            analytic_spec[:nhalf] = coefs*weights

            enorm = num.abs(analytic_spec[:nhalf])**2
            enorm /= num.sum(enorm)

            if n % 2 == 0:
                analytic_spec[1:nhalf-1] *= 2.
            else:
                analytic_spec[1:nhalf] *= 2.

            analytic = num.fft.ifft(analytic_spec)
            signal_tf[ifilt, :] = num.abs(analytic)

            enorm = num.abs(analytic_spec[:nhalf])**2
            enorm /= num.sum(enorm)
            centroid_freqs[ifilt] = num.sum(freqs*enorm)

        return centroid_freqs, signal_tf

    def _get_tapered_coefs(
            self, ntrans, freqlimits, transfer_function, invert=False):

        deltaf = 1./(self.deltat*ntrans)
        nfreqs = ntrans//2 + 1
        transfer = num.ones(nfreqs, dtype=complex)
        hi = snapper(nfreqs, deltaf)
        if freqlimits is not None:
            a, b, c, d = freqlimits
            freqs = num.arange(hi(d)-hi(a), dtype=float)*deltaf \
                + hi(a)*deltaf

            if invert:
                coeffs = transfer_function.evaluate(freqs)
                if num.any(coeffs == 0.0):
                    raise InfiniteResponse('%s.%s.%s.%s' % self.nslc_id)

                transfer[hi(a):hi(d)] = 1.0 / coeffs
            else:
                transfer[hi(a):hi(d)] = transfer_function.evaluate(freqs)

            tapered_transfer = costaper(a, b, c, d, nfreqs, deltaf)*transfer
        else:
            if invert:
                raise Exception(
                    'transfer: `freqlimits` must be given when `invert` is '
                    'set to `True`')

            freqs = num.arange(nfreqs) * deltaf
            tapered_transfer = transfer_function.evaluate(freqs)

        tapered_transfer[0] = 0.0  # don't introduce static offsets
        return tapered_transfer

    def fill_template(self, template, **additional):
        '''
        Fill string template with trace metadata.

        Uses normal python '%(placeholder)s' string templates. The following
        placeholders are considered: ``network``, ``station``, ``location``,
        ``channel``, ``tmin`` (time of first sample), ``tmax`` (time of last
        sample), ``tmin_ms``, ``tmax_ms``, ``tmin_us``, ``tmax_us``,
        ``tmin_year``, ``tmax_year``, ``tmin_month``, ``tmax_month``,
        ``tmin_day``, ``tmax_day``, ``julianday``. The variants ending with
        ``'_ms'`` include milliseconds, those with ``'_us'`` include
        microseconds, those with ``'_year'`` contain only the year.
        '''

        template = template.replace('%n', '%(network)s')\
            .replace('%s', '%(station)s')\
            .replace('%l', '%(location)s')\
            .replace('%c', '%(channel)s')\
            .replace('%b', '%(tmin)s')\
            .replace('%e', '%(tmax)s')\
            .replace('%j', '%(julianday)s')

        params = dict(
            zip(('network', 'station', 'location', 'channel'), self.nslc_id))
        params['tmin'] = util.time_to_str(
            self.tmin, format='%Y-%m-%d_%H-%M-%S')
        params['tmax'] = util.time_to_str(
            self.tmax, format='%Y-%m-%d_%H-%M-%S')
        params['tmin_ms'] = util.time_to_str(
            self.tmin, format='%Y-%m-%d_%H-%M-%S.3FRAC')
        params['tmax_ms'] = util.time_to_str(
            self.tmax, format='%Y-%m-%d_%H-%M-%S.3FRAC')
        params['tmin_us'] = util.time_to_str(
            self.tmin, format='%Y-%m-%d_%H-%M-%S.6FRAC')
        params['tmax_us'] = util.time_to_str(
            self.tmax, format='%Y-%m-%d_%H-%M-%S.6FRAC')
        params['tmin_year'], params['tmin_month'], params['tmin_day'] \
            = util.time_to_str(self.tmin, format='%Y-%m-%d').split('-')
        params['tmax_year'], params['tmax_month'], params['tmax_day'] \
            = util.time_to_str(self.tmax, format='%Y-%m-%d').split('-')
        params['julianday'] = util.julian_day_of_year(self.tmin)
        params.update(additional)
        return template % params
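
    # Usage sketch: build a file name from trace metadata (template
    # illustrative):
    #
    #   fn = tr.fill_template(
    #       '%(network)s.%(station)s.%(channel)s_%(tmin)s.mseed')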

    def plot(self):
        '''
        Show trace with matplotlib.

        See also: :py:meth:`Trace.snuffle`.
        '''

        import pylab
        pylab.plot(self.get_xdata(), self.get_ydata())
        name = '%s %s %s - %s' % (
            self.channel,
            self.station,
            time.strftime("%d-%m-%y %H:%M:%S", time.gmtime(self.tmin)),
            time.strftime("%d-%m-%y %H:%M:%S", time.gmtime(self.tmax)))

        pylab.title(name)
        pylab.show()

    def snuffle(self, **kwargs):
        '''
        Show trace in a snuffler window.

        :param stations: list of `pyrocko.model.Station` objects or ``None``
        :param events: list of `pyrocko.model.Event` objects or ``None``
        :param markers: list of `pyrocko.gui.util.Marker` objects or ``None``
        :param ntracks: float, number of tracks to be shown initially
            (default: 12)
        :param follow: time interval (in seconds) for real time follow mode
            or ``None``
        :param controls: bool, whether to show the main controls (default:
            ``True``)
        :param opengl: bool, whether to use opengl (default: ``False``)
        '''

        return snuffle([self], **kwargs)


def snuffle(traces, **kwargs):
    '''
    Show traces in a snuffler window.

    :param stations: list of `pyrocko.model.Station` objects or ``None``
    :param events: list of `pyrocko.model.Event` objects or ``None``
    :param markers: list of `pyrocko.gui.util.Marker` objects or ``None``
    :param ntracks: int, number of tracks to be shown initially (default: 12)
    :param follow: time interval (in seconds) for real time follow mode or
        ``None``
    :param controls: bool, whether to show the main controls (default:
        ``True``)
    :param opengl: bool, whether to use OpenGL (default: ``False``)
    '''

    from pyrocko import pile
    from pyrocko.gui import snuffler
    p = pile.Pile()
    if traces:
        trf = pile.MemTracesFile(None, traces)
        p.add_file(trf)
    return snuffler.snuffle(p, **kwargs)


class InfiniteResponse(Exception):
    '''
    This exception is raised by :py:class:`Trace` operations when
    deconvolution of a frequency response (instrument response transfer
    function) would result in a division by zero.
    '''


class MisalignedTraces(Exception):
    '''
    This exception is raised by some :py:class:`Trace` operations when tmin,
    tmax or number of samples do not match.
    '''

    pass


class NoData(Exception):
    '''
    This exception is raised by some :py:class:`Trace` operations when no or
    not enough data is available.
    '''

    pass


class AboveNyquist(Exception):
    '''
    This exception is raised by some :py:class:`Trace` operations when given
    frequencies are above the Nyquist frequency.
    '''

    pass


class TraceTooShort(Exception):
    '''
    This exception is raised by some :py:class:`Trace` operations when the
    trace is too short.
    '''

    pass


class ResamplingFailed(Exception):
    pass


def minmax(traces, key=None, mode='minmax', outer_mode='minmax'):

    '''
    Get data range given traces grouped by selected pattern.

    :param key: a callable which takes as single argument a trace and returns
        a key for the grouping of the results. If this is ``None``, the
        default, ``lambda tr: (tr.network, tr.station, tr.location,
        tr.channel)`` is used.
    :param mode: ``'minmax'`` or floating point number. If this is
        ``'minmax'``, minimum and maximum of the traces are used, if it is a
        number, mean +- standard deviation times ``mode`` is used.
    :param outer_mode: ``'minmax'`` to use the minimum and maximum of the
        single-trace ranges, or ``'robust'`` to use the 10th percentile of
        the minima and the 90th percentile of the maxima, discarding extreme
        values on either end.

    :returns: a dict with the combined data ranges.

    Examples::

        ranges = minmax(traces, lambda tr: tr.channel)
        print(ranges['N'])  # min & max of all traces with channel == 'N'
        print(ranges['E'])  # min & max of all traces with channel == 'E'

        ranges = minmax(traces, lambda tr: (tr.network, tr.station))
        print(ranges['GR', 'HAM3'])  # min & max of all traces with
                                     # network == 'GR' and station == 'HAM3'

        ranges = minmax(traces, lambda tr: None)
        print(ranges[None])  # min & max of all traces
    '''

    if key is None:
        key = _default_key

    ranges = defaultdict(list)
    for trace in traces:
        if isinstance(mode, str) and mode == 'minmax':
            mi, ma = num.nanmin(trace.ydata), num.nanmax(trace.ydata)
        else:
            mean = trace.ydata.mean()
            std = trace.ydata.std()
            mi, ma = mean-std*mode, mean+std*mode

        k = key(trace)
        ranges[k].append((mi, ma))

    for k in ranges:
        mins, maxs = num.array(ranges[k]).T
        if outer_mode == 'minmax':
            ranges[k] = num.nanmin(mins), num.nanmax(maxs)
        elif outer_mode == 'robust':
            ranges[k] = num.percentile(mins, 10.), num.percentile(maxs, 90.)

    return ranges


def minmaxtime(traces, key=None):

    '''
    Get time range given traces grouped by selected pattern.

    :param key: a callable which takes as single argument a trace and returns
        a key for the grouping of the results. If this is ``None``, the
        default, ``lambda tr: (tr.network, tr.station, tr.location,
        tr.channel)`` is used.

    :returns: a dict with the combined time ranges.
    '''

    if key is None:
        key = _default_key

    ranges = {}
    for trace in traces:
        mi, ma = trace.tmin, trace.tmax
        k = key(trace)
        if k not in ranges:
            ranges[k] = mi, ma
        else:
            tmi, tma = ranges[k]
            ranges[k] = min(tmi, mi), max(tma, ma)

    return ranges


def degapper(
        traces,
        maxgap=5,
        fillmethod='interpolate',
        deoverlap='use_second',
        maxlap=None):

    '''
    Try to connect traces and remove gaps.

    This method will combine adjacent traces, which match in their network,
    station, location and channel attributes. Overlapping parts are handled
    according to the ``deoverlap`` argument.

    :param traces: input traces, must be sorted by their full_id attribute.
    :param maxgap: maximum number of samples to interpolate.
    :param fillmethod: what to put into the gaps: 'interpolate' or 'zeros'.
    :param deoverlap: how to handle overlaps: 'use_second' to use data from
        second trace (default), 'use_first' to use data from first trace,
        'crossfade_cos' to crossfade with cosine taper, 'add' to add amplitude
        values.
    :param maxlap: maximum number of samples of overlap which are removed.

    :returns: list of traces
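
    Example (illustrative; assumes ``traces`` is a list of :py:class:`Trace`
    objects from a single channel)::

        traces.sort(key=lambda tr: tr.full_id)
        traces = degapper(traces)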
    '''

    in_traces = traces
    out_traces = []
    if not in_traces:
        return out_traces
    out_traces.append(in_traces.pop(0))
    while in_traces:

        a = out_traces[-1]
        b = in_traces.pop(0)

        avirt, bvirt = a.ydata is None, b.ydata is None
        assert avirt == bvirt, \
            'traces given to degapper() must either all have data or have ' \
            'no data.'

        virtual = avirt and bvirt

        if (a.nslc_id == b.nslc_id and a.deltat == b.deltat
                and a.data_len() >= 1 and b.data_len() >= 1
                and (virtual or a.ydata.dtype == b.ydata.dtype)):

            dist = (b.tmin-(a.tmin+(a.data_len()-1)*a.deltat))/a.deltat
            idist = int(round(dist))
            if abs(dist - idist) > 0.05 and idist <= maxgap:
                # logger.warning('Cannot degap traces with displaced '
                #                'sampling (%s, %s, %s, %s)' % a.nslc_id)
                pass
            else:
                if 1 < idist <= maxgap:
                    if not virtual:
                        if fillmethod == 'interpolate':
                            filler = a.ydata[-1] + (
                                ((1.0 + num.arange(idist-1, dtype=float))
                                 / idist) * (b.ydata[0]-a.ydata[-1])
                            ).astype(a.ydata.dtype)
                        elif fillmethod == 'zeros':
                            filler = num.zeros(idist-1, dtype=a.ydata.dtype)
                        a.ydata = num.concatenate((a.ydata, filler, b.ydata))
                    a.tmax = b.tmax
                    if a.mtime and b.mtime:
                        a.mtime = max(a.mtime, b.mtime)
                    continue

                elif idist == 1:
                    if not virtual:
                        a.ydata = num.concatenate((a.ydata, b.ydata))
                    a.tmax = b.tmax
                    if a.mtime and b.mtime:
                        a.mtime = max(a.mtime, b.mtime)
                    continue

                elif idist <= 0 and (maxlap is None or -maxlap < idist):
                    if b.tmax > a.tmax:
                        if not virtual:
                            na = a.ydata.size
                            n = -idist+1
                            if deoverlap == 'use_second':
                                a.ydata = num.concatenate(
                                    (a.ydata[:-n], b.ydata))
                            elif deoverlap in ('use_first', 'crossfade_cos'):
                                a.ydata = num.concatenate(
                                    (a.ydata, b.ydata[n:]))
                            elif deoverlap == 'add':
                                a.ydata[-n:] += b.ydata[:n]
                                a.ydata = num.concatenate(
                                    (a.ydata, b.ydata[n:]))
                            else:
                                assert False, 'unknown deoverlap method'

                            if deoverlap == 'crossfade_cos':
                                n = -idist+1
                                taper = 0.5-0.5*num.cos(
                                    (1.+num.arange(n))/(1.+n)*num.pi)
                                a.ydata[na-n:na] *= 1.-taper
                                a.ydata[na-n:na] += b.ydata[:n] * taper

                        a.tmax = b.tmax
                        if a.mtime and b.mtime:
                            a.mtime = max(a.mtime, b.mtime)
                        continue
                    else:
                        # make short second trace vanish
                        continue

        if b.data_len() >= 1:
            out_traces.append(b)

    for tr in out_traces:
        tr._update_ids()

    return out_traces


def rotate(traces, azimuth, in_channels, out_channels):
    '''
    2D rotation of traces.

    :param traces: list of input traces
    :param azimuth: difference of the azimuths of the component directions
        (azimuth of out_channels[0]) - (azimuth of in_channels[0])
    :param in_channels: names of the input channels (e.g. 'N', 'E')
    :param out_channels: names of the output channels (e.g. 'R', 'T')
    :returns: list of rotated traces
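
    Example (illustrative; assumes ``n`` and ``e`` are north and east
    component traces)::

        r, t = rotate(
            [n, e], azimuth=30.,
            in_channels=['N', 'E'],
            out_channels=['R', 'T'])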
    '''

    phi = azimuth/180.*math.pi
    cphi = math.cos(phi)
    sphi = math.sin(phi)
    rotated = []
    in_channels = tuple(_channels_to_names(in_channels))
    out_channels = tuple(_channels_to_names(out_channels))
    for a in traces:
        for b in traces:
            if ((a.channel, b.channel) == in_channels and
                    a.nslc_id[:3] == b.nslc_id[:3] and
                    abs(a.deltat-b.deltat) < a.deltat*0.001):
                tmin = max(a.tmin, b.tmin)
                tmax = min(a.tmax, b.tmax)

                if tmin < tmax:
                    ac = a.chop(tmin, tmax, inplace=False, include_last=True)
                    bc = b.chop(tmin, tmax, inplace=False, include_last=True)
                    if abs(ac.tmin - bc.tmin) > ac.deltat*0.01:
                        logger.warning(
                            'Cannot rotate traces with displaced sampling '
                            '(%s, %s, %s, %s)' % a.nslc_id)
                        continue

                    acydata = ac.get_ydata()*cphi+bc.get_ydata()*sphi
                    bcydata = -ac.get_ydata()*sphi+bc.get_ydata()*cphi
                    ac.set_ydata(acydata)
                    bc.set_ydata(bcydata)

                    ac.set_codes(channel=out_channels[0])
                    bc.set_codes(channel=out_channels[1])
                    rotated.append(ac)
                    rotated.append(bc)

    return rotated


def rotate_to_rt(n, e, source, receiver, out_channels=('R', 'T')):
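    '''
    Rotate north and east traces into radial and transverse system.

    The rotation azimuth is computed from the ``source`` and ``receiver``
    positions with :py:func:`pyrocko.orthodrome.azimuth`.

    :param n: north trace
    :param e: east trace
    :param source: source of the recorded signal (e.g.
        :py:class:`pyrocko.model.Event`)
    :param receiver: receiver of the recorded signal (e.g.
        :py:class:`pyrocko.model.Station`)
    :param out_channels: output channel names (default: ``('R', 'T')``)
    :returns: radial and transverse trace
    '''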
    azimuth = orthodrome.azimuth(receiver, source) + 180.
    in_channels = n.channel, e.channel
    out = rotate(
        [n, e], azimuth,
        in_channels=in_channels,
        out_channels=out_channels)

    assert len(out) == 2
    for tr in out:
        if tr.channel == out_channels[0]:
            r = tr
        elif tr.channel == out_channels[1]:
            t = tr

    return r, t


def rotate_to_lqt(traces, backazimuth, incidence, in_channels,
                  out_channels=('L', 'Q', 'T')):
    '''
    Rotate traces from ZNE to LQT system.

    :param traces: list of traces in arbitrary order
    :param backazimuth: backazimuth in degrees clockwise from north
    :param incidence: incidence angle in degrees from vertical
    :param in_channels: input channel names
    :param out_channels: output channel names (default: ('L', 'Q', 'T'))
    :returns: list of transformed traces
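
    Example (illustrative; assumes ``traces`` contains vertical, north and
    east component traces)::

        lqt = rotate_to_lqt(
            traces, backazimuth=45., incidence=10.,
            in_channels=['Z', 'N', 'E'])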
    '''
    i = incidence/180.*num.pi
    b = backazimuth/180.*num.pi

    ci = num.cos(i)
    cb = num.cos(b)
    si = num.sin(i)
    sb = num.sin(b)

    rotmat = num.array(
        [[ci, -cb*si, -sb*si], [si, cb*ci, sb*ci], [0., sb, -cb]])
    return project(traces, rotmat, in_channels, out_channels)


def _decompose(a):
    '''
    Decompose matrix into independent submatrices.
    '''

    def depends(iout, a):
        row = a[iout, :]
        return set(num.arange(row.size).compress(row != 0.0))

    def provides(iin, a):
        col = a[:, iin]
        return set(num.arange(col.size).compress(col != 0.0))

    a = num.asarray(a)
    outs = set(range(a.shape[0]))
    systems = []
    while outs:
        iout = outs.pop()

        gout = set()
        for iin in depends(iout, a):
            gout.update(provides(iin, a))

        if not gout:
            continue

        gin = set()
        for iout2 in gout:
            gin.update(depends(iout2, a))

        if not gin:
            continue

        for iout2 in gout:
            if iout2 in outs:
                outs.remove(iout2)

        gin = list(gin)
        gin.sort()
        gout = list(gout)
        gout.sort()

        systems.append((gin, gout, a[gout, :][:, gin]))

    return systems


def _channels_to_names(channels):
    names = []
    for ch in channels:
        if isinstance(ch, model.Channel):
            names.append(ch.name)
        else:
            names.append(ch)
    return names


def project(traces, matrix, in_channels, out_channels):
    '''
    Affine transform of three-component traces.

    Compute the matrix-vector product of three-component traces, e.g. to
    rotate traces into a different basis. The traces are distinguished and
    ordered by their channel attribute. The transform is applied to
    overlapping parts of any appropriate combinations of the input traces,
    so the function stays robust in the presence of data gaps. It also tries
    to apply the transformation to subsets of the channels if possible, so
    that, for example, if a vertical component is missing, the horizontal
    components can still be rotated.

    :param traces: list of traces in arbitrary order
    :param matrix: transformation matrix
    :param in_channels: input channel names
    :param out_channels: output channel names
    :returns: list of transformed traces
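
    Example (illustrative; a horizontal rotation by ``phi``, leaving the
    vertical component untouched)::

        import numpy as num

        phi = num.pi / 4.
        matrix = num.array([
            [1., 0., 0.],
            [0., num.cos(phi), num.sin(phi)],
            [0., -num.sin(phi), num.cos(phi)]])

        rotated = project(traces, matrix, ['Z', 'N', 'E'], ['Z', 'R', 'T'])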
    '''

    in_channels = tuple(_channels_to_names(in_channels))
    out_channels = tuple(_channels_to_names(out_channels))
    systems = _decompose(matrix)

    # fall back to the full matrix if any submatrix is not square
    for iins, iouts, submatrix in systems:
        if submatrix.shape[0] != submatrix.shape[1]:
            return _project3(traces, matrix, in_channels, out_channels)

    projected = []
    for iins, iouts, submatrix in systems:
        in_cha = tuple([in_channels[iin] for iin in iins])
        out_cha = tuple([out_channels[iout] for iout in iouts])
        if submatrix.shape[0] == 1:
            projected.extend(_project1(traces, submatrix, in_cha, out_cha))
        elif submatrix.shape[1] == 2:
            projected.extend(_project2(traces, submatrix, in_cha, out_cha))
        else:
            projected.extend(_project3(traces, submatrix, in_cha, out_cha))

    return projected


def project_dependencies(matrix, in_channels, out_channels):
    '''
    Figure out what dependencies :py:func:`project` would produce.
    '''

    in_channels = tuple(_channels_to_names(in_channels))
    out_channels = tuple(_channels_to_names(out_channels))
    systems = _decompose(matrix)

    subpro = []
    for iins, iouts, submatrix in systems:
        if submatrix.shape[0] != submatrix.shape[1]:
            subpro.append((matrix, in_channels, out_channels))

    if not subpro:
        for iins, iouts, submatrix in systems:
            in_cha = tuple([in_channels[iin] for iin in iins])
            out_cha = tuple([out_channels[iout] for iout in iouts])
            subpro.append((submatrix, in_cha, out_cha))

    deps = {}
    for mat, in_cha, out_cha in subpro:
        for oc in out_cha:
            if oc not in deps:
                deps[oc] = []

            for ic in in_cha:
                deps[oc].append(ic)

    return deps


def _project1(traces, matrix, in_channels, out_channels):
    assert len(in_channels) == 1
    assert len(out_channels) == 1
    assert matrix.shape == (1, 1)

    projected = []
    for a in traces:
        if not (a.channel,) == in_channels:
            continue

        ac = a.copy()
        ac.set_ydata(matrix[0, 0]*a.get_ydata())
        ac.set_codes(channel=out_channels[0])
        projected.append(ac)

    return projected


def _project2(traces, matrix, in_channels, out_channels):
    assert len(in_channels) == 2
    assert len(out_channels) == 2
    assert matrix.shape == (2, 2)
    projected = []
    for a in traces:
        for b in traces:
            if not ((a.channel, b.channel) == in_channels and
                    a.nslc_id[:3] == b.nslc_id[:3] and
                    abs(a.deltat-b.deltat) < a.deltat*0.001):
                continue

            tmin = max(a.tmin, b.tmin)
            tmax = min(a.tmax, b.tmax)

            if tmin > tmax:
                continue

            ac = a.chop(tmin, tmax, inplace=False, include_last=True)
            bc = b.chop(tmin, tmax, inplace=False, include_last=True)
            if abs(ac.tmin - bc.tmin) > ac.deltat*0.01:
                logger.warning(
                    'Cannot project traces with displaced sampling '
                    '(%s, %s, %s, %s)' % a.nslc_id)
                continue

            acydata = num.dot(matrix[0], (ac.get_ydata(), bc.get_ydata()))
            bcydata = num.dot(matrix[1], (ac.get_ydata(), bc.get_ydata()))

            ac.set_ydata(acydata)
            bc.set_ydata(bcydata)

            ac.set_codes(channel=out_channels[0])
            bc.set_codes(channel=out_channels[1])

            projected.append(ac)
            projected.append(bc)

    return projected


def _project3(traces, matrix, in_channels, out_channels):
    assert len(in_channels) == 3
    assert len(out_channels) == 3
    assert matrix.shape == (3, 3)
    projected = []
    for a in traces:
        for b in traces:
            for c in traces:
                if not ((a.channel, b.channel, c.channel) == in_channels
                        and a.nslc_id[:3] == b.nslc_id[:3]
                        and b.nslc_id[:3] == c.nslc_id[:3]
                        and abs(a.deltat-b.deltat) < a.deltat*0.001
                        and abs(b.deltat-c.deltat) < b.deltat*0.001):

                    continue

                tmin = max(a.tmin, b.tmin, c.tmin)
                tmax = min(a.tmax, b.tmax, c.tmax)

                if tmin >= tmax:
                    continue

                ac = a.chop(tmin, tmax, inplace=False, include_last=True)
                bc = b.chop(tmin, tmax, inplace=False, include_last=True)
                cc = c.chop(tmin, tmax, inplace=False, include_last=True)
                if (abs(ac.tmin - bc.tmin) > ac.deltat*0.01
                        or abs(bc.tmin - cc.tmin) > bc.deltat*0.01):

                    logger.warning(
                        'Cannot project traces with displaced sampling '
                        '(%s, %s, %s, %s)' % a.nslc_id)
                    continue

                acydata = num.dot(
                    matrix[0],
                    (ac.get_ydata(), bc.get_ydata(), cc.get_ydata()))
                bcydata = num.dot(
                    matrix[1],
                    (ac.get_ydata(), bc.get_ydata(), cc.get_ydata()))
                ccydata = num.dot(
                    matrix[2],
                    (ac.get_ydata(), bc.get_ydata(), cc.get_ydata()))

                ac.set_ydata(acydata)
                bc.set_ydata(bcydata)
                cc.set_ydata(ccydata)

                ac.set_codes(channel=out_channels[0])
                bc.set_codes(channel=out_channels[1])
                cc.set_codes(channel=out_channels[2])

                projected.append(ac)
                projected.append(bc)
                projected.append(cc)

    return projected


def correlate(a, b, mode='valid', normalization=None, use_fft=False):
    '''
    Cross correlation of two traces.

    :param a,b: input traces
    :param mode: ``'valid'``, ``'full'``, or ``'same'``
    :param normalization: ``'normal'``, ``'gliding'``, or ``None``
    :param use_fft: bool, whether to do cross correlation in spectral domain

    :returns: trace containing cross correlation coefficients

    This function computes the cross correlation between two traces. It
    evaluates the discrete equivalent of

    .. math::

       c(t) = \\int_{-\\infty}^{\\infty} a^{\\ast}(\\tau) b(t+\\tau) d\\tau

    where the star denotes complex conjugate. Note that the arguments here
    are swapped with respect to the :py:func:`numpy.correlate` function,
    which is internally called. This function should be safe even with older
    versions of NumPy, where the correlate function has some problems.

    A trace containing the cross correlation coefficients is returned. The
    time information of the output trace is set so that the returned cross
    correlation can be viewed directly as a function of time lag.

    Example::

        # align two traces a and b containing a time shifted similar signal:
        c = pyrocko.trace.correlate(a, b)
        t, coef = c.max()  # get time and value of maximum
        b.shift(-t)        # align b with a
    '''

    assert_same_sampling_rate(a, b)

    ya, yb = a.ydata, b.ydata

    # need reversed order here:
    yc = numpy_correlate_fixed(yb, ya, mode=mode, use_fft=use_fft)
    kmin, kmax = numpy_correlate_lag_range(yb, ya, mode=mode, use_fft=use_fft)

    if normalization == 'normal':
        normfac = num.sqrt(num.sum(ya**2))*num.sqrt(num.sum(yb**2))
        yc = yc/normfac

    elif normalization == 'gliding':
        if mode != 'valid':
            assert False, 'gliding normalization currently only available ' \
                'with "valid" mode.'

        if ya.size < yb.size:
            yshort, ylong = ya, yb
        else:
            yshort, ylong = yb, ya

        epsilon = 0.00001
        normfac_short = num.sqrt(num.sum(yshort**2))
        normfac = normfac_short * num.sqrt(
            moving_sum(ylong**2, yshort.size, mode='valid')) \
            + normfac_short*epsilon

        if yb.size <= ya.size:
            normfac = normfac[::-1]

        yc /= normfac

    c = a.copy()
    c.set_ydata(yc)
    c.set_codes(*merge_codes(a, b, '~'))
    c.shift(-c.tmin + b.tmin-a.tmin + kmin * c.deltat)

    return c


def deconvolve(
        a, b, waterlevel,
        tshift=0.,
        pad=0.5,
        fd_taper=None,
        pad_to_pow2=True):
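    '''
    Deconvolve one trace from another by water-level spectral division.

    Computes the spectral ratio of ``a`` over ``b``. The denominator's
    autospectrum is stabilized by clipping it at ``waterlevel`` times its
    maximum before division.

    :param a,b: input traces (numerator and denominator)
    :param waterlevel: water level as fraction of the maximum of the
        denominator's autospectrum
    :param tshift: time shift applied to the output in [s]
    :param pad: padding as fraction of the data length of ``a``
    :param fd_taper: optional frequency domain taper, called with the
        spectrum, a start frequency of ``0.0`` and the frequency increment
    :param pad_to_pow2: whether to zero-pad to the next power of two before
        the FFT
    '''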

    assert_same_sampling_rate(a, b)
    assert abs(a.tmin - b.tmin) < a.deltat * 0.001
    deltat = a.deltat
    npad = int(round(a.data_len()*pad + tshift / deltat))

    ndata = max(a.data_len(), b.data_len())
    ndata_pad = ndata + npad

    if pad_to_pow2:
        ntrans = nextpow2(ndata_pad)
    else:
        ntrans = ndata

    aspec = num.fft.rfft(a.ydata, ntrans)
    bspec = num.fft.rfft(b.ydata, ntrans)

    out = aspec * num.conj(bspec)

    bautocorr = bspec*num.conj(bspec)
    denom = num.maximum(bautocorr, waterlevel * bautocorr.max())

    out /= denom
    df = 1/(ntrans*deltat)

    if fd_taper is not None:
        fd_taper(out, 0.0, df)

    ydata = num.roll(num.fft.irfft(out), int(round(tshift/deltat)))
    c = a.copy(data=False)
    c.set_ydata(ydata[:ndata])
    c.set_codes(*merge_codes(a, b, '/'))
    return c


def assert_same_sampling_rate(a, b, eps=1.0e-6):
    assert same_sampling_rate(a, b, eps), \
        'Sampling rates differ: %g != %g' % (a.deltat, b.deltat)


def same_sampling_rate(a, b, eps=1.0e-6):
    '''
    Check if two traces have the same sampling rate.

    :param a,b: input traces
    :param eps: relative tolerance
    '''

    return abs(a.deltat - b.deltat) < (a.deltat + b.deltat)*eps


def fix_deltat_rounding_errors(deltat):
    '''
    Try to undo sampling rate rounding errors.

    Fix rounding errors of sampling intervals when these are read from single
    precision floating point values.

    Assumes that the true sampling rate or sampling interval was an integer
    value. No correction will be applied if this would change the sampling
    rate by more than 0.001%.
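
    Example (illustrative; ``0.009999999776482582`` is ``0.01`` read from a
    single precision float)::

        deltat = fix_deltat_rounding_errors(0.009999999776482582)
        # deltat == 0.01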
    '''

    if deltat <= 1.0:
        deltat_new = 1.0 / round(1.0 / deltat)
    else:
        deltat_new = round(deltat)

    if abs(deltat_new - deltat) / deltat > 1e-5:
        deltat_new = deltat

    return deltat_new


def merge_codes(a, b, sep='-'):
    '''
    Merge network-station-location-channel codes of a pair of traces.
    '''

    o = []
    for xa, xb in zip(a.nslc_id, b.nslc_id):
        if xa == xb:
            o.append(xa)
        else:
            o.append(sep.join((xa, xb)))
    return o


class Taper(Object):
    '''
    Base class for tapers.

    Does nothing by default.
    '''

    def __call__(self, y, x0, dx):
        pass


class CosTaper(Taper):
    '''
    Cosine Taper.

    :param a: start of fading in
    :param b: end of fading in
    :param c: start of fading out
    :param d: end of fading out
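
    Example (illustrative; tapers a one second long window with 0.1 s cosine
    flanks)::

        import numpy as num

        taper = CosTaper(0., 0.1, 0.9, 1.0)
        y = num.ones(101)
        taper(y, 0., 0.01)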
    '''

    a = Float.T()
    b = Float.T()
    c = Float.T()
    d = Float.T()

    def __init__(self, a, b, c, d):
        Taper.__init__(self, a=a, b=b, c=c, d=d)

    def __call__(self, y, x0, dx):
        apply_costaper(self.a, self.b, self.c, self.d, y, x0, dx)

    def span(self, y, x0, dx):
        return span_costaper(self.a, self.b, self.c, self.d, y, x0, dx)

    def time_span(self):
        return self.a, self.d


class CosFader(Taper):
    '''
    Cosine Fader.

    :param xfade: fade in and fade out time in seconds (optional)
    :param xfrac: fade in and fade out as fraction between 0. and 1.
        (optional)

    Only one argument can be set. The other has to be ``None``.
    '''

    xfade = Float.T(optional=True)
    xfrac = Float.T(optional=True)

    def __init__(self, xfade=None, xfrac=None):
        Taper.__init__(self, xfade=xfade, xfrac=xfrac)
        assert (xfade is None) != (xfrac is None)
        self._xfade = xfade
        self._xfrac = xfrac

    def __call__(self, y, x0, dx):

        xfade = self._xfade

        xlen = (y.size - 1)*dx
        if xfade is None:
            xfade = xlen * self._xfrac

        a = x0
        b = x0 + xfade
        c = x0 + xlen - xfade
        d = x0 + xlen

        apply_costaper(a, b, c, d, y, x0, dx)

    def span(self, y, x0, dx):
        return 0, y.size

    def time_span(self):
        return None, None


def none_min(li):
    if None in li:
        return None
    else:
        return min(x for x in li if x is not None)


def none_max(li):
    if None in li:
        return None
    else:
        return max(x for x in li if x is not None)


class MultiplyTaper(Taper):
    '''
    Multiplication of several tapers.
    '''

    tapers = List.T(Taper.T())

    def __init__(self, tapers=None):
        if tapers is None:
            tapers = []

        Taper.__init__(self, tapers=tapers)

    def __call__(self, y, x0, dx):
        for taper in self.tapers:
            taper(y, x0, dx)

    def span(self, y, x0, dx):
        spans = []
        for taper in self.tapers:
            spans.append(taper.span(y, x0, dx))

        mins, maxs = list(zip(*spans))
        return min(mins), max(maxs)

    def time_span(self):
        spans = []
        for taper in self.tapers:
            spans.append(taper.time_span())

        mins, maxs = list(zip(*spans))
        return none_min(mins), none_max(maxs)


class GaussTaper(Taper):
    '''
    Frequency domain Gaussian filter.
    '''

    alpha = Float.T()

    def __init__(self, alpha):
        Taper.__init__(self, alpha=alpha)
        self._alpha = alpha

    def __call__(self, y, x0, dx):
        f = x0 + num.arange(y.size)*dx
        y *= num.exp(-num.pi**2 / (self._alpha**2) * f**2)


cached_coefficients = {}


def _get_cached_filter_coefs(order, corners, btype):
    ck = (order, tuple(corners), btype)
    if ck not in cached_coefficients:
        # with a single corner frequency, pass it as a scalar
        if len(corners) == 1:
            cached_coefficients[ck] = signal.butter(
                order, corners[0], btype=btype)
        else:
            cached_coefficients[ck] = signal.butter(
                order, corners, btype=btype)

    return cached_coefficients[ck]


class _globals(object):
    _numpy_has_correlate_flip_bug = None


def _default_key(tr):
    return (tr.network, tr.station, tr.location, tr.channel)


def numpy_has_correlate_flip_bug():
    '''
    Check if NumPy's correlate function reveals old behaviour.
    '''

    if _globals._numpy_has_correlate_flip_bug is None:
        a = num.array([0, 0, 1, 0, 0, 0, 0])
        b = num.array([0, 0, 0, 0, 1, 0, 0, 0])
        ab = num.correlate(a, b, mode='same')
        ba = num.correlate(b, a, mode='same')
        _globals._numpy_has_correlate_flip_bug = num.all(ab == ba)

    return _globals._numpy_has_correlate_flip_bug


def numpy_correlate_fixed(a, b, mode='valid', use_fft=False):
    '''
    Call :py:func:`numpy.correlate` with fixes.

    .. math::

       c[k] = \\sum_i a[i+k] \\cdot \\overline{b[i]}

    Note that the result produced by newer versions of
    :py:func:`numpy.correlate` is always flipped with respect to the formula
    given in its documentation (if ascending ``k`` is assumed for the
    output).
    '''

    if use_fft:
        if a.size < b.size:
            c = signal.fftconvolve(b[::-1], a, mode=mode)
        else:
            c = signal.fftconvolve(a, b[::-1], mode=mode)
        return c

    else:
        buggy = numpy_has_correlate_flip_bug()

        a = num.asarray(a)
        b = num.asarray(b)

        if buggy:
            b = num.conj(b)

        c = num.correlate(a, b, mode=mode)

        if buggy and a.size < b.size:
            return c[::-1]
        else:
            return c


def numpy_correlate_emulate(a, b, mode='valid'):
    '''
    Slow version of :py:func:`numpy.correlate` for comparison.
    '''

    a = num.asarray(a)
    b = num.asarray(b)
    kmin, kmax = numpy_correlate_lag_range(a, b, mode=mode)
    kmin = int(kmin)
    kmax = int(kmax)
    klen = kmax - kmin + 1
    c = num.zeros(klen, dtype=num.find_common_type((b.dtype, a.dtype), ()))
    for k in range(kmin, kmin+klen):
        imin = max(0, -k)
        ilen = min(b.size, a.size-k) - imin
        c[k-kmin] = num.sum(
            a[imin+k:imin+ilen+k] * num.conj(b[imin:imin+ilen]))

    return c


def numpy_correlate_lag_range(a, b, mode='valid', use_fft=False):
    '''
    Get range of lags for which :py:func:`numpy.correlate` produces values.
    '''

    a = num.asarray(a)
    b = num.asarray(b)

    kmin = -(b.size-1)
    if mode == 'full':
        klen = a.size-kmin
    elif mode == 'same':
        klen = max(a.size, b.size)
        kmin += (a.size+b.size-1 - max(a.size, b.size)) // 2 + \
            int(not use_fft and a.size % 2 == 0 and b.size > a.size)
    elif mode == 'valid':
        klen = abs(a.size - b.size) + 1
        kmin += min(a.size, b.size) - 1

    return kmin, kmin + klen - 1


def autocorr(x, nshifts):
    '''
    Compute biased estimate of the first autocorrelation coefficients.

    :param x: input array
    :param nshifts: number of coefficients to calculate
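
    With :math:`n` the size of ``x``, :math:`\\bar x` its mean and
    :math:`\\sigma` its standard deviation, the computed coefficients are

    .. math::

       r_k = \\frac{1}{(n-k) \\, \\sigma}
           \\sum_{i=0}^{n-k-1} (x_i - \\bar x)(x_{i+k} - \\bar x)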
    '''

    mean = num.mean(x)
    std = num.std(x)
    n = x.size
    xdm = x - mean
    r = num.zeros(nshifts)
    for k in range(nshifts):
        r[k] = 1./((n-num.abs(k))*std) * num.sum(xdm[:n-k] * xdm[k:])

    return r


def yulewalker(x, order):
    '''
    Compute autoregression coefficients using Yule-Walker method.

    :param x: input array
    :param order: number of coefficients to produce

    A biased estimate of the autocorrelation is used. The Yule-Walker
    equations are solved by :py:func:`numpy.linalg.inv` instead of
    Levinson-Durbin recursion which is normally used.
    '''

    gamma = autocorr(x, order+1)
    d = gamma[1:1+order]
    a = num.zeros((order, order))
    gamma2 = num.concatenate((gamma[::-1], gamma[1:order]))
    for i in range(order):
        ioff = order-i
        a[i, :] = gamma2[ioff:ioff+order]

    return num.dot(num.linalg.inv(a), -d)


def moving_avg(x, n):
    n = int(n)
    cx = x.cumsum()
    nn = len(x)
    y = num.zeros(nn, dtype=cx.dtype)
    y[n//2:n//2+(nn-n)] = (cx[n:]-cx[:-n])/n
    y[:n//2] = y[n//2]
    y[n//2+(nn-n):] = y[n//2+(nn-n)-1]
    return y


def moving_sum(x, n, mode='valid'):
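    '''
    Compute the moving sum of an array.

    :param x: input array
    :param n: window length in samples
    :param mode: ``'valid'``, ``'full'``, or ``'same'``; output lengths are
        analogous to those of :py:func:`numpy.correlate`

    The sums are computed from cumulative sums, so the cost is independent of
    the window length.
    '''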
    n = int(n)
    cx = x.cumsum()
    nn = len(x)

    if mode == 'valid':
        if nn-n+1 <= 0:
            return num.zeros(0, dtype=cx.dtype)
        y = num.zeros(nn-n+1, dtype=cx.dtype)
        y[0] = cx[n-1]
        y[1:nn-n+1] = cx[n:nn]-cx[0:nn-n]

    if mode == 'full':
        y = num.zeros(nn+n-1, dtype=cx.dtype)
        if n <= nn:
            y[0:n] = cx[0:n]
            y[n:nn] = cx[n:nn]-cx[0:nn-n]
            y[nn:nn+n-1] = cx[-1]-cx[nn-n:nn-1]
        else:
            y[0:nn] = cx[0:nn]
            y[nn:n] = cx[nn-1]
            y[n:nn+n-1] = cx[nn-1] - cx[0:nn-1]

    if mode == 'same':
        n1 = (n-1)//2
        y = num.zeros(nn, dtype=cx.dtype)
        if n <= nn:
            y[0:n-n1] = cx[n1:n]
            y[n-n1:nn-n1] = cx[n:nn]-cx[0:nn-n]
            y[nn-n1:nn] = cx[nn-1] - cx[nn-n:nn-n+n1]
        else:
            y[0:max(0, nn-n1)] = cx[min(n1, nn):nn]
            y[max(nn-n1, 0):min(n-n1, nn)] = cx[nn-1]
            y[min(n-n1, nn):nn] = cx[nn-1] - cx[0:max(0, nn-(n-n1))]

    return y


def nextpow2(i):
    return 2**int(math.ceil(math.log(i)/math.log(2.)))


def snapper_w_offset(nmax, offset, delta, snapfun=math.ceil):
    def snap(x):
        return max(0, min(int(snapfun((x-offset)/delta)), nmax))
    return snap


def snapper(nmax, delta, snapfun=math.ceil):
    def snap(x):
        return max(0, min(int(snapfun(x/delta)), nmax))
    return snap


def apply_costaper(a, b, c, d, y, x0, dx):
    hi = snapper_w_offset(y.size, x0, dx)
    y[:hi(a)] = 0.
    y[hi(a):hi(b)] *= 0.5 \
        - 0.5*num.cos((dx*num.arange(hi(a), hi(b))-(a-x0))/(b-a)*num.pi)
    y[hi(c):hi(d)] *= 0.5 \
        + 0.5*num.cos((dx*num.arange(hi(c), hi(d))-(c-x0))/(d-c)*num.pi)
    y[hi(d):] = 0.


def span_costaper(a, b, c, d, y, x0, dx):
    hi = snapper_w_offset(y.size, x0, dx)
    return hi(a), hi(d) - hi(a)


def costaper(a, b, c, d, nfreqs, deltaf):
    hi = snapper(nfreqs, deltaf)
    tap = num.zeros(nfreqs)
    tap[hi(a):hi(b)] = 0.5 \
        - 0.5*num.cos((deltaf*num.arange(hi(a), hi(b))-a)/(b-a)*num.pi)
    tap[hi(b):hi(c)] = 1.
    tap[hi(c):hi(d)] = 0.5 \
        + 0.5*num.cos((deltaf*num.arange(hi(c), hi(d))-c)/(d-c)*num.pi)

    return tap


def t2ind(t, tdelta, snap=round):
    return int(snap(t/tdelta))


def hilbert(x, N=None):
    '''
    Return the analytic signal of ``x`` of length ``N``; its imaginary part
    is the Hilbert transform of ``x``.

    (from scipy.signal, but changed to use fft and ifft from numpy.fft)
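
    Example (illustrative; envelope of a real-valued signal ``x``)::

        env = num.abs(hilbert(x))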
    '''

    x = num.asarray(x)
    if N is None:
        N = len(x)
    if N <= 0:
        raise ValueError("N must be positive.")
    if num.iscomplexobj(x):
        logger.warning('imaginary part of x ignored.')
        x = num.real(x)

    Xf = num.fft.fft(x, N, axis=0)
    h = num.zeros(N)
    if N % 2 == 0:
        h[0] = h[N//2] = 1
        h[1:N//2] = 2
    else:
        h[0] = 1
        h[1:(N+1)//2] = 2

    if len(x.shape) > 1:
        h = h[:, num.newaxis]
    x = num.fft.ifft(Xf*h)
    return x


def near(a, b, eps):
    return abs(a-b) < eps


def coroutine(func):
    def wrapper(*args, **kwargs):
        gen = func(*args, **kwargs)
        next(gen)
        return gen

    wrapper.__name__ = func.__name__
    wrapper.__dict__ = func.__dict__
    wrapper.__doc__ = func.__doc__
    return wrapper


class States(object):
    '''
    Utility to store channel-specific state in coroutines.
    '''

    def __init__(self):
        self._states = {}

    def get(self, tr):
        k = tr.nslc_id
        if k in self._states:
            tmin, deltat, dtype, value = self._states[k]
            if (near(tmin, tr.tmin, deltat/100.)
                    and near(deltat, tr.deltat, deltat/10000.)
                    and dtype == tr.ydata.dtype):

                return value

        return None

    def set(self, tr, value):
        k = tr.nslc_id
        if k in self._states and self._states[k][-1] is not value:
            self.free(self._states[k][-1])

        self._states[k] = (tr.tmax+tr.deltat, tr.deltat, tr.ydata.dtype, value)

    def free(self, value):
        pass


@coroutine
def co_list_append(list):
    while True:
        list.append((yield))


class ScipyBug(Exception):
    pass


@coroutine
def co_lfilter(target, b, a):
    '''
    Successively filter broken continuous trace data (coroutine).

    Create a coroutine which takes :py:class:`Trace` objects, filters their
    data through :py:func:`scipy.signal.lfilter` and sends new
    :py:class:`Trace` objects containing the filtered data to target. This is
    useful if one wants to filter a long continuous time series, which is
    split into many successive traces, without producing filter artifacts at
    trace boundaries.

    Filter states are kept *per channel*, specifically, for each (network,
    station, location, channel) combination occurring in the input traces, a
    separate state is created and maintained. This makes it possible to
    filter multichannel or multistation data with only one
    :py:func:`co_lfilter` instance.

    Filter state is reset when gaps occur.

    Use it like this::

        from pyrocko.trace import co_lfilter, co_list_append

        filtered_traces = []
        pipe = co_lfilter(co_list_append(filtered_traces), b, a)
        for trace in traces:
            pipe.send(trace)

        pipe.close()

    '''

    try:
        states = States()
        output = None
        while True:
            input = (yield)

            zi = states.get(input)
            if zi is None:
                zi = num.zeros(max(len(a), len(b))-1, dtype=float)

            output = input.copy(data=False)
            try:
                ydata, zf = signal.lfilter(b, a, input.get_ydata(), zi=zi)
            except ValueError:
                raise ScipyBug(
                    'signal.lfilter failed: could be related to a bug '
                    'in some older scipy versions, e.g. on opensuse42.1')

            output.set_ydata(ydata)
            states.set(input, zf)
            target.send(output)

    except GeneratorExit:
        target.close()


def co_antialias(target, q, n=None, ftype='fir'):
    b, a, n = util.decimate_coeffs(q, n, ftype)
    anti = co_lfilter(target, b, a)
    return anti


@coroutine
def co_dropsamples(target, q, nfir):
    try:
        states = States()
        while True:
            tr = (yield)
            newdeltat = q * tr.deltat
            ioffset = states.get(tr)
            if ioffset is None:
                # for fir filter, the first nfir samples are polluted by
                # boundary effects; cut them off.
                # for iir this may be (much) more, we do not correct for that.
                # put sample instances to a time which is a multiple of the
                # new sampling interval.
                newtmin_want = math.ceil(
                    (tr.tmin+(nfir+1)*tr.deltat) / newdeltat) * newdeltat \
                    - (nfir/2*tr.deltat)
                ioffset = int(round((newtmin_want - tr.tmin)/tr.deltat))
                if ioffset < 0:
                    ioffset = ioffset % q

            newtmin_have = tr.tmin + ioffset * tr.deltat
            newtr = tr.copy(data=False)
            newtr.deltat = newdeltat
            # because the fir kernel shifts data by nfir/2 samples:
            newtr.tmin = newtmin_have - (nfir/2*tr.deltat)
            newtr.set_ydata(tr.get_ydata()[ioffset::q].copy())
            states.set(tr, (ioffset % q - tr.data_len() % q) % q)
            target.send(newtr)

    except GeneratorExit:
        target.close()


def co_downsample(target, q, n=None, ftype='fir'):
    '''
    Successively downsample broken continuous trace data (coroutine).

    Create a coroutine which takes :py:class:`Trace` objects, downsamples
    their data and sends new :py:class:`Trace` objects containing the
    downsampled data to target. This is useful if one wants to downsample a
    long continuous time series, which is split into many successive traces,
    without producing filter artifacts and gaps at trace boundaries.

    Filter states are kept *per channel*, specifically, for each (network,
    station, location, channel) combination occurring in the input traces, a
    separate state is created and maintained. This makes it possible to
    filter multichannel or multistation data with only one
    :py:func:`co_downsample` instance.

    Filter state is reset when gaps occur. The sampling instances are chosen
    so that they occur at (or as close as possible to) even multiples of the
    sampling interval of the downsampled trace (based on system time).
    '''

    b, a, n = util.decimate_coeffs(q, n, ftype)
    return co_antialias(co_dropsamples(target, q, n), q, n, ftype)


@coroutine
def co_downsample_to(target, deltat):
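    '''
    Successively downsample broken continuous trace data to a fixed sampling
    interval (coroutine).

    Like :py:func:`co_downsample`, but takes a target sampling interval
    ``deltat`` in [s] instead of a decimation factor. The required decimation
    is realized by chaining decimation stages as needed. Raises
    :py:exc:`pyrocko.util.UnavailableDecimation` if the ratio of ``deltat``
    to the sampling interval of an incoming trace cannot be realized.
    '''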

    decimators = {}
    try:
        while True:
            tr = (yield)
            ratio = deltat / tr.deltat
            rratio = round(ratio)
            if abs(rratio - ratio)/ratio > 0.0001:
                raise util.UnavailableDecimation('ratio = %g' % ratio)

            deci_seq = tuple(x for x in util.decitab(int(rratio)) if x != 1)
            if deci_seq not in decimators:
                pipe = target
                for q in deci_seq[::-1]:
                    pipe = co_downsample(pipe, q)

                decimators[deci_seq] = pipe

            decimators[deci_seq].send(tr)

    except GeneratorExit:
        for g in decimators.values():
            g.close()


class DomainChoice(StringChoice):
    choices = [
        'time_domain',
        'frequency_domain',
        'envelope',
        'absolute',
        'cc_max_norm']


class MisfitSetup(Object):
    '''
    Contains misfit setup to be used in :py:func:`trace.misfit`.

    :param description: description of the setup
    :param norm: L-norm classifier
    :param taper: object of :py:class:`Taper`
    :param filter: object of :py:class:`FrequencyResponse`
    :param domain: ``'time_domain'``, ``'frequency_domain'``, ``'envelope'``,
        ``'absolute'`` or ``'cc_max_norm'``

    Can be dumped to a YAML file.
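
    Example (illustrative)::

        setup = MisfitSetup(
            description='setup for time domain misfit',
            norm=2,
            taper=CosTaper(0., 1., 20., 21.),
            domain='time_domain')

        print(setup.dump())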
    '''

    xmltagname = 'misfitsetup'
    description = String.T(optional=True)
    norm = Int.T(optional=False)
    taper = Taper.T(optional=False)
    filter = FrequencyResponse.T(optional=True)
    domain = DomainChoice.T(default='time_domain')


def equalize_sampling_rates(trace_1, trace_2):
    '''
    Equalize sampling rates of two traces (reduce higher sampling rate to
    lower).

    :param trace_1: :py:class:`Trace` object
    :param trace_2: :py:class:`Trace` object

    Returns the two traces. If resampling is needed, the trace with the
    higher sampling rate is replaced by a downsampled copy.
    '''

    if same_sampling_rate(trace_1, trace_2):
        return trace_1, trace_2

    if trace_1.deltat < trace_2.deltat:
        t1_out = trace_1.copy()
        t1_out.downsample_to(deltat=trace_2.deltat, snap=True)
        logger.debug('Trace downsampled (return copy of trace): %s'
                     % '.'.join(t1_out.nslc_id))
        return t1_out, trace_2

    elif trace_1.deltat > trace_2.deltat:
        t2_out = trace_2.copy()
        t2_out.downsample_to(deltat=trace_1.deltat, snap=True)
        logger.debug('Trace downsampled (return copy of trace): %s'
                     % '.'.join(t2_out.nslc_id))
        return trace_1, t2_out


def Lx_norm(u, v, norm=2):
    '''
    Calculate the misfit norm *m* and the normalization divisor *n*
    according to ``norm``.

    The normalization divisor *n* is calculated from ``v``.

    :param u: :py:class:`numpy.array`
    :param v: :py:class:`numpy.array`
    :param norm: (default = 2)

    ``u`` and ``v`` must be of same size.
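
    For a given ``norm`` :math:`x`, the returned quantities are

    .. math::

       m = \\left( \\sum_i |v_i - u_i|^x \\right)^{1/x}, \\quad
       n = \\left( \\sum_i |v_i|^x \\right)^{1/x}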
    '''

    if norm == 1:
        return (
            num.sum(num.abs(v-u)),
            num.sum(num.abs(v)))

    elif norm == 2:
        return (
            num.sqrt(num.sum((v-u)**2)),
            num.sqrt(num.sum(v**2)))

    else:
        return (
            num.power(num.sum(num.abs(num.power(v - u, norm))), 1./norm),
            num.power(num.sum(num.abs(num.power(v, norm))), 1./norm))


def do_downsample(tr, deltat):
    if abs(tr.deltat - deltat) / tr.deltat > 1e-6:
        tr = tr.copy()
        tr.downsample_to(deltat, snap=True, demean=False)
    else:
        if tr.tmin/tr.deltat > 1e-6 or tr.tmax/tr.deltat > 1e-6:
            tr = tr.copy()
            tr.snap()
    return tr


def do_extend(tr, tmin, tmax):
    if tmin < tr.tmin or tmax > tr.tmax:
        tr = tr.copy()
        tr.extend(tmin=tmin, tmax=tmax, fillmethod='repeat')

    return tr


def do_pre_taper(tr, taper):
    return tr.taper(taper, inplace=False, chop=True)


def do_fft(tr, filter):
    if filter is None:
        return tr
    else:
        ndata = tr.ydata.size
        nfft = nextpow2(ndata)
        padded = num.zeros(nfft, dtype=float)
        padded[:ndata] = tr.ydata
        spectrum = num.fft.rfft(padded)
        df = 1.0 / (tr.deltat * nfft)
        frequencies = num.arange(spectrum.size)*df
        return [tr, frequencies, spectrum]


def do_filter(inp, filter):
    if filter is None:
        return inp
    else:
        tr, frequencies, spectrum = inp
        spectrum *= filter.evaluate(frequencies)
        return [tr, frequencies, spectrum]


def do_ifft(inp):
    if isinstance(inp, Trace):
        return inp
    else:
        tr, _, spectrum = inp
        ndata = tr.ydata.size
        tr = tr.copy(data=False)
        tr.set_ydata(num.fft.irfft(spectrum)[:ndata])
        return tr


def check_alignment(t1, t2):
    if abs(t1.tmin-t2.tmin) > t1.deltat * 1e-4 or \
            abs(t1.tmax - t2.tmax) > t1.deltat * 1e-4 or \
            t1.ydata.shape != t2.ydata.shape:
        raise MisalignedTraces(
            'Cannot calculate misfit of %s and %s due to misaligned '
            'traces.' % ('.'.join(t1.nslc_id), '.'.join(t2.nslc_id)))