Coverage for /usr/local/lib/python3.11/dist-packages/pyrocko/util.py: 78%
1233 statements
« prev ^ index » next coverage.py v6.5.0, created at 2023-11-03 12:47 +0000
« prev ^ index » next coverage.py v6.5.0, created at 2023-11-03 12:47 +0000
1# http://pyrocko.org - GPLv3
2#
3# The Pyrocko Developers, 21st Century
4# ---|P------/S----------~Lg----------
5'''
6Utility functions for Pyrocko.
8.. _time-handling-mode:
10High precision time handling mode
11.................................
13Pyrocko can treat timestamps either as standard double precision (64 bit)
14floating point values, or as high precision floats (``numpy.float128`` or
15``numpy.float96``, whichever is available, `see NumPy Scalars
16<https://numpy.org/doc/stable/reference/arrays.scalars.html>`_), aliased here
17as :py:class:`~pyrocko.util.hpfloat`. High precision time stamps are required
18when handling data with sub-millisecond precision, i.e. kHz/MHz data streams
19and event catalogs derived from such data.
21Not all functions in Pyrocko and in programs depending on Pyrocko may work
22correctly with high precision times. Therefore, Pyrocko's high precision time
23handling mode has to be actively activated by user config, command line option
24or enforced within a certain script/program.
26The default high precision time handling mode can be configured globally with
27the user configuration variable
28:py:gattr:`~pyrocko.config.PyrockoConfig.use_high_precision_time`. Calling the
29function :py:func:`use_high_precision_time` overrides the default from the
30config file. This function may be called at startup of a program/script
31requiring a specific time handling mode.
33To create a valid time stamp for use in Pyrocko (e.g. in
34:py:class:`~pyrocko.model.event.Event` or
35:py:class:`~pyrocko.trace.Trace` objects), use:
37.. code-block :: python
39 import time
40 from pyrocko import util
42 # By default using mode selected in user config, override with:
43 # util.use_high_precision_time(True) # force high precision mode
44 # util.use_high_precision_time(False) # force low precision mode
46 t1 = util.str_to_time('2020-08-27 10:22:00')
47 t2 = util.str_to_time('2020-08-27 10:22:00.111222')
48 t3 = util.to_time_float(time.time())
50 # To get the appropriate float class, use:
52 time_float = util.get_time_float()
53 # -> float, numpy.float128 or numpy.float96
54 [isinstance(t, time_float) for t in [t1, t2, t3]]
55 # -> [True, True, True]
57 # Shortcut:
58 util.check_time_class(t1)
60Module content
61..............
63.. py:class:: hpfloat
65 Alias for NumPy's high precision float data type ``float128`` or
66 ``float96``, if available.
On platforms lacking support for high precision floats, an attempt to
create a ``hpfloat`` instance raises :py:exc:`HPFloatUnavailable`.
71'''
73import time
74import logging
75import os
76import sys
77import re
78import calendar
79import math
80import fnmatch
81import inspect
82import weakref
83try:
84 import fcntl
85except ImportError:
86 fcntl = None
87import optparse
88import os.path as op
89import errno
91import numpy as num
92from scipy import signal
93import pyrocko
94from pyrocko import dummy_progressbar
97from urllib.parse import urlencode, quote, unquote # noqa
98from urllib.request import Request, build_opener, HTTPDigestAuthHandler # noqa
99from urllib.request import urlopen as _urlopen # noqa
100from urllib.error import HTTPError, URLError # noqa
# Use certifi's CA bundle for HTTPS requests when available; when certifi
# (or ssl) cannot be imported, fall back to the interpreter's default
# behaviour by leaving the context unset.
try:
    import certifi
    import ssl
    g_ssl_context = ssl.create_default_context(cafile=certifi.where())
except ImportError:
    g_ssl_context = None
class URLErrorSSL(URLError):
    '''
    :py:exc:`urllib.error.URLError` variant whose message hints that the
    failure may be caused by an outdated SSL setup.
    '''

    def __init__(self, urlerror):
        # Adopt the complete state of the wrapped URLError so that
        # attributes like ``reason`` keep working on this instance.
        self.__dict__ = urlerror.__dict__.copy()

    def __str__(self):
        return (
            'Requesting web resource failed and the problem could be '
            'related to SSL. Python standard libraries on some older '
            'systems (like Ubuntu 14.04) are known to have trouble '
            "with some SSL setups of today's servers: %s"
            % URLError.__str__(self))
def urlopen_ssl_check(*args, **kwargs):
    '''
    Like :py:func:`urlopen` but re-raise SSL-related failures as
    :py:exc:`URLErrorSSL`, which carries a more helpful message.
    '''
    try:
        return urlopen(*args, **kwargs)
    except URLError as e:
        if 'SSL' in str(e):
            raise URLErrorSSL(e)

        raise
def urlopen(*args, **kwargs):
    '''
    Wrapper around :py:func:`urllib.request.urlopen`, injecting the module's
    default SSL context unless the caller supplies one explicitly.
    '''
    if g_ssl_context is not None:
        kwargs.setdefault('context', g_ssl_context)

    return _urlopen(*args, **kwargs)
# Python 2 compatibility: alias ``long`` to ``int`` on Python 3.
try:
    long
except NameError:
    long = int


# When True, progressbar() returns the non-interactive dummy implementation.
force_dummy_progressbar = False


# Optional C extension with speedups; None when unavailable.
try:
    from pyrocko import util_ext
except ImportError:
    util_ext = None


logger = logging.getLogger('pyrocko.util')
# fallbacks num_full and num_full_like are not needed anymore but
# kept here because downstream code may still use these.
try:
    num_full = num.full
except AttributeError:
    def num_full(shape, fill_value, dtype=None, order='C'):
        # Emulate numpy.full for very old NumPy versions.
        a = num.empty(shape, dtype=dtype, order=order)
        a.fill(fill_value)
        return a

try:
    num_full_like = num.full_like
except AttributeError:
    def num_full_like(arr, fill_value, dtype=None, order='K', subok=True):
        # Emulate numpy.full_like for very old NumPy versions.
        a = num.empty_like(arr, dtype=dtype, order=order, subok=subok)
        a.fill(fill_value)
        return a


# (programname, levelname) of the most recent setup_logging() call;
# retrieved by subprocess_setup_logging_args() for worker processes.
g_setup_logging_args = 'pyrocko', 'warning'
183def setup_logging(programname='pyrocko', levelname='warning'):
184 '''
185 Initialize logging.
187 :param programname: program name to be written in log
188 :param levelname: string indicating the logging level ('debug', 'info',
189 'warning', 'error', 'critical')
191 This function is called at startup by most pyrocko programs to set up a
192 consistent logging format. This is simply a shortcut to a call to
193 :py:func:`logging.basicConfig()`.
194 '''
196 global g_setup_logging_args
197 g_setup_logging_args = (programname, levelname)
199 levels = {'debug': logging.DEBUG,
200 'info': logging.INFO,
201 'warning': logging.WARNING,
202 'error': logging.ERROR,
203 'critical': logging.CRITICAL}
205 logging.basicConfig(
206 level=levels[levelname],
207 format=programname+':%(name)-25s - %(levelname)-8s - %(message)s')
def subprocess_setup_logging_args():
    '''
    Get arguments from previous call to setup_logging.

    These can be sent down to a worker process so it can setup its logging
    in the same way as the main process.
    '''
    return g_setup_logging_args
def data_file(fn):
    # Absolute path of a file inside the 'data' directory shipped next to
    # this module.
    return os.path.join(os.path.split(__file__)[0], 'data', fn)
class DownloadError(Exception):
    '''
    Raised when a download failed.
    '''
    pass
class PathExists(DownloadError):
    '''
    Raised when the download target file already exists.
    '''
    pass
def _download(url, fpath, username=None, password=None,
              force=False, method='download', stats=None,
              status_callback=None, entries_wanted=None,
              recursive=False, header=None):
    '''
    Download a file or a directory tree via HTTP(S).

    :param url: source URL; must end with ``'/'`` for recursive downloads
    :param fpath: local target path (file or directory root)
    :param username,password: optional HTTP basic auth credentials
    :param force: when downloading recursively, overwrite an existing
        ``fpath`` instead of raising :py:exc:`PathExists`
    :param method: ``'download'`` to fetch, ``'calcsize'`` to only return the
        total size in bytes
    :param stats: unused, kept for interface compatibility
    :param status_callback: callable receiving the ``status`` dict on every
        progress change
    :param entries_wanted: optional list used to filter directory entries
    :param recursive: download a whole directory tree
    :param header: optional dict of extra HTTP headers

    :raises DownloadError: on HTTP errors or inconsistent arguments
    '''

    import requests
    from requests.auth import HTTPBasicAuth
    from requests.exceptions import HTTPError as req_HTTPError

    requests.adapters.DEFAULT_RETRIES = 5
    urljoin = requests.compat.urljoin

    session = requests.Session()
    # Bugfix: was ``session.header = header`` — assigning to the nonexistent
    # singular attribute is a silent no-op; custom headers must be merged
    # into ``session.headers``.
    if header:
        session.headers.update(header)

    session.auth = None if username is None \
        else HTTPBasicAuth(username, password)

    status = {
        'ntotal_files': 0,
        'nread_files': 0,
        'ntotal_bytes_all_files': 0,
        'nread_bytes_all_files': 0,
        'ntotal_bytes_current_file': 0,
        'nread_bytes_current_file': 0,
        'finished': False
    }

    try:
        url_to_size = {}

        if callable(status_callback):
            status_callback(status)

        if not recursive and url.endswith('/'):
            raise DownloadError(
                'URL: %s appears to be a directory'
                ' but recursive download is False' % url)

        if recursive and not url.endswith('/'):
            url += '/'

        def parse_directory_tree(url, subdir=''):
            # Scrape href entries from the server's directory listing and
            # recurse into subdirectories.
            r = session.get(urljoin(url, subdir))
            r.raise_for_status()

            entries = re.findall(r'href="([a-zA-Z0-9_.-]+/?)"', r.text)

            files = sorted(set(subdir + fn for fn in entries
                               if not fn.endswith('/')))

            if entries_wanted is not None:
                # Bugfix: the original condition was a bare generator
                # expression ``(fn in wanted for wanted in entries_wanted)``
                # which is always truthy, so no filtering ever happened.
                files = [fn for fn in files
                         if any(fn in wanted for wanted in entries_wanted)]

            status['ntotal_files'] += len(files)

            dirs = sorted(set(subdir + dn for dn in entries
                              if dn.endswith('/')
                              and dn not in ('./', '../')))

            for dn in dirs:
                files.extend(parse_directory_tree(
                    url, subdir=dn))

            return files

        def get_content_length(url):
            # HEAD request with caching; returns None when the server does
            # not report a Content-Length.
            if url not in url_to_size:
                r = session.head(url, headers={'Accept-Encoding': ''})

                content_length = r.headers.get('content-length', None)
                if content_length is None:
                    logger.debug('Could not get HTTP header '
                                 'Content-Length for %s' % url)

                    content_length = None

                else:
                    content_length = int(content_length)
                    status['ntotal_bytes_all_files'] += content_length
                    if callable(status_callback):
                        status_callback(status)

                url_to_size[url] = content_length

            return url_to_size[url]

        def download_file(url, fn):
            # Stream one file to a temp path, then atomically rename.
            logger.info('starting download of %s...' % url)
            ensuredirs(fn)

            fsize = get_content_length(url)
            status['ntotal_bytes_current_file'] = fsize
            status['nread_bytes_current_file'] = 0
            if callable(status_callback):
                status_callback(status)

            r = session.get(url, stream=True, timeout=5)
            r.raise_for_status()

            frx = 0
            fn_tmp = fn + '.%i.temp' % os.getpid()
            with open(fn_tmp, 'wb') as f:
                for d in r.iter_content(chunk_size=1024):
                    f.write(d)
                    frx += len(d)

                    status['nread_bytes_all_files'] += len(d)
                    status['nread_bytes_current_file'] += len(d)
                    if callable(status_callback):
                        status_callback(status)

            os.rename(fn_tmp, fn)

            if fsize is not None and frx != fsize:
                logger.warning(
                    'HTTP header Content-Length: %i bytes does not match '
                    'download size %i bytes' % (fsize, frx))

            logger.info('finished download of %s' % url)

            status['nread_files'] += 1
            if callable(status_callback):
                status_callback(status)

        if recursive:
            if op.exists(fpath) and not force:
                raise PathExists('path %s already exists' % fpath)

            files = parse_directory_tree(url)

            dsize = 0
            for fn in files:
                file_url = urljoin(url, fn)
                dsize += get_content_length(file_url) or 0

            if method == 'calcsize':
                return dsize

            else:
                for fn in files:
                    file_url = urljoin(url, fn)
                    download_file(file_url, op.join(fpath, fn))

        else:
            status['ntotal_files'] += 1
            if callable(status_callback):
                status_callback(status)

            fsize = get_content_length(url)
            if method == 'calcsize':
                return fsize
            else:
                download_file(url, fpath)

    except req_HTTPError as e:
        # Consistency fix: use the module logger (was ``logging.warning``)
        # and chain the original exception for better tracebacks.
        logger.warning('http error: %s' % e)
        raise DownloadError(
            'could not download file(s) from: %s' % url) from e

    finally:
        status['finished'] = True
        if callable(status_callback):
            status_callback(status)
        session.close()
def download_file(
        url, fpath, username=None, password=None, status_callback=None,
        **kwargs):
    '''
    Download a single file via HTTP(S) to ``fpath``.

    Thin wrapper delegating to the non-recursive mode of the internal
    download helper.
    '''
    kwargs['recursive'] = False
    kwargs['status_callback'] = status_callback
    return _download(url, fpath, username, password, **kwargs)
def download_dir(
        url, fpath, username=None, password=None, status_callback=None,
        **kwargs):
    '''
    Download a directory tree via HTTP(S) below ``fpath``.

    Thin wrapper delegating to the recursive mode of the internal download
    helper.
    '''
    kwargs['recursive'] = True
    kwargs['status_callback'] = status_callback
    return _download(url, fpath, username, password, **kwargs)
class HPFloatUnavailable(Exception):
    '''
    Raised when a high precision float type would be required but is not
    available.
    '''
    pass
class dummy_hpfloat(object):
    '''
    Placeholder for :py:class:`hpfloat` on platforms without extended
    precision float support; any attempt to instantiate it raises
    :py:exc:`HPFloatUnavailable`.
    '''

    def __init__(self, *args, **kwargs):
        raise HPFloatUnavailable(
            'NumPy lacks support for float128 or float96 data type on this '
            'platform.')
# Pick the widest extended-precision float NumPy offers on this platform;
# fall back to the raising placeholder when neither is available.
if hasattr(num, 'float128'):
    hpfloat = num.float128

elif hasattr(num, 'float96'):
    hpfloat = num.float96

else:
    hpfloat = dummy_hpfloat
# Effective time types; lazily fixed (exactly once) by
# _setup_high_precision_time_mode().
g_time_float = None
g_time_dtype = None


class TimeFloatSettingError(Exception):
    '''
    Raised when the time handling mode cannot be (re)configured.
    '''
    pass
def use_high_precision_time(enabled):
    '''
    Globally force a specific time handling mode.

    See :ref:`High precision time handling mode <time-handling-mode>`.

    :param enabled: enable/disable use of high precision time type
    :type enabled: bool

    This function should be called before handling/reading any time data.
    It can only be called once.

    Special attention is required when using multiprocessing on a platform
    which does not use fork under the hood. In such cases, the desired setting
    must be set also in the subprocess.
    '''
    _setup_high_precision_time_mode(enabled_app=enabled)
def _setup_high_precision_time_mode(enabled_app=False):
    '''
    Fix the global time handling mode from config, environment and app.

    Precedence (later overrides earlier): user config variable
    ``use_high_precision_time``, environment variable
    ``PYROCKO_USE_HIGH_PRECISION_TIME``, application override.
    '''
    global g_time_float
    global g_time_dtype

    # The mode may only be fixed once for the lifetime of the process.
    if not (g_time_float is None and g_time_dtype is None):
        raise TimeFloatSettingError(
            'Cannot set time handling mode: too late, it has already been '
            'fixed by an earlier call.')

    from pyrocko import config

    conf = config.config()
    enabled_config = conf.use_high_precision_time

    enabled_env = os.environ.get('PYROCKO_USE_HIGH_PRECISION_TIME', None)
    if enabled_env is not None:
        try:
            enabled_env = int(enabled_env) == 1
        except ValueError:
            raise TimeFloatSettingError(
                'Environment variable PYROCKO_USE_HIGH_PRECISION_TIME '
                'should be set to 0 or 1.')

    enabled = enabled_config
    mode_from = 'config variable `use_high_precision_time`'
    notify = enabled

    if enabled_env is not None:
        if enabled_env != enabled:
            notify = True
        enabled = enabled_env
        mode_from = 'environment variable `PYROCKO_USE_HIGH_PRECISION_TIME`'

    # NOTE(review): the default ``enabled_app=False`` is not ``None``, so
    # this branch also runs for plain internal calls (e.g. from
    # get_time_float()), overriding config/env with False — confirm whether
    # the default was meant to be ``None``.
    if enabled_app is not None:
        if enabled_app != enabled:
            notify = True
        enabled = enabled_app
        mode_from = 'application override'

    logger.debug('''
Pyrocko high precision time mode selection (latter override earlier):
  config: %s
  env: %s
  app: %s
 -> enabled: %s'''.lstrip() % (
        enabled_config, enabled_env, enabled_app, enabled))

    if notify:
        logger.info('Pyrocko high precision time mode %s by %s.' % (
            'activated' if enabled else 'deactivated',
            mode_from))

    # Fix the effective types; hpfloat doubles as dtype in high precision
    # mode.
    if enabled:
        g_time_float = hpfloat
        g_time_dtype = hpfloat
    else:
        g_time_float = float
        g_time_dtype = num.float64
def get_time_float():
    '''
    Get the effective float class for timestamps.

    See :ref:`High precision time handling mode <time-handling-mode>`.

    :returns: :py:class:`float` or :py:class:`hpfloat`, depending on the
        current time handling mode
    '''
    global g_time_float

    # First access fixes the mode from config/environment defaults.
    if g_time_float is None:
        _setup_high_precision_time_mode()

    return g_time_float
def get_time_dtype():
    '''
    Get effective NumPy float class to handle timestamps.

    See :ref:`High precision time handling mode <time-handling-mode>`.
    '''
    global g_time_dtype

    # First access fixes the mode from config/environment defaults.
    if g_time_dtype is None:
        _setup_high_precision_time_mode()

    return g_time_dtype
def to_time_float(t):
    '''
    Convert float to valid time stamp in the current time handling mode.

    See :ref:`High precision time handling mode <time-handling-mode>`.
    '''
    return get_time_float()(t)
class TimestampTypeError(ValueError):
    '''
    Raised by :py:func:`check_time_class` on time type mismatches.
    '''
    pass
def check_time_class(t, error='raise'):
    '''
    Type-check variable against current time handling mode.

    See :ref:`High precision time handling mode <time-handling-mode>`.
    '''

    # Zero is accepted in any mode (commonly used as a null value).
    if t == 0.0:
        return

    required = get_time_float()
    if isinstance(t, required):
        return

    message = (
        'Timestamp %g is of type %s but should be of type %s with '
        "Pyrocko's currently selected time handling mode.\n\n"
        'See https://pyrocko.org/docs/current/library/reference/util.html'
        '#high-precision-time-handling-mode' % (
            t, type(t), required))

    if error == 'raise':
        raise TimestampTypeError(message)
    elif error == 'warn':
        logger.warning(message)
    else:
        assert False
class Stopwatch(object):
    '''
    Simple stopwatch to measure elapsed wall clock time.

    Usage::

        s = Stopwatch()
        time.sleep(1)
        print(s())
        time.sleep(1)
        print(s())
    '''

    def __init__(self):
        # Wall-clock start time (seconds since epoch).
        self.start = time.time()

    def __call__(self):
        # Seconds elapsed since construction.
        return time.time() - self.start
def wrap(text, line_length=80):
    '''
    Paragraph and list-aware wrapping of text.

    :param text: input text
    :param line_length: target maximum line length

    :returns: list of output lines
    '''

    text = text.strip('\n')
    at_lineend = re.compile(r' *\n')
    at_para = re.compile(r'((^|(\n\s*)?\n)(\s+[*] )|\n\s*\n)')

    # Split once; the original recomputed at_para.split(text) three times.
    parts = at_para.split(text)
    paragraphs = parts[::5]
    listindents = parts[4::5]
    newlist = parts[3::5]

    listindents[0:0] = [None]
    listindents.append(True)
    newlist.append(None)

    det_indent = re.compile(r'^ *')

    outlines = []
    for ip, p in enumerate(paragraphs):
        if not p:
            continue

        if listindents[ip] is None:
            _indent = det_indent.findall(p)[0]
            findent = _indent
        else:
            # List item: first line keeps the bullet, continuation lines
            # are indented to align below it.
            findent = listindents[ip]
            _indent = ' ' * len(findent)

        ll = line_length - len(_indent)
        llf = ll

        oldlines = [s.strip() for s in at_lineend.split(p.rstrip())]
        p1 = ' '.join(oldlines)
        possible = re.compile(r'(^.{1,%i}|.{1,%i})( |$)' % (llf, ll))
        for imatch, match in enumerate(possible.finditer(p1)):
            parout = match.group(1)
            if imatch == 0:
                outlines.append(findent + parout)
            else:
                outlines.append(_indent + parout)

        # Blank line between paragraphs / around list boundaries.
        if ip != len(paragraphs)-1 and (
                listindents[ip] is None
                or newlist[ip] is not None
                or listindents[ip+1] is None):

            outlines.append('')

    return outlines
def ewrap(lines, width=80, indent=''):
    '''
    Arrange short strings in equally padded columns.

    :param lines: sequence of strings
    :param width: target total line width
    :param indent: prefix prepended to every output row

    :returns: single string with newline-separated rows
    '''
    lines = list(lines)
    if not lines:
        return ''

    fwidth = max(len(s) for s in lines)
    # Bugfix: the column count previously used a hard-coded 80 instead of
    # the ``width`` parameter.
    nx = max(1, (width - len(indent)) // (fwidth + 1))
    i = 0
    rows = []
    while i < len(lines):
        rows.append(indent + ' '.join(x.ljust(fwidth) for x in lines[i:i+nx]))
        i += nx

    return '\n'.join(rows)
class BetterHelpFormatter(optparse.IndentedHelpFormatter):
    '''
    Optparse help formatter using the paragraph/list-aware :py:func:`wrap`.
    '''

    def __init__(self, *args, **kwargs):
        optparse.IndentedHelpFormatter.__init__(self, *args, **kwargs)

    def format_option(self, option):
        '''
        From IndentedHelpFormatter but using a different wrap method.
        '''

        help_text_position = 4 + self.current_indent
        help_text_width = self.width - help_text_position

        opts = self.option_strings[option]
        opts = '%*s%s' % (self.current_indent, '', opts)
        # NOTE(review): ``help_text`` is only bound when option.help is set,
        # but it is read unconditionally below — confirm that callers never
        # pass options without help text.
        if option.help:
            help_text = self.expand_default(option)

        # Option and help on one line if they fit, otherwise the wrapped
        # help text goes on the following lines.
        if self.help_position + len(help_text) + 1 <= self.width:
            lines = [
                '',
                '%-*s %s' % (self.help_position, opts, help_text),
                '']
        else:
            lines = ['']
            lines.append(opts)
            lines.append('')
            if option.help:
                help_lines = wrap(help_text, help_text_width)
                lines.extend(['%*s%s' % (help_text_position, '', line)
                              for line in help_lines])
            lines.append('')

        return '\n'.join(lines)

    def format_description(self, description):
        if not description:
            return ''

        if self.current_indent == 0:
            lines = []
        else:
            lines = ['']

        lines.extend(wrap(description, self.width - self.current_indent))
        if self.current_indent == 0:
            lines.append('\n')

        return '\n'.join(
            ['%*s%s' % (self.current_indent, '', line) for line in lines])
class ProgressBar:
    '''
    Thin adapter exposing :py:mod:`pyrocko.progress` through a simple
    update/finish interface.
    '''

    def __init__(self, label, n):
        from pyrocko.progress import progress
        self._context = progress.view()
        self._context.__enter__()
        self._task = progress.task(label, n)

    def update(self, i):
        # Advance the task to absolute position i.
        self._task.update(i)

    def finish(self):
        self._task.done()
        if self._context:
            # NOTE(review): __exit__ is called without the usual
            # (type, value, traceback) arguments — confirm the progress view
            # context manager accepts an argument-less __exit__.
            self._context.__exit__()
            self._context = None
def progressbar(label, maxval):
    '''
    Create a progress bar; a dummy (non-interactive) one when globally
    forced via ``force_dummy_progressbar``.
    '''
    if not force_dummy_progressbar:
        return ProgressBar(label, maxval)

    return dummy_progressbar.ProgressBar(maxval=maxval).start()
def progress_beg(label):
    '''
    Notify user that an operation has started.

    :param label: name of the operation

    To be used in conjunction with :py:func:`progress_end`.
    '''
    stream = sys.stderr
    stream.write(label)
    stream.flush()
def progress_end(label=''):
    '''
    Notify user that an operation has ended.

    :param label: name of the operation

    To be used in conjunction with :py:func:`progress_beg`.
    '''
    stream = sys.stderr
    stream.write(' done. %s\n' % label)
    stream.flush()
class ArangeError(ValueError):
    '''
    Raised by :py:func:`arange2` for inconsistent range specifications.
    '''
    pass
def arange2(start, stop, step, dtype=float, epsilon=1e-6, error='raise'):
    '''
    Return evenly spaced numbers over a specified interval.

    Like :py:func:`numpy.arange` but returning floating point numbers by
    default and with defined behaviour when stepsize is inconsistent with
    interval bounds. It is considered inconsistent if the difference between
    the closest multiple of ``step`` and ``stop`` is larger than ``epsilon *
    step``. Inconsistencies are handled according to the ``error`` parameter.
    If it is set to ``'raise'`` an exception of type :py:exc:`ArangeError` is
    raised. If it is set to ``'round'``, ``'floor'``, or ``'ceil'``, ``stop``
    is silently changed to the closest, the next smaller, or next larger
    multiple of ``step``, respectively.
    '''

    assert error in ('raise', 'round', 'floor', 'ceil')

    start = dtype(start)
    stop = dtype(stop)
    step = dtype(step)

    # How the (possibly fractional) number of steps is quantized depends on
    # the error handling mode; 'raise' and 'round' both round to nearest.
    if error == 'floor':
        quantize = math.floor
    elif error == 'ceil':
        quantize = math.ceil
    else:
        quantize = round

    nvals = int(quantize((stop - start) / step)) + 1
    stop_implied = start + (nvals - 1) * step

    if error == 'raise' and abs(stop_implied - stop) > step * epsilon:
        raise ArangeError(
            'inconsistent range specification: start=%g, stop=%g, step=%g'
            % (start, stop, step))

    result = num.arange(nvals, dtype=dtype)
    result *= step
    result += start
    return result
def polylinefit(x, y, n_or_xnodes):
    '''
    Fit piece-wise linear function to data.

    :param x,y: arrays with coordinates of data
    :param n_or_xnodes: int, number of segments or x coordinates of polyline

    :returns: `(xnodes, ynodes, rms_error)` arrays with coordinates of
        polyline, root-mean-square error
    '''

    x = num.asarray(x)
    y = num.asarray(y)

    if isinstance(n_or_xnodes, int):
        n = n_or_xnodes
        xmin = x.min()
        xmax = x.max()
        xnodes = num.linspace(xmin, xmax, n+1)
    else:
        xnodes = num.asarray(n_or_xnodes)
        n = xnodes.size - 1

    assert len(x) == len(y)
    assert n > 0

    ndata = len(x)
    # Least-squares design matrix: one (slope, offset) column pair per
    # segment, plus n-1 extra rows enforcing continuity at interior nodes.
    a = num.zeros((ndata+(n-1), n*2))
    for i in range(n):
        xmin_block = xnodes[i]
        xmax_block = xnodes[i+1]
        if i == n-1:  # don't lose the last point
            indices = num.where(
                num.logical_and(xmin_block <= x, x <= xmax_block))[0]
        else:
            indices = num.where(
                num.logical_and(xmin_block <= x, x < xmax_block))[0]

        a[indices, i*2] = x[indices]
        a[indices, i*2+1] = 1.0

        # Heavy weight makes the continuity constraints effectively hard.
        w = float(ndata)*100.
        if i < n-1:
            a[ndata+i, i*2] = xmax_block*w
            a[ndata+i, i*2+1] = 1.0*w
            a[ndata+i, i*2+2] = -xmax_block*w
            a[ndata+i, i*2+3] = -1.0*w

    d = num.concatenate((y, num.zeros(n-1)))
    model = num.linalg.lstsq(a, d, rcond=-1)[0].reshape((n, 2))

    # Node values: at interior nodes, average the predictions of the two
    # adjoining segments.
    ynodes = num.zeros(n+1)
    ynodes[:n] = model[:, 0]*xnodes[:n] + model[:, 1]
    ynodes[1:] += model[:, 0]*xnodes[1:] + model[:, 1]
    ynodes[1:n] *= 0.5

    rms_error = num.sqrt(num.mean((num.interp(x, xnodes, ynodes) - y)**2))

    return xnodes, ynodes, rms_error
def plf_integrate_piecewise(x_edges, x, y):
    '''
    Calculate definite integral of piece-wise linear function on intervals.

    Use trapezoidal rule to calculate definite integral of a piece-wise linear
    function for a series of consecutive intervals. ``x_edges`` and ``x`` must
    be sorted.

    :param x_edges: array with edges of the intervals
    :param x,y: arrays with coordinates of piece-wise linear function's
        control points
    '''

    # Merge control points and interval edges into one sorted sequence.
    x_all = num.concatenate((x, x_edges))
    ii = num.argsort(x_all)
    y_edges = num.interp(x_edges, x, y)
    y_all = num.concatenate((y, y_edges))
    xs = x_all[ii]
    ys = y_all[ii]
    # Cumulative trapezoidal integral, scattered back into y_all so that the
    # trailing len(y_edges) entries correspond to the interval edges;
    # differencing those yields the per-interval integrals.
    y_all[ii[1:]] = num.cumsum((xs[1:] - xs[:-1]) * 0.5 * (ys[1:] + ys[:-1]))
    return num.diff(y_all[-len(y_edges):])
def diff_fd_1d_4o(dt, data):
    '''
    Approximate first derivative of an array (fourth order, central FD).

    :param dt: sampling interval
    :param data: NumPy array with data samples

    :returns: NumPy array with same shape as input

    Interior points are approximated to fourth order, edge points to first
    order right- or left-sided respectively, points next to edge to second
    order central.
    '''
    import scipy.signal

    result = num.empty_like(data, dtype=float)
    npoints = data.size

    if npoints >= 5:
        # Fourth-order central stencil, applied via FIR filtering.
        stencil = [-1., +8., 0., -8., 1.]
        result[2:-2] = scipy.signal.lfilter(
            stencil, [1.], data)[4:] / (12.*dt)

    if npoints >= 3:
        # Second-order central difference next to the edges.
        result[1] = (data[2] - data[0]) / (2. * dt)
        result[-2] = (data[-1] - data[-3]) / (2. * dt)

    if npoints >= 2:
        # First-order one-sided differences at the edges.
        result[0] = (data[1] - data[0]) / dt
        result[-1] = (data[-1] - data[-2]) / dt

    if npoints == 1:
        result[0] = 0.0

    return result
def diff_fd_1d_2o(dt, data):
    '''
    Approximate first derivative of an array (second order, central FD).

    :param dt: sampling interval
    :param data: NumPy array with data samples

    :returns: NumPy array with same shape as input

    Interior points are approximated to second order, edge points to first
    order right- or left-sided respectively.

    Uses :py:func:`numpy.gradient`.
    '''
    return num.gradient(data, dt)
def diff_fd_2d_4o(dt, data):
    '''
    Approximate second derivative of an array (fourth order, central FD).

    :param dt: sampling interval
    :param data: NumPy array with data samples

    :returns: NumPy array with same shape as input

    Interior points are approximated to fourth order, next-to-edge points to
    second order, edge points repeated.
    '''
    import scipy.signal

    result = num.empty_like(data, dtype=float)
    npoints = data.size

    if npoints >= 5:
        # Fourth-order central stencil for the second derivative.
        stencil = [-1., +16., -30., +16., -1.]
        result[2:-2] = scipy.signal.lfilter(
            stencil, [1.], data)[4:] / (12.*dt**2)

    if npoints >= 3:
        # Second-order one-sided values, repeated over the two edge points.
        result[:2] = (data[2] - 2.0 * data[1] + data[0]) / dt**2
        result[-2:] = (data[-1] - 2.0 * data[-2] + data[-3]) / dt**2
    else:
        result[:] = 0.0

    return result
def diff_fd_2d_2o(dt, data):
    '''
    Approximate second derivative of an array (second order, central FD).

    :param dt: sampling interval
    :param data: NumPy array with data samples

    :returns: NumPy array with same shape as input

    Interior points are approximated to second order, edge points repeated.
    '''
    import scipy.signal

    result = num.empty_like(data, dtype=float)

    if data.size < 3:
        result[:] = 0.0
    else:
        # Second-order central stencil [1, -2, 1] applied via FIR filter.
        result[1:-1] = scipy.signal.lfilter(
            [1., -2., 1.], [1.], data)[2:] / (dt**2)
        # Repeat the outermost interior values at the edges.
        result[0] = result[1]
        result[-1] = result[-2]

    return result
def diff_fd(n, order, dt, data):
    '''
    Approximate 1st or 2nd derivative of an array.

    :param n: 1 for first derivative, 2 for second
    :param order: order of the approximation 2 and 4 are supported
    :param dt: sampling interval
    :param data: NumPy array with data samples

    :returns: NumPy array with same shape as input

    This is a frontend to the functions :py:func:`diff_fd_1d_2o`,
    :py:func:`diff_fd_1d_4o`, :py:func:`diff_fd_2d_2o`, and
    :py:func:`diff_fd_2d_4o`.

    Raises :py:exc:`ValueError` for unsupported `n` or `order`.
    '''

    dispatch = {
        1: {2: diff_fd_1d_2o, 4: diff_fd_1d_4o},
        2: {2: diff_fd_2d_2o, 4: diff_fd_2d_4o}}

    if n not in dispatch:
        raise ValueError(
            'pyrocko.util.diff_fd: '
            'Only 1st and 2sd derivatives are supported.')

    by_order = dispatch[n]

    if order not in by_order:
        raise ValueError(
            'pyrocko.util.diff_fd: '
            'Order %i is not supported for %s derivative. Supported: %s' % (
                order, ['', '1st', '2nd'][n],
                ', '.join('%i' % order for order in sorted(by_order.keys()))))

    return by_order[order](dt, data)
class GlobalVars(object):
    '''
    Module-wide mutable state: caches and lookup tables.
    '''
    reuse_store = dict()             # generic reuse cache
    decitab_nmax = 0                 # largest factor covered by decitab
    decitab = {}                     # decimation sequences, see mk_decitab()
    decimate_fir_coeffs = {}         # cached FIR filter coefficients
    decimate_fir_remez_coeffs = {}   # cached Remez FIR coefficients
    decimate_iir_coeffs = {}         # cached IIR (Chebyshev) coefficients
    re_frac = None                   # lazily compiled regex — presumably
                                     # set elsewhere in this module
def decimate_coeffs(q, n=None, ftype='iir'):
    '''
    Get (and cache) filter coefficients for :py:func:`decimate`.

    :param q: integer downsampling factor
    :param n: filter order; when ``None``, a default depending on ``ftype``
        is used
    :param ftype: ``'iir'``, ``'fir'`` or ``'fir-remez'``

    :returns: tuple ``(b, a, n)`` with numerator/denominator coefficients
        and the effective filter order
    '''

    q = int(q)

    if n is None:
        n = {'fir': 30, 'fir-remez': 45*q}.get(ftype, 8)

    if ftype == 'fir':
        cache = GlobalVars.decimate_fir_coeffs
        key = (n, 1./q)
        if key not in cache:
            cache[key] = signal.firwin(n+1, .75/q, window='hamming')

        return cache[key], [1.], n

    elif ftype == 'fir-remez':
        cache = GlobalVars.decimate_fir_remez_coeffs
        key = (n, 1./q)
        if key not in cache:
            cache[key] = signal.remez(
                n+1, (0., .75/q, 1./q, 1.),
                (1., 0.), fs=2, weight=(1, 50))

        return cache[key], [1.], n

    else:
        cache = GlobalVars.decimate_iir_coeffs
        key = (n, 0.05, 0.8/q)
        if key not in cache:
            cache[key] = signal.cheby1(n, 0.05, 0.8/q)

        b, a = cache[key]
        return b, a, n
def decimate(x, q, n=None, ftype='iir', zi=None, ioff=0):
    '''
    Downsample the signal x by an integer factor q, using an order n filter

    By default, an order 8 Chebyshev type I filter is used or a 30 point FIR
    filter with hamming window if ftype is 'fir'.

    :param x: the signal to be downsampled (1D :class:`numpy.ndarray`)
    :param q: the downsampling factor
    :param n: order of the filter (1 less than the length of the filter for a
        `fir` filter)
    :param ftype: type of the filter; can be `iir`, `fir` or `fir-remez`

    :returns: the downsampled signal (1D :class:`numpy.ndarray`), or a tuple
        ``(y, zf)`` with the final filter state when ``zi`` is given
    '''

    b, a, n = decimate_coeffs(q, n, ftype)

    # zi=True requests stateful filtering starting from a zero state.
    if zi is None or zi is True:
        state = num.zeros(max(len(a), len(b)) - 1, dtype=float)
    else:
        state = zi

    y, zf = signal.lfilter(b, a, x, zi=state)
    y_decimated = y[n//2 + ioff::q].copy()

    return (y_decimated, zf) if zi is not None else y_decimated
class UnavailableDecimation(Exception):
    '''
    Exception raised by :py:func:`decitab` for unavailable decimation factors.
    '''

    pass
def gcd(a, b, epsilon=1e-7):
    '''
    Greatest common divisor (Euclidean algorithm).

    The loop terminates once the remainder drops to ``epsilon * a`` or
    below, so the function also works approximately for float arguments.
    '''
    current, remainder = a, b
    while remainder > epsilon * current:
        current, remainder = remainder, current % remainder

    return current
def lcm(a, b):
    '''
    Least common multiple.
    '''
    return a*b // gcd(a, b)
def mk_decitab(nmax=100):
    '''
    Make table with decimation sequences.

    Decimation from one sampling rate to a lower one is achieved by a
    successive application of :py:func:`decimate` with small integer
    downsampling factors (because using large downsampling factors can make
    the decimation unstable or slow). This function sets up a table with
    downsample sequences for factors up to ``nmax``.
    '''

    tab = GlobalVars.decitab
    # Enumerate products i*j*k*l*m of factors 1..9 with non-increasing
    # factors (j <= i, k <= j, ...) to avoid duplicate sequences; the first
    # sequence found for each product is kept. Early breaks prune branches
    # whose partial product already exceeds nmax.
    for i in range(1, 10):
        for j in range(1, i+1):
            for k in range(1, j+1):
                for l_ in range(1, k+1):
                    for m in range(1, l_+1):
                        p = i*j*k*l_*m
                        if p > nmax:
                            break
                        if p not in tab:
                            tab[p] = (i, j, k, l_, m)
                    if i*j*k*l_ > nmax:
                        break
                if i*j*k > nmax:
                    break
            if i*j > nmax:
                break
        if i > nmax:
            break

    GlobalVars.decitab_nmax = nmax
def zfmt(n):
    '''
    Zero-padded integer format string wide enough for values below ``n``.
    '''
    ndigits = int(math.log10(n - 1)) + 1
    return '%%0%ii' % ndigits
def _year_to_time(year):
    # System timestamp of Jan 1st, 00:00:00 (UTC) of the given year.
    return to_time_float(calendar.timegm((year, 1, 1, 0, 0, 0)))
def _working_year(year):
    # Probe whether the system time conversion functions round-trip the
    # given year consistently through timegm/gmtime/strftime and this
    # module's str_to_time/time_to_str.
    try:
        broken_down = (year, 1, 1, 0, 0, 0)
        t = calendar.timegm(broken_down)
        tt = time.gmtime(t)
        if tuple(tt)[:6] != broken_down:
            return False

        expected = '%i-01-01 00:00:00' % year
        formatted = time.strftime('%Y-%m-%d %H:%M:%S', tt)
        if formatted != expected:
            return False

        fmt = '%Y-%m-%d %H:%M:%S'
        t3 = str_to_time(formatted, format=fmt)
        if time_to_str(t3, format=fmt) != formatted:
            return False

    except Exception:
        # Any conversion failure means the year is unsupported.
        return False

    return True
def working_system_time_range(year_min_lim=None, year_max_lim=None):
    '''
    Check time range supported by the system's time conversion functions.

    Returns system time stamps of start of year of first/last fully supported
    year span. If this is before 1900 or after 2100, return first/last century
    which is fully supported.

    :returns: ``(tmin, tmax, year_min, year_max)``
    '''

    year0 = 2000
    year_min = year0
    year_max = year0

    # Year offsets probed around year0: 0..100 in steps of one year, then
    # whole centuries up to 2000 years away.
    offsets = list(range(101)) + [200 + i*100 for i in range(19)]

    for offset in offsets:
        year = year0 - offset
        if year_min_lim is not None and year < year_min_lim:
            year_min = year_min_lim
            break
        elif not _working_year(year):
            break
        else:
            year_min = year

    for offset in offsets:
        year = year0 + offset
        if year_max_lim is not None and year > year_max_lim:
            year_max = year_max_lim
            break
        elif not _working_year(year + 1):
            break
        else:
            year_max = year

    return (
        _year_to_time(year_min),
        _year_to_time(year_max),
        year_min, year_max)
1299g_working_system_time_range = None
def get_working_system_time_range():
    '''
    Caching variant of :py:func:`working_system_time_range`.

    The first call's result is memoized in a module-level variable and
    returned on all subsequent calls.
    '''

    global g_working_system_time_range

    result = g_working_system_time_range
    if result is None:
        result = working_system_time_range()
        g_working_system_time_range = result

    return result
def is_working_time(t):
    # True if ``t`` lies within the timestamp span that the system's time
    # conversion functions handle correctly.
    tmin, tmax = get_working_system_time_range()[:2]
    return tmin <= t <= tmax
def julian_day_of_year(timestamp):
    '''
    Get the day number after the 1st of January of year in ``timestamp``.

    :returns: day number as int
    '''

    tt = time.gmtime(int(timestamp))
    return tt.tm_yday
def hour_start(timestamp):
    '''
    Get beginning of hour for any point in time.

    :param timestamp: time instant as system timestamp (in seconds)

    :returns: instant of hour start as system timestamp
    '''

    year, month, day, hour = time.gmtime(int(timestamp))[:4]
    return to_time_float(calendar.timegm((year, month, day, hour, 0, 0)))
def day_start(timestamp):
    '''
    Get beginning of day for any point in time.

    :param timestamp: time instant as system timestamp (in seconds)

    :returns: instant of day start as system timestamp
    '''

    year, month, day = time.gmtime(int(timestamp))[:3]
    return to_time_float(calendar.timegm((year, month, day, 0, 0, 0)))
def month_start(timestamp):
    '''
    Get beginning of month for any point in time.

    :param timestamp: time instant as system timestamp (in seconds)

    :returns: instant of month start as system timestamp
    '''

    year, month = time.gmtime(int(timestamp))[:2]
    return to_time_float(calendar.timegm((year, month, 1, 0, 0, 0)))
def year_start(timestamp):
    '''
    Get beginning of year for any point in time.

    :param timestamp: time instant as system timestamp (in seconds)

    :returns: instant of year start as system timestamp
    '''

    year = time.gmtime(int(timestamp)).tm_year
    return to_time_float(calendar.timegm((year, 1, 1, 0, 0, 0)))
def iter_days(tmin, tmax):
    '''
    Yields begin and end of days until given time span is covered.

    :param tmin,tmax: input time span

    :yields: tuples with (begin, end) of days as system timestamps
    '''

    tbeg = day_start(tmin)
    while tbeg < tmax:
        # Stepping 26 h from the day start always lands in the following
        # day; day_start snaps back to its beginning.
        tnext = day_start(tbeg + 26*60*60)
        yield tbeg, tnext
        tbeg = tnext
def iter_months(tmin, tmax):
    '''
    Yields begin and end of months until given time span is covered.

    :param tmin,tmax: input time span

    :yields: tuples with (begin, end) of months as system timestamps
    '''

    tbeg = month_start(tmin)
    while tbeg < tmax:
        # 33 days past a month start always falls into the next month.
        tnext = month_start(tbeg + 24*60*60*33)
        yield tbeg, tnext
        tbeg = tnext
def iter_years(tmin, tmax):
    '''
    Yields begin and end of years until given time span is covered.

    :param tmin,tmax: input time span

    :yields: tuples with (begin, end) of years as system timestamps
    '''

    tbeg = year_start(tmin)
    while tbeg < tmax:
        # 369 days past a year start always falls into the next year.
        tnext = year_start(tbeg + 24*60*60*369)
        yield tbeg, tnext
        tbeg = tnext
def today():
    # Start of the current day (UTC) as system timestamp.
    return day_start(time.time())
def tomorrow():
    # Start of the next day (UTC) as system timestamp.
    return day_start(time.time() + 24*60*60)
def decitab(n):
    '''
    Get integer decimation sequence for given downampling factor.

    :param n: target decimation factor

    :returns: tuple with downsampling sequence

    :raises UnavailableDecimation: if no decimation sequence exists for ``n``
    '''

    # Grow the table on demand; doubling leaves headroom for later requests.
    if n > GlobalVars.decitab_nmax:
        mk_decitab(n*2)
    if n not in GlobalVars.decitab:
        raise UnavailableDecimation('ratio = %g' % n)
    return GlobalVars.decitab[n]
def ctimegm(s, format='%Y-%m-%d %H:%M:%S'):
    '''
    Convert string representing UTC time to system time.

    :param s: string to be interpreted
    :param format: format string passed to :py:func:`time.strptime`

    :returns: system time stamp

    Interpretes string with format ``'%Y-%m-%d %H:%M:%S'``, using strptime.

    .. note::
        This function is to be replaced by :py:func:`str_to_time`.
    '''

    tt = time.strptime(s, format)
    return calendar.timegm(tt)
def gmctime(t, format='%Y-%m-%d %H:%M:%S'):
    '''
    Get string representation from system time, UTC.

    Produces string with format ``'%Y-%m-%d %H:%M:%S'``, using strftime.

    .. note::
        This function is to be replaced by :py:func:`time_to_str`.
    '''

    tt = time.gmtime(t)
    return time.strftime(format, tt)
def gmctime_v(t, format='%a, %d %b %Y %H:%M:%S'):
    '''
    Get string representation from system time, UTC. Same as
    :py:func:`gmctime` but with a more verbose default format.

    .. note::
        This function is to be replaced by :py:func:`time_to_str`.
    '''

    tt = time.gmtime(t)
    return time.strftime(format, tt)
def gmctime_fn(t, format='%Y-%m-%d_%H-%M-%S'):
    '''
    Get string representation from system time, UTC. Same as
    :py:func:`gmctime` but with a default usable in filenames.

    .. note::
        This function is to be replaced by :py:func:`time_to_str`.
    '''

    tt = time.gmtime(t)
    return time.strftime(format, tt)
class TimeStrError(Exception):
    '''
    Raised for invalid time strings.

    Base class for time string errors raised by :py:func:`str_to_time` and
    :py:func:`time_to_str`.
    '''
    pass
class FractionalSecondsMissing(TimeStrError):
    '''
    Exception raised by :py:func:`str_to_time` when the given string lacks
    fractional seconds.

    Only raised for formats with a mandatory ``'.FRAC'``/``'.xFRAC'`` part.
    '''

    pass
class FractionalSecondsWrongNumberOfDigits(TimeStrError):
    '''
    Exception raised by :py:func:`str_to_time` when the given string has an
    incorrect number of digits in the fractional seconds part.

    Only raised for formats ending with ``'.xFRAC'`` (x = 1, 2, or 3).
    '''

    pass
1537def _endswith_n(s, endings):
1538 for ix, x in enumerate(endings):
1539 if s.endswith(x):
1540 return ix
1541 return -1
def str_to_time(s, format='%Y-%m-%d %H:%M:%S.OPTFRAC'):
    '''
    Convert string representing UTC time to floating point system time.

    :param s: string representing UTC time
    :param format: time string format
    :returns: system time stamp as floating point value

    Uses the semantics of :py:func:`time.strptime` but allows for fractional
    seconds. If the format ends with ``'.FRAC'``, anything after a dot is
    interpreted as fractional seconds. If the format ends with ``'.OPTFRAC'``,
    the fractional part, including the dot is made optional. The latter has the
    consequence, that the time strings and the format may not contain any other
    dots. If the format ends with ``'.xFRAC'`` where x is 1, 2, or 3, it is
    ensured, that exactly that number of digits are present in the fractional
    seconds.

    :raises TimeStrError: when the string does not match the format
    '''

    time_float = get_time_float()

    # Fast path: use the C extension when it is available.
    if util_ext is not None:
        try:
            t, tfrac = util_ext.stt(s, format)
        except util_ext.UtilExtError as e:
            raise TimeStrError(
                '%s, string=%s, format=%s' % (str(e), s, format))

        return time_float(t)+tfrac

    # Pure Python fallback: split off the fractional seconds manually and
    # let time.strptime handle the remainder.
    fracsec = 0.
    fixed_endings = '.FRAC', '.1FRAC', '.2FRAC', '.3FRAC'

    iend = _endswith_n(format, fixed_endings)
    if iend != -1:
        dotpos = s.rfind('.')
        if dotpos == -1:
            raise FractionalSecondsMissing(
                'string=%s, format=%s' % (s, format))

        # For '.xFRAC' endings, exactly x fractional digits are required.
        if iend > 0 and iend != (len(s)-dotpos-1):
            raise FractionalSecondsWrongNumberOfDigits(
                'string=%s, format=%s' % (s, format))

        format = format[:-len(fixed_endings[iend])]
        fracsec = float(s[dotpos:])
        s = s[:dotpos]

    elif format.endswith('.OPTFRAC'):
        # Fractional part (including the dot) is optional here.
        dotpos = s.rfind('.')
        format = format[:-8]
        if dotpos != -1 and len(s[dotpos:]) > 1:
            fracsec = float(s[dotpos:])

        if dotpos != -1:
            s = s[:dotpos]

    try:
        return time_float(calendar.timegm(time.strptime(s, format))) \
            + fracsec
    except ValueError as e:
        raise TimeStrError('%s, string=%s, format=%s' % (str(e), s, format))
1607stt = str_to_time
def str_to_time_fillup(s):
    '''
    Default :py:func:`str_to_time` with filling in of missing values.

    Allows e.g. `'2010-01-01 00:00:00'` as `'2010-01-01 00:00'`,
    `'2010-01-01 00'`, ..., or `'2010'`. The special values ``'now'``,
    ``'today'`` and ``'tomorrow'`` are also understood.
    '''

    if s == 'now':
        return time.time()

    if s == 'today':
        return today()

    if s == 'tomorrow':
        return tomorrow()

    # Truncation lengths 4, 7, 10, 13, 16 correspond to '2010', '2010-01',
    # '2010-01-01', '2010-01-01 00' and '2010-01-01 00:00'; complete the
    # string with the matching tail of the template.
    template = '0000-01-01 00:00:00'
    if len(s) in (4, 7, 10, 13, 16):
        s = s + template[len(s):]

    return str_to_time(s)
def time_to_str(t, format='%Y-%m-%d %H:%M:%S.3FRAC'):
    '''
    Get string representation for floating point system time.

    :param t: floating point system time
    :param format: time string format, or an int giving the number of
        fractional digits to use with the default format
    :returns: string representing UTC time

    Uses the semantics of :py:func:`time.strftime` but additionally allows for
    fractional seconds. If ``format`` contains ``'.xFRAC'``, where ``x`` is a
    digit between 1 and 9, this is replaced with the fractional part of ``t``
    with ``x`` digits precision.
    '''

    # In "grumpy" mode, complain when t does not use the configured time
    # float class.
    if pyrocko.grumpy > 0:
        check_time_class(t, 'warn' if pyrocko.grumpy == 1 else 'raise')

    # An integer format selects the number of fractional digits.
    if isinstance(format, int):
        if format > 0:
            format = '%Y-%m-%d %H:%M:%S.' + '%iFRAC' % format
        else:
            format = '%Y-%m-%d %H:%M:%S'

    # Fast path: use the C extension when it is available.
    if util_ext is not None:
        t0 = num.floor(t)
        try:
            return util_ext.tts(int(t0), float(t - t0), format)
        except util_ext.UtilExtError as e:
            raise TimeStrError(
                '%s, timestamp=%f, format=%s' % (str(e), t, format))

    # Pure Python fallback: lazily compile the '.xFRAC' pattern and the
    # matching '%.xf' float formats.
    if not GlobalVars.re_frac:
        GlobalVars.re_frac = re.compile(r'\.[1-9]FRAC')
        GlobalVars.frac_formats = dict(
            [('.%sFRAC' % x, '%.'+x+'f') for x in '123456789'])

    ts = float(num.floor(t))
    tfrac = t-ts

    m = GlobalVars.re_frac.search(format)
    if m:
        sfrac = (GlobalVars.frac_formats[m.group(0)] % tfrac)
        # If the fraction rounded up to 1.0, carry one second over.
        if sfrac[0] == '1':
            ts += 1.

        format, nsub = GlobalVars.re_frac.subn(sfrac[1:], format, 1)

    return time.strftime(format, time.gmtime(ts))
# Short alias for time_to_str, used throughout Pyrocko.
tts = time_to_str

# Locale-independent abbreviated weekday and month names (see mystrftime).
_abbr_weekday = 'Mon Tue Wed Thu Fri Sat Sun'.split()
_abbr_month = 'Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec'.split()
def mystrftime(fmt=None, tt=None, milliseconds=0):
    '''
    Format a time tuple with additional sub-second placeholders.

    Needed by Snuffler for the time axis. In other cases `time_to_str`
    should be used.
    '''

    if fmt is None:
        fmt = '%Y-%m-%d %H:%M:%S .%r'

    # Substitute weekday/month names locale-independently (needed by
    # Snuffler; setting the locale seems to have no effect) and the
    # millisecond/microsecond/nanosecond placeholders.
    substitutions = (
        ('%a', _abbr_weekday[tt.tm_wday]),
        ('%b', _abbr_month[tt.tm_mon-1]),
        ('%r', '%03i' % int(round(milliseconds))),
        ('%u', '%06i' % int(round(milliseconds*1000))),
        ('%n', '%09i' % int(round(milliseconds*1000000))))

    for placeholder, value in substitutions:
        fmt = fmt.replace(placeholder, value)

    return time.strftime(fmt, tt)
def gmtime_x(timestamp):
    # Split a timestamp into a UTC time tuple and fractional milliseconds.
    whole = float(num.floor(timestamp))
    return time.gmtime(whole), (timestamp - whole) * 1000
def plural_s(n):
    # Accepts a count or a sized collection; 's' unless the count is one.
    count = n if isinstance(n, int) else len(n)
    return '' if count == 1 else 's'
def ensuredirs(dst):
    '''
    Create all intermediate path components for a target path.

    :param dst: target path

    The leaf part of the target path is not created (use :py:func:`ensuredir`
    if a the target path is a directory to be created).
    '''

    # Collect missing ancestor directories, deepest first.
    missing = []
    d = os.path.split(dst.rstrip(os.sep))[0]
    while d and not os.path.exists(d):
        missing.append(d)
        d = os.path.split(d)[0]

    # Create them shallowest-first, tolerating races with other processes.
    for d in reversed(missing):
        try:
            os.mkdir(d)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise
def ensuredir(dst):
    '''
    Create directory and all intermediate path components to it as needed.

    :param dst: directory name

    Nothing is done if the given target already exists.
    '''

    if os.path.exists(dst):
        return

    # Fix: the result of rstrip() was previously discarded (strings are
    # immutable), so trailing path separators were never actually removed.
    dst = dst.rstrip(os.sep)

    ensuredirs(dst)
    try:
        os.mkdir(dst)
    except OSError as e:
        if not e.errno == errno.EEXIST:
            raise
def reuse(x):
    '''
    Get unique instance of an object.

    :param x: hashable object
    :returns: reference to x or an equivalent object

    Cache object ``x`` in a global dict for reuse, or if x already
    is in that dict, return a reference to it.
    '''

    store = GlobalVars.reuse_store
    return store.setdefault(x, x)
def deuse(x):
    # Remove ``x`` from the global reuse store, if present.
    GlobalVars.reuse_store.pop(x, None)
class Anon(object):
    '''
    Dict-to-object utility.

    Any given keyword arguments are stored as attributes.

    Example::

        a = Anon(x=1, y=2)
        print(a.x, a.y)
    '''

    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)
def iter_select_files(
        paths,
        include=None,
        exclude=None,
        selector=None,
        show_progress=True,
        pass_through=None):

    '''
    Recursively select files (generator variant).

    See :py:func:`select_files`.
    '''

    if show_progress:
        progress_beg('selecting files...')

    ngood = 0

    # Build the include predicate: the path must match ``include`` and, if
    # given, ``selector`` must accept the named groups of the match.
    check_include = None
    if include is not None:
        rinclude = re.compile(include)

        def check_include(path):
            m = rinclude.search(path)
            if not m:
                return False

            if selector is None:
                return True

            infos = Anon(**m.groupdict())
            return selector(infos)

    check_exclude = None
    if exclude is not None:
        rexclude = re.compile(exclude)

        def check_exclude(path):
            return not bool(rexclude.search(path))

    # Combine the include/exclude predicates into a single check (or None
    # when there is nothing to filter on).
    if check_include and check_exclude:

        def check(path):
            return check_include(path) and check_exclude(path)

    elif check_include:
        check = check_include

    elif check_exclude:
        check = check_exclude

    else:
        check = None

    if isinstance(paths, str):
        paths = [paths]

    for path in paths:
        # Entries accepted by pass_through are yielded as-is, without
        # filesystem traversal or abspath conversion.
        # NOTE(review): these matches do not increment ngood, so they are
        # not counted in the progress message — confirm intended.
        if pass_through and pass_through(path):
            if check is None or check(path):
                yield path

        elif os.path.isdir(path):
            # Walk directories in sorted order for reproducible results.
            for (dirpath, dirnames, filenames) in os.walk(path):
                dirnames.sort()
                filenames.sort()
                for filename in filenames:
                    path = op.join(dirpath, filename)
                    if check is None or check(path):
                        yield os.path.abspath(path)
                        ngood += 1
        else:
            if check is None or check(path):
                yield os.path.abspath(path)
                ngood += 1

    if show_progress:
        progress_end('%i file%s selected.' % (ngood, plural_s(ngood)))
def select_files(
        paths, include=None, exclude=None, selector=None, show_progress=True,
        regex=None):

    '''
    Recursively select files.

    :param paths: entry path names
    :param include: pattern for conditional inclusion
    :param exclude: pattern for conditional exclusion
    :param selector: callback for conditional inclusion
    :param show_progress: if True, indicate start and stop of processing
    :param regex: alias for ``include`` (backwards compatibility)
    :returns: list of path names

    Recursively finds all files under given entry points ``paths``. If
    parameter ``include`` is a regular expression, only files with matching
    path names are included. If additionally parameter ``selector`` is given a
    callback function, only files for which the callback returns ``True`` are
    included. The callback should take a single argument. The callback is
    called with a single argument, an object, having as attributes, any named
    groups given in ``include``.

    Examples

    To find all files ending in ``'.mseed'`` or ``'.msd'``::

        select_files(paths,
            include=r'\\.(mseed|msd)$')

    To find all files ending with ``'$Year.$DayOfYear'``, having set 2009 for
    the year::

        select_files(paths,
            include=r'(?P<year>\\d\\d\\d\\d)\\.(?P<doy>\\d\\d\\d)$',
            selector=(lambda x: int(x.year) == 2009))
    '''
    # ``regex`` is a backwards-compatible alias; it may not be combined
    # with ``include``.
    if regex is not None:
        assert include is None
        include = regex

    return list(iter_select_files(
        paths, include, exclude, selector, show_progress))
def base36encode(number, alphabet='0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'):
    '''
    Convert positive integer to a base36 string.

    :param number: non-negative integer to be converted
    :param alphabet: digits to use, in order of increasing value
    :returns: base36 representation of ``number``
    :raises TypeError: if ``number`` is not an integer
    :raises ValueError: if ``number`` is negative
    '''

    # Fix: the old check referenced ``long``, which does not exist in
    # Python 3 and raised NameError instead of TypeError. Plain ``int`` is
    # unbounded in Python 3.
    if not isinstance(number, int):
        raise TypeError('number must be an integer')
    if number < 0:
        raise ValueError('number must be positive')

    # Special case for small numbers
    if number < 36:
        return alphabet[number]

    base36 = ''
    while number != 0:
        number, i = divmod(number, 36)
        base36 = alphabet[i] + base36

    return base36
def base36decode(number):
    '''
    Decode a base36-encoded positive integer.
    '''

    return int(number, base=36)
class UnpackError(Exception):
    '''
    Exception raised when :py:func:`unpack_fixed` encounters an error.

    The message includes a column ruler and a caret marker pointing at the
    offending field of the input line.
    '''

    pass
# Two-line column ruler for columns 0-79 (tens digits above ones digits),
# used in UnpackError messages to point at the offending field.
ruler = ''.join(['%-10i' % i for i in range(8)]) \
    + '\n' + '0123456789' * 8 + '\n'
def unpack_fixed(format, line, *callargs):
    '''
    Unpack fixed format string, as produced by many fortran codes.

    :param format: format specification
    :param line: string to be processed
    :param callargs: callbacks for callback fields in the format

    The format is described by a string of comma-separated fields. Each field
    is defined by a character for the field type followed by the field width. A
    questionmark may be appended to the field description to allow the argument
    to be optional (The data string is then allowed to be filled with blanks
    and ``None`` is returned in this case).

    The following field types are available:

    ==== ================================================================
    Type Description
    ==== ================================================================
    A    string (full field width is extracted)
    a    string (whitespace at the beginning and the end is removed)
    i    integer value
    f    floating point value
    @    special type, a callback must be given for the conversion
    x    special field type to skip parts of the string
    ==== ================================================================
    '''

    # Map field type character to converter; '@' is a marker for "take the
    # next callback from callargs", None means "skip this field".
    casts = {
        'x': None,
        'A': str,
        'a': lambda x: x.strip(),
        'i': int,
        'f': float,
        '@': 'extra'}

    values = []
    icall = 0
    ipos = 0
    for field in format.split(','):
        field = field.strip()
        optional = field.endswith('?')
        field = field.rstrip('?')
        typ = field[0]
        width = int(field[1:])
        chunk = line[ipos:ipos+width]

        cast = casts[typ]
        if cast == 'extra':
            cast = callargs[icall]
            icall += 1

        if cast is not None:
            if optional and chunk.strip() == '':
                values.append(None)
            else:
                try:
                    values.append(cast(chunk))
                except Exception:
                    # Build a caret marker under the offending columns.
                    mark = [' '] * 80
                    mark[ipos:ipos+width] = ['^'] * width
                    mark = ''.join(mark)
                    raise UnpackError(
                        'Invalid cast to type "%s" at position [%i:%i] of '
                        'line: \n%s%s\n%s' % (
                            typ, ipos, ipos+width, ruler, line.rstrip(),
                            mark))

        ipos += width

    return values
2043_pattern_cache = {}
def _nslc_pattern(pattern):
    # Translate a shell-style pattern into a compiled, case-insensitive
    # regex, caching compiled patterns for reuse.
    try:
        return _pattern_cache[pattern]
    except KeyError:
        rpattern = re.compile(fnmatch.translate(pattern), re.I)
        _pattern_cache[pattern] = rpattern
        return rpattern
def match_nslc(patterns, nslc):
    '''
    Match network-station-location-channel code against pattern or list of
    patterns.

    :param patterns: pattern or list of patterns
    :param nslc: tuple with (network, station, location, channel) as strings

    :returns: ``True`` if the pattern matches or if any of the given patterns
        match; or ``False``.

    The patterns may contain shell-style wildcards: \\*, ?, [seq], [!seq].

    Example::

        match_nslc('*.HAM3.*.BH?', ('GR', 'HAM3', '', 'BHZ'))  # -> True
    '''

    if isinstance(patterns, str):
        patterns = [patterns]

    s = nslc if isinstance(nslc, str) else '.'.join(nslc)

    return any(
        _nslc_pattern(pattern).match(s) for pattern in patterns)
def match_nslcs(patterns, nslcs):
    '''
    Get network-station-location-channel codes that match given pattern or any
    of several given patterns.

    :param patterns: pattern or list of patterns
    :param nslcs: list of (network, station, location, channel) tuples

    See also :py:func:`match_nslc`
    '''

    return [nslc for nslc in nslcs if match_nslc(patterns, nslc)]
class Timeout(Exception):
    '''
    Raised by :py:func:`create_lockfile` when the lock file cannot be
    acquired within the given timeout.
    '''
    pass
def create_lockfile(fn, timeout=None, timewarn=10.):
    '''
    Create a lock file, busy-waiting until it can be created exclusively.

    :param fn: path of the lock file
    :param timeout: give up after this many seconds (``None``: wait forever)
    :param timewarn: log a warning after this many seconds, doubling the
        interval after each warning (``None``: never warn)
    :raises Timeout: when ``timeout`` is exceeded
    '''
    t0 = time.time()

    while True:
        try:
            # O_EXCL makes the creation atomic: open fails if the file
            # already exists.
            f = os.open(fn, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
            os.close(f)
            return

        except OSError as e:
            if e.errno in (errno.EEXIST, 13):  # 13 occurs on windows
                pass  # retry
            else:
                raise

        tnow = time.time()

        if timeout is not None and tnow - t0 > timeout:
            raise Timeout(
                'Timeout (%gs) occured while waiting to get exclusive '
                'access to: %s' % (timeout, fn))

        if timewarn is not None and tnow - t0 > timewarn:
            logger.warning(
                'Waiting since %gs to get exclusive access to: %s' % (
                    timewarn, fn))

            # Exponential backoff of the warnings.
            timewarn *= 2

        time.sleep(0.01)
def delete_lockfile(fn):
    # Counterpart of create_lockfile: release the lock by removing the file.
    os.unlink(fn)
class Lockfile(Exception):
    '''
    Context manager which holds an exclusive lock file while its block runs.

    NOTE(review): this class inherits from Exception although it is used as
    a context manager, not raised — presumably an oversight; confirm before
    changing, as callers might rely on it.
    '''

    def __init__(self, path, timeout=5, timewarn=10.):
        self._path = path
        self._timeout = timeout
        self._timewarn = timewarn

    def __enter__(self):
        # Blocks until the lock file can be created (see create_lockfile).
        create_lockfile(
            self._path, timeout=self._timeout, timewarn=self._timewarn)
        return None

    def __exit__(self, type, value, traceback):
        delete_lockfile(self._path)
class SoleError(Exception):
    '''
    Exception raised by objects of type :py:class:`Sole`, when an concurrent
    instance is running.
    '''

    pass
class Sole(object):
    '''
    Use POSIX advisory file locking to ensure that only a single instance of a
    program is running.

    :param pid_path: path to lockfile to be used

    Usage::

        from pyrocko.util import Sole, SoleError, setup_logging
        import os

        setup_logging('my_program')

        pid_path = os.path.join(os.environ['HOME'], '.my_program_lock')
        try:
            sole = Sole(pid_path)

        except SoleError as e:
            logger.fatal(str(e))
            sys.exit(1)
    '''

    def __init__(self, pid_path):
        self._pid_path = pid_path
        self._other_running = False
        ensuredirs(self._pid_path)
        self._lockfile = None
        # Keep module references so __del__ can still use them during
        # interpreter shutdown.
        self._os = os

        if not fcntl:
            raise SoleError(
                'Python standard library module "fcntl" not available on '
                'this platform.')

        self._fcntl = fcntl

        try:
            self._lockfile = os.open(self._pid_path, os.O_CREAT | os.O_WRONLY)
        except Exception:
            raise SoleError(
                'Cannot open lockfile (path = %s)' % self._pid_path)

        try:
            fcntl.lockf(self._lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)

        except IOError:
            self._other_running = True
            try:
                f = open(self._pid_path, 'r')
                pid = f.read().strip()
                f.close()
            except Exception:
                pid = '?'

            raise SoleError('Other instance is running (pid = %s)' % pid)

        try:
            os.ftruncate(self._lockfile, 0)
            # Fix: os.write() requires bytes on Python 3; passing str raised
            # TypeError here, which was silently swallowed below, so the pid
            # was never actually stored.
            os.write(self._lockfile, ('%i\n' % os.getpid()).encode('ascii'))
            os.fsync(self._lockfile)

        except Exception:
            # the pid is only stored for user information, so this is allowed
            # to fail
            pass

    def __del__(self):
        if not self._other_running:
            if self._lockfile is not None:
                self._fcntl.lockf(self._lockfile, self._fcntl.LOCK_UN)
                self._os.close(self._lockfile)
            try:
                self._os.unlink(self._pid_path)
            except Exception:
                pass
2251re_escapequotes = re.compile(r"(['\\])")
def escapequotes(s):
    # Backslash-escape single quotes and backslashes in ``s``.
    return re_escapequotes.sub(r'\\\1', s)
class TableWriter(object):
    '''
    Write table of space separated values to a file.

    :param f: file like object

    Strings containing spaces are quoted on output.
    '''

    def __init__(self, f):
        self._f = f

    def writerow(self, row, minfieldwidths=None):

        '''
        Write one row of values to underlying file.

        :param row: iterable of values
        :param minfieldwidths: minimum field widths for the values

        Each value in ``row`` is converted to a string and optionally padded
        with blanks. The resulting strings are output separated with blanks.
        If any values given are strings and if they contain whitespace, they
        are quoted with single quotes, and any internal single quotes are
        backslash-escaped.
        '''

        widths = minfieldwidths or []
        fields = []
        for i, value in enumerate(row):
            width = widths[i] if i < len(widths) else 0

            if isinstance(value, str):
                # Quote strings containing whitespace or quotes.
                if re.search(r"\s|'", value):
                    value = "'%s'" % escapequotes(value)

                fields.append(value.ljust(width))
            else:
                fields.append(str(value).rjust(width))

        self._f.write(' '.join(fields).rstrip() + '\n')
class TableReader(object):

    '''
    Read table of space separated values from a file.

    :param f: file-like object

    This uses Pythons shlex module to tokenize lines. Should deal correctly
    with quoted strings.
    '''

    def __init__(self, f):
        self._f = f
        # Set to True once the end of the underlying file has been reached.
        self.eof = False

    def readrow(self):
        '''
        Read one row from the underlying file, tokenize it with shlex.

        :returns: tokenized line as a list of strings.
        '''

        line = self._f.readline()
        if not line:
            self.eof = True
            return []

        # Fix: the result of str.strip() was previously discarded (strings
        # are immutable), so comment lines with leading whitespace were not
        # recognized as comments.
        line = line.strip()
        if line.startswith('#'):
            return []

        return qsplit(line)
def gform(number, significant_digits=3):
    '''
    Pretty print floating point numbers.

    Align floating point numbers at the decimal dot.

    ::

        | -d.dde+xxx|
        | -d.dde+xx |
        |-ddd.      |
        | -dd.d     |
        |  -d.dd    |
        |  -0.ddd   |
        |  -0.0ddd  |
        |  -0.00ddd |
        | -d.dde-xx |
        | -d.dde-xxx|
        |        nan|

    The formatted string has length ``significant_digits * 2 + 6``.
    '''

    lo = pow(10., -1)
    hi = pow(10., significant_digits)
    width = 2*significant_digits + 6

    # Plain notation inside [0.1, 10**sd), exponential notation outside.
    if number == 0. or lo <= abs(number) < hi:
        s = ('%#.*g' % (significant_digits, number)).rstrip('0')
    else:
        s = '%.*E' % (significant_digits-1, number)

    # Left-pad so the decimal dot lands at a fixed column, then pad right.
    pad = significant_digits + 1 - s.find('.')
    s = (' '*pad + s).ljust(width)
    if s.strip().lower() == 'nan':
        s = 'nan'.rjust(width)

    return s
def human_bytesize(value):
    '''
    Express a byte count with a human-readable decimal (SI) suffix.
    '''

    suffixes = 'Bytes kB MB GB TB PB EB ZB YB'.split()

    if value == 1:
        return '1 Byte'

    for exponent, suffix in enumerate(suffixes):
        scaled = float(value) / 1000**exponent
        # One decimal digit for small scaled values of large counts,
        # no decimals otherwise.
        if round(scaled) < 10. and not value < 1000:
            return '%.1f %s' % (scaled, suffix)
        if round(scaled) < 1000.:
            return '%.0f %s' % (scaled, suffix)

    return '%i Bytes' % value
# Matches old-style guts type tags (e.g. '!pyrocko.trace.') which have been
# renamed to '!pf.' (see pf_is_old / pf_upgrade).
re_compatibility = re.compile(
    r'!pyrocko\.(trace|gf\.(meta|seismosizer)|fomosto\.' +
    r'(dummy|poel|qseis|qssp))\.'
)
def pf_is_old(fn):
    '''
    Check whether file ``fn`` contains old-style ``!pyrocko.*`` type tags.

    :param fn: file name
    :returns: ``True`` if any line matches ``re_compatibility``
    '''

    with open(fn, 'r') as f:
        for line in f:
            if re_compatibility.search(line):
                # One match is enough; no need to scan the rest of the file.
                return True

    return False
def pf_upgrade(fn):
    '''
    Rewrite file ``fn``, replacing old-style type tags, if necessary.

    :returns: ``True`` if the file needed upgrading and was rewritten
    '''

    need = pf_is_old(fn)
    if need:
        fn_temp = fn + '.temp'

        # Write the converted content to a temporary file first, then move
        # it into place.
        with open(fn, 'r') as fin:
            with open(fn_temp, 'w') as fout:
                for line in fin:
                    fout.write(re_compatibility.sub('!pf.', line))

        os.rename(fn_temp, fn)

    return need
def read_leap_seconds(tzfile='/usr/share/zoneinfo/right/UTC'):
    '''
    Extract leap second information from tzdata.

    Based on example at http://stackoverflow.com/questions/19332902/\
extract-historic-leap-seconds-from-tzdata

    See also 'man 5 tzfile'.

    :param tzfile: path to a tzfile containing leap second records
    :returns: list of ``(timestamp, nleap)`` tuples
    '''

    from struct import unpack, calcsize
    out = []
    with open(tzfile, 'rb') as f:
        # read header
        fmt = '>4s c 15x 6l'
        (magic, format, ttisgmtcnt, ttisstdcnt, leapcnt, timecnt,
            typecnt, charcnt) = unpack(fmt, f.read(calcsize(fmt)))
        assert magic == 'TZif'.encode('US-ASCII'), 'Not a timezone file'

        # skip over some uninteresting data: transition times, local time
        # type indices, ttinfo structures and the abbreviation strings
        fmt = '>%(timecnt)dl %(timecnt)dB %(ttinfo)s %(charcnt)ds' % dict(
            timecnt=timecnt, ttinfo='lBB'*typecnt, charcnt=charcnt)
        f.read(calcsize(fmt))

        # read leap-seconds
        fmt = '>2l'
        for i in range(leapcnt):
            tleap, nleap = unpack(fmt, f.read(calcsize(fmt)))
            out.append((tleap-nleap+1, nleap))

    return out
class LeapSecondsError(Exception):
    '''
    Base class for errors related to leap second list handling.
    '''
    pass
class LeapSecondsOutdated(LeapSecondsError):
    '''
    Raised when the leap seconds list is missing or past its expiry date.
    '''
    pass
class InvalidLeapSecondsFile(LeapSecondsOutdated):
    '''
    Raised when the leap seconds file content is not in the expected format.
    '''
    pass
def parse_leap_seconds_list(fn):
    '''
    Parse a NTP-format leap-seconds list file.

    :param fn: file name
    :returns: list of ``(timestamp, nleap)`` tuples
    :raises LeapSecondsOutdated: if the file is missing or past its expiry
    :raises InvalidLeapSecondsFile: if the file looks like an HTML page
    :raises LeapSecondsError: if the file cannot be read
    '''
    data = []
    texpires = None
    # The list's timestamps use the NTP epoch (1900-01-01). Compute the
    # epoch offset robustly, even where 1900 is outside the system's
    # supported time range.
    try:
        t0 = int(round(str_to_time('1900-01-01 00:00:00')))
    except TimeStrError:
        t0 = int(round(str_to_time('1970-01-01 00:00:00'))) - 2208988800

    tnow = int(round(time.time()))

    if not op.exists(fn):
        raise LeapSecondsOutdated('no leap seconds file found')

    try:
        with open(fn, 'rb') as f:
            for line in f:
                # Guard against an HTML error page saved in place of the
                # plain text list.
                if line.strip().startswith(b'<!DOCTYPE'):
                    raise InvalidLeapSecondsFile('invalid leap seconds file')

                if line.startswith(b'#@'):
                    # The '#@' line carries the file's expiry time.
                    texpires = int(line.split()[1]) + t0
                elif line.startswith(b'#') or len(line) < 5:
                    # Comment or too-short line: ignore.
                    pass
                else:
                    toks = line.split()
                    t = int(toks[0]) + t0
                    nleap = int(toks[1]) - 10
                    data.append((t, nleap))

    except IOError:
        raise LeapSecondsError('cannot read leap seconds file %s' % fn)

    if texpires is None or tnow > texpires:
        raise LeapSecondsOutdated('leap seconds list is outdated')

    return data
def read_leap_seconds2():
    '''
    Get the leap seconds list, downloading/refreshing it when outdated.

    :returns: result of :py:func:`parse_leap_seconds_list`
    :raises LeapSecondsError: when the list can neither be read nor
        downloaded
    '''
    from pyrocko import config
    conf = config.config()
    fn = conf.leapseconds_path
    url = conf.leapseconds_url
    # check for outdated default URL
    if url == 'http://www.ietf.org/timezones/data/leap-seconds.list':
        url = 'https://www.ietf.org/timezones/data/leap-seconds.list'
        logger.info(
            'Leap seconds default URL is now: %s\nUsing new default.' % url)

    if url == 'https://www.ietf.org/timezones/data/leap-seconds.list':
        url = 'https://hpiers.obspm.fr/iers/bul/bulc/ntp/leap-seconds.list'
        logger.info(
            'Leap seconds default URL is now: %s\nUsing new default.' % url)

    # Up to three attempts: an outdated local file triggers a download
    # followed by a re-parse.
    for i in range(3):
        try:
            return parse_leap_seconds_list(fn)

        except LeapSecondsOutdated:
            try:
                logger.info('updating leap seconds list...')
                download_file(url, fn)

            except Exception as e:
                raise LeapSecondsError(
                    'cannot download leap seconds list from %s to %s (%s)'
                    % (url, fn, e))

    raise LeapSecondsError('Could not retrieve/read leap seconds file.')
def gps_utc_offset(t_utc):
    '''
    Time offset t_gps - t_utc for a given t_utc.
    '''
    ls = read_leap_seconds2()

    # Before the first tabulated entry, the offset is one second less.
    if t_utc < ls[0][0]:
        return ls[0][1] - 1 - 9

    # Find the interval [t_i, t_{i+1}) containing t_utc.
    for (t, nleap), (tnext, _) in zip(ls, ls[1:]):
        if t <= t_utc < tnext:
            return nleap - 9

    return ls[-1][1] - 9
def utc_gps_offset(t_gps):
    '''
    Time offset t_utc - t_gps for a given t_gps.
    '''
    ls = read_leap_seconds2()

    # Before the first tabulated entry, the offset is one second less.
    if t_gps < ls[0][0] + ls[0][1] - 9:
        return - (ls[0][1] - 1 - 9)

    # Find the interval (in GPS time) containing t_gps.
    for (t, nleap), (tnext, nleap_next) in zip(ls, ls[1:]):
        if t + nleap - 9 <= t_gps < tnext + nleap_next - 9:
            return - (nleap - 9)

    return - (ls[-1][1] - 9)
def make_iload_family(iload_fh, doc_fmt='FMT', doc_yielded_objects='FMT'):
    '''
    Create a family of loader functions from a filehandle-based loader.

    :param iload_fh: generator function yielding objects read from an open
        (binary) filehandle.
    :param doc_fmt: format name, interpolated into the docstrings of the
        generated functions.
    :param doc_yielded_objects: description of the yielded objects, used in
        the docstring of the returned ``iload``.
    :returns: tuple of functions
        ``(iload_filename, iload_dirname, iload_glob, iload)``.
    '''
    import itertools
    import glob
    from pyrocko.io.io_common import FileLoadError

    def iload_filename(filename, **kwargs):
        # Attach the offending filename to load errors before re-raising,
        # so callers know which file failed.
        try:
            with open(filename, 'rb') as f:
                for cr in iload_fh(f, **kwargs):
                    yield cr

        except FileLoadError as e:
            e.set_context('filename', filename)
            raise

    def iload_dirname(dirname, **kwargs):
        # Non-recursive: only regular files directly inside dirname.
        for entry in os.listdir(dirname):
            fpath = op.join(dirname, entry)
            if op.isfile(fpath):
                for cr in iload_filename(fpath, **kwargs):
                    yield cr

    def iload_glob(pattern, **kwargs):

        for fn in glob.iglob(pattern):
            for cr in iload_filename(fn, **kwargs):
                yield cr

    def iload(source, **kwargs):
        # Dispatch on source type: directory name, file name, glob
        # pattern, open filehandle, or iterable of any of these
        # (handled recursively).
        if isinstance(source, str):
            if op.isdir(source):
                return iload_dirname(source, **kwargs)
            elif op.isfile(source):
                return iload_filename(source, **kwargs)
            else:
                return iload_glob(source, **kwargs)

        elif hasattr(source, 'read'):
            return iload_fh(source, **kwargs)
        else:
            return itertools.chain.from_iterable(
                iload(subsource, **kwargs) for subsource in source)

    # Generated docstrings, specialized to the given format name.
    iload_filename.__doc__ = '''
    Read %s information from named file.
    ''' % doc_fmt

    iload_dirname.__doc__ = '''
    Read %s information from directory of %s files.
    ''' % (doc_fmt, doc_fmt)

    iload_glob.__doc__ = '''
    Read %s information from files matching a glob pattern.
    ''' % doc_fmt

    iload.__doc__ = '''
    Load %s information from given source(s)

    The ``source`` can be specified as the name of a %s file, the name of a
    directory containing %s files, a glob pattern of %s files, an open
    filehandle or an iterator yielding any of the forementioned sources.

    This function behaves as a generator yielding %s objects.
    ''' % (doc_fmt, doc_fmt, doc_fmt, doc_fmt, doc_yielded_objects)

    # Make the generated functions appear to belong to the caller's module.
    for f in iload_filename, iload_dirname, iload_glob, iload:
        f.__module__ = iload_fh.__module__

    return iload_filename, iload_dirname, iload_glob, iload
class Inconsistency(Exception):
    '''
    Raised when a consistency check on a set of values fails.
    '''
    pass
def consistency_check(list_of_tuples, message='values differ:'):
    '''
    Check for inconsistencies.

    Given a list of tuples, check that all tuple elements except for first one
    match. E.g. ``[('STA.N', 55.3, 103.2), ('STA.E', 55.3, 103.2)]`` would be
    valid because the coordinates at the two channels are the same.

    :raises: :py:exc:`Inconsistency` when the trailing elements differ.
    '''
    if len(list_of_tuples) < 2:
        return

    reference = list_of_tuples[0][1:]
    if all(t[1:] == reference for t in list_of_tuples[1:]):
        return

    details = '\n'.join(
        ' %s: %s' % (t[0], ', '.join(str(x) for x in t[1:]))
        for t in list_of_tuples)

    raise Inconsistency('%s\n' % message + details)
class defaultzerodict(dict):
    # Dict yielding 0 for absent keys. Unlike collections.defaultdict,
    # a failed lookup does not insert the key.
    def __missing__(self, key):
        return 0
def mostfrequent(x):
    # Return the most frequently occurring element of sequence ``x``.
    # Stable sort + last element: among equally frequent elements, the
    # one first seen latest wins.
    counts = {}
    for item in x:
        counts[item] = counts.get(item, 0) + 1

    return sorted(counts.keys(), key=lambda item: counts[item])[-1]
def consistency_merge(list_of_tuples,
                      message='values differ:',
                      error='raise',
                      merge=mostfrequent):
    '''
    Merge tuples into one, complaining about inconsistent values.

    :param error: one of ``'raise'``, ``'warn'``, ``'ignore'`` selecting
        what to do when the tuples disagree.
    :param merge: function used to pick a value per column when merging
        inconsistent tuples.
    '''
    assert error in ('raise', 'warn', 'ignore')

    if not list_of_tuples:
        raise Exception('cannot merge empty sequence')

    try:
        consistency_check(list_of_tuples, message)
        return list_of_tuples[0][1:]
    except Inconsistency as e:
        if error == 'raise':
            raise

        elif error == 'warn':
            logger.warning(str(e))

    # Inconsistent but tolerated: merge column-wise, skipping the labels.
    columns = list(zip(*list_of_tuples))[1:]
    return tuple(merge(column) for column in columns)
def short_to_list(nmax, it):
    '''
    Return ``it`` as a list if it yields at most ``nmax`` items, otherwise
    return an equivalent iterator over all of its items.
    '''
    import itertools

    if isinstance(it, list):
        return it

    head = []
    for _ in range(nmax+1):
        try:
            head.append(next(it))
        except StopIteration:
            # Short input: fully consumed, hand back the plain list.
            return head

    # Longer than nmax: chain the consumed prefix with the rest.
    return itertools.chain(head, it)
def parse_md(f):
    '''
    Load the ``README.md`` file next to file ``f`` and adapt it for Sphinx.
    '''
    readme_path = op.join(op.dirname(op.abspath(f)), 'README.md')
    try:
        with open(readme_path, 'r') as readme:
            mdstr = readme.read()
    except IOError as e:
        return 'Failed to get README.md: %s' % e

    # Strip the top-level title.
    mdstr = re.sub(r'^# .*\n?', '', mdstr)
    # Turn `pyrocko.` module mentions into sphinx references.
    mdstr = re.sub(r'`pyrocko\.(.*)`', r':py:mod:`pyrocko.\1`', mdstr)
    # Convert subsections to toc-less rubrics.
    mdstr = re.sub(r'## (.*)\n', r'.. rubric:: \1\n', mdstr)
    return mdstr
def mpl_show(plt):
    '''
    Call ``plt.show()`` unless the non-interactive "agg" backend is active.
    '''
    import matplotlib

    backend = matplotlib.get_backend()
    if backend.lower() != 'agg':
        plt.show()
    else:
        logger.warning('Cannot show() when using matplotlib "agg" backend')
# Tokenizer for qsplit(): matches a double-quoted string (group 1), a
# single-quoted string (group 2) or a bare whitespace-delimited word
# (group 3), honouring backslash escapes inside quoted strings.
g_re_qsplit = re.compile(
    r'"([^"\\]*(?:\\.[^"\\]*)*)"|\'([^\'\\]*(?:\\.[^\'\\]*)*)\'|(\S+)')
# Lazily filled cache of tokenizer variants for custom single-character
# separators, see get_re_qsplit().
g_re_qsplit_sep = {}
def get_re_qsplit(sep):
    '''
    Get tokenizer regex for :py:func:`qsplit`, specialized to separator
    ``sep`` (or whitespace splitting when ``sep`` is ``None``).
    '''
    if sep is None:
        return g_re_qsplit

    if sep not in g_re_qsplit_sep:
        # Separator must be a single character and must not collide with
        # the quote characters handled by the tokenizer.
        assert len(sep) == 1
        assert sep not in '\'"'
        esep = re.escape(sep)
        g_re_qsplit_sep[sep] = re.compile(
            r'"([^"\\]*(?:\\.[^"\\]*)*)"|\'([^\'\\]*(?:\\.[^\'\\]*)*)\'|'
            + r'([^' + esep + r']+|(?<=' + esep + r')(?=' + esep
            + r')|^(?=' + esep + r')|(?<=' + esep + r')$)')

    return g_re_qsplit_sep[sep]
# Matches strings which need no quoting when joined with whitespace.
g_re_trivial = re.compile(r'\A[^\'"\s]+\Z')
# Lazily filled cache of per-separator variants, see get_re_trivial().
g_re_trivial_sep = {}
def get_re_trivial(sep):
    '''
    Get regex matching strings which need no quoting in
    :py:func:`qjoin_s`/:py:func:`qjoin_d`, specialized to separator ``sep``
    (or whitespace splitting when ``sep`` is ``None``).
    '''
    if sep is None:
        return g_re_trivial
    else:
        # Bug fix: the cache-miss test previously checked g_re_qsplit_sep.
        # If get_re_qsplit(sep) had been called first, compilation was
        # skipped here and the lookup below raised KeyError.
        if sep not in g_re_trivial_sep:
            assert len(sep) == 1
            assert sep not in '\'"'
            esep = re.escape(sep)
            g_re_trivial_sep[sep] = re.compile(r'\A[^\'"' + esep + r']+\Z')

        return g_re_trivial_sep[sep]
# Escape/unescape helper patterns: the *_s variants handle single quotes,
# the *_d variants double quotes; both also cover the backslash itself.
g_re_escape_s = re.compile(r'([\\\'])')
g_re_unescape_s = re.compile(r'\\([\\\'])')
g_re_escape_d = re.compile(r'([\\"])')
g_re_unescape_d = re.compile(r'\\([\\"])')
def escape_s(s):
    '''
    Backslash-escape single-quotes and backslashes.

    Example: ``Jack's`` => ``Jack\\'s``

    '''
    return g_re_escape_s.sub(lambda m: '\\' + m.group(1), s)
def unescape_s(s):
    '''
    Unescape backslash-escaped single-quotes and backslashes.

    Example: ``Jack\\'s`` => ``Jack's``
    '''
    return g_re_unescape_s.sub(lambda m: m.group(1), s)
def escape_d(s):
    '''
    Backslash-escape double-quotes and backslashes.

    Example: ``"Hello \\O/"`` => ``\\"Hello \\\\O/\\"``
    '''
    return g_re_escape_d.sub(lambda m: '\\' + m.group(1), s)
def unescape_d(s):
    '''
    Unescape backslash-escaped double-quotes and backslashes.

    Example: ``\\"Hello \\\\O/\\"`` => ``"Hello \\O/"``
    '''
    return g_re_unescape_d.sub(lambda m: m.group(1), s)
def qjoin_s(it, sep=None):
    '''
    Join sequence of strings into a line, single-quoting non-trivial strings.

    Example: ``["55", "Sparrow's Island"]`` => ``"55 'Sparrow\\\\'s Island'"``
    '''
    re_trivial = get_re_trivial(sep)
    joiner = ' ' if sep is None else sep

    def quoted(w):
        # Leave trivial words as-is, quote and escape everything else.
        if re_trivial.search(w):
            return w

        return "'%s'" % escape_s(w)

    return joiner.join(quoted(w) for w in it)
def qjoin_d(it, sep=None):
    '''
    Join sequence of strings into a line, double-quoting non-trivial strings.

    Example: ``['55', 'Pete "The Robot" Smith']`` =>
    ``'55' "Pete \\\\"The Robot\\\\" Smith"'``
    '''
    re_trivial = get_re_trivial(sep)
    joiner = ' ' if sep is None else sep

    def quoted(w):
        # Leave trivial words as-is, quote and escape everything else.
        if re_trivial.search(w):
            return w

        return '"%s"' % escape_d(w)

    return joiner.join(quoted(w) for w in it)
def qsplit(s, sep=None):
    '''
    Split line into list of strings, allowing for quoted strings.

    Example: ``"55 'Sparrow\\\\'s Island'"`` =>
    ``["55", "Sparrow's Island"]``,
    ``'55' "Pete \\\\"The Robot\\\\" Smith"'`` =>
    ``['55', 'Pete "The Robot" Smith']``
    '''
    words = []
    # Each match yields three alternatives: double-quoted, single-quoted
    # or bare token; exactly one of them is non-empty per match.
    for dquoted, squoted, bare in get_re_qsplit(sep).findall(s):
        words.append(unescape_d(dquoted) or unescape_s(squoted) or bare)

    return words
# Ensures the missing-threadpoolctl warning is emitted only once per process.
g_have_warned_threadpoolctl = False
class threadpool_limits_dummy(object):
    '''
    No-op stand-in for ``threadpoolctl.threadpool_limits``.

    Used when the ``threadpoolctl`` package is not installed. Accepts and
    ignores any constructor arguments and warns once on first use as a
    context manager.
    '''

    def __init__(self, *args, **kwargs):
        pass

    def __enter__(self):
        global g_have_warned_threadpoolctl

        if not g_have_warned_threadpoolctl:
            logger.warning(
                'Cannot control number of BLAS threads because '
                '`threadpoolctl` module is not available. You may want to '
                'install `threadpoolctl`.')

            g_have_warned_threadpoolctl = True

        return self

    def __exit__(self, exc_type, exc_value, exc_traceback):
        # Parameters renamed from (type, value, traceback) to avoid
        # shadowing the builtins; the context-manager protocol calls this
        # positionally, so callers are unaffected. Returning None means
        # exceptions are not suppressed.
        pass
def get_threadpool_limits():
    '''
    Try to import threadpoolctl.threadpool_limits, provide dummy if not avail.
    '''
    try:
        from threadpoolctl import threadpool_limits
    except ImportError:
        return threadpool_limits_dummy

    return threadpool_limits
def fmt_summary(entries, widths):
    '''
    Format strings as left-justified fixed-width columns joined by ``' | '``.
    '''
    columns = [entry.ljust(width) for entry, width in zip(entries, widths)]
    return ' | '.join(columns)
def smart_weakref(obj, callback=None):
    '''
    Create a weak reference to ``obj``, using
    :py:class:`weakref.WeakMethod` for bound methods and plain
    :py:class:`weakref.ref` otherwise.
    '''
    ref_type = weakref.WeakMethod if inspect.ismethod(obj) else weakref.ref
    return ref_type(obj, callback)