1# http://pyrocko.org - GPLv3
3#
4# The Pyrocko Developers, 21st Century
5# ---|P------/S----------~Lg----------
6'''
7Utility functions for Pyrocko.
9.. _time-handling-mode:
11High precision time handling mode
12.................................
14Pyrocko can treat timestamps either as standard double precision (64 bit)
15floating point values, or as high precision float (:py:class:`numpy.float128`
16or :py:class:`numpy.float96`, whichever is available), aliased here as
17:py:class:`pyrocko.util.hpfloat`. High precision time stamps are required when
18handling data with sub-millisecond precision, i.e. kHz/MHz data streams and
19event catalogs derived from such data.
Not all functions in Pyrocko and in programs depending on Pyrocko work
correctly with high precision times. Therefore, Pyrocko's high precision time
handling mode has to be activated explicitly, through the user config, a
command line option, or from within a specific script/program.
26The default high precision time handling mode can be configured globally with
27the user config variable
28:py:gattr:`~pyrocko.config.Config.use_high_precision_time`. Calling the
29function :py:func:`use_high_precision_time` overrides the default from the
30config file. This function may be called at startup of a program/script
31requiring a specific time handling mode.
33To create a valid time stamp for use in Pyrocko (e.g. in
34:py:class:`~pyrocko.model.Event` or :py:class:`~pyrocko.trace.Trace` objects),
35use:
37.. code-block :: python
39 import time
40 from pyrocko import util
42 # By default using mode selected in user config, override with:
43 # util.use_high_precision_time(True) # force high precision mode
44 # util.use_high_precision_time(False) # force low precision mode
46 t1 = util.str_to_time('2020-08-27 10:22:00')
47 t2 = util.str_to_time('2020-08-27 10:22:00.111222')
48 t3 = util.to_time_float(time.time())
50 # To get the appropriate float class, use:
52 time_float = util.get_time_float()
53 # -> float, numpy.float128 or numpy.float96
54 [isinstance(t, time_float) for t in [t1, t2, t3]]
55 # -> [True, True, True]
57 # Shortcut:
58 util.check_time_class(t1)
60Module content
61..............
62'''
64from __future__ import division, print_function
66import time
67import logging
68import os
69import sys
70import re
71import calendar
72import math
73import fnmatch
74try:
75 import fcntl
76except ImportError:
77 fcntl = None
78import optparse
79import os.path as op
80import errno
82import numpy as num
83from scipy import signal
84import pyrocko
85from pyrocko import dummy_progressbar
88try:
89 from urllib.parse import urlencode, quote, unquote # noqa
90 from urllib.request import (
91 Request, build_opener, HTTPDigestAuthHandler, urlopen as _urlopen) # noqa
92 from urllib.error import HTTPError, URLError # noqa
94except ImportError:
95 from urllib import urlencode, quote, unquote # noqa
96 from urllib2 import (Request, build_opener, HTTPDigestAuthHandler, # noqa
97 HTTPError, URLError, urlopen as _urlopen) # noqa
99try:
100 import certifi
101 import ssl
102 g_ssl_context = ssl.create_default_context(cafile=certifi.where())
103except ImportError:
104 g_ssl_context = None
107class URLErrorSSL(URLError):
109 def __init__(self, urlerror):
110 self.__dict__ = urlerror.__dict__.copy()
112 def __str__(self):
113 return (
114 'Requesting web resource failed and the problem could be '
115 'related to SSL. Python standard libraries on some older '
116 'systems (like Ubuntu 14.04) are known to have trouble '
117 'with some SSL setups of today\'s servers: %s'
118 % URLError.__str__(self))
121def urlopen_ssl_check(*args, **kwargs):
122 try:
123 return urlopen(*args, **kwargs)
124 except URLError as e:
125 if str(e).find('SSL') != -1:
126 raise URLErrorSSL(e)
127 else:
128 raise
131def urlopen(*args, **kwargs):
133 if 'context' not in kwargs and g_ssl_context is not None:
134 kwargs['context'] = g_ssl_context
136 return _urlopen(*args, **kwargs)
139try:
140 long
141except NameError:
142 long = int
145force_dummy_progressbar = False
148try:
149 from pyrocko import util_ext
150except ImportError:
151 util_ext = None
154logger = logging.getLogger('pyrocko.util')
156try:
157 import progressbar as progressbar_mod
158except ImportError:
159 from pyrocko import dummy_progressbar as progressbar_mod
162try:
163 num_full = num.full
164except AttributeError:
165 def num_full(shape, fill_value, dtype=None, order='C'):
166 a = num.empty(shape, dtype=dtype, order=order)
167 a.fill(fill_value)
168 return a
170try:
171 num_full_like = num.full_like
172except AttributeError:
173 def num_full_like(arr, fill_value, dtype=None, order='K', subok=True):
174 a = num.empty_like(arr, dtype=dtype, order=order, subok=subok)
175 a.fill(fill_value)
176 return a
179def progressbar_module():
180 return progressbar_mod
183g_setup_logging_args = 'pyrocko', 'warning'
186def setup_logging(programname='pyrocko', levelname='warning'):
187 '''
188 Initialize logging.
190 :param programname: program name to be written in log
191 :param levelname: string indicating the logging level ('debug', 'info',
192 'warning', 'error', 'critical')
194 This function is called at startup by most pyrocko programs to set up a
195 consistent logging format. This is simply a shortcut to a call to
196 :py:func:`logging.basicConfig()`.
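    A minimal usage sketch (program name and level are examples)::

        from pyrocko import util

        util.setup_logging('my_program', 'info')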
197 '''
199 global g_setup_logging_args
200 g_setup_logging_args = (programname, levelname)
202 levels = {'debug': logging.DEBUG,
203 'info': logging.INFO,
204 'warning': logging.WARNING,
205 'error': logging.ERROR,
206 'critical': logging.CRITICAL}
208 logging.basicConfig(
209 level=levels[levelname],
210 format=programname+':%(name)-25s - %(levelname)-8s - %(message)s')
213def subprocess_setup_logging_args():
214 '''
215 Get arguments from previous call to setup_logging.
217 These can be sent down to a worker process so it can setup its logging
218 in the same way as the main process.
219 '''
220 return g_setup_logging_args
223def data_file(fn):
224 return os.path.join(os.path.split(__file__)[0], 'data', fn)
227class DownloadError(Exception):
228 pass
231class PathExists(DownloadError):
232 pass
235def _download(url, fpath, username=None, password=None,
236 force=False, method='download', stats=None,
237 status_callback=None, entries_wanted=None,
238 recursive=False, header=None):
240 import requests
241 from requests.auth import HTTPBasicAuth
242 from requests.exceptions import HTTPError as req_HTTPError
244 requests.adapters.DEFAULT_RETRIES = 5
245 urljoin = requests.compat.urljoin
247 session = requests.Session()
    if header:
        session.headers.update(header)
249 session.auth = None if username is None\
250 else HTTPBasicAuth(username, password)
252 status = {
253 'ntotal_files': 0,
254 'nread_files': 0,
255 'ntotal_bytes_all_files': 0,
256 'nread_bytes_all_files': 0,
257 'ntotal_bytes_current_file': 0,
258 'nread_bytes_current_file': 0,
259 'finished': False
260 }
262 try:
263 url_to_size = {}
265 if callable(status_callback):
266 status_callback(status)
268 if not recursive and url.endswith('/'):
269 raise DownloadError(
270 'URL: %s appears to be a directory'
                ' but recursive download is False' % url)
273 if recursive and not url.endswith('/'):
274 url += '/'
276 def parse_directory_tree(url, subdir=''):
277 r = session.get(urljoin(url, subdir))
278 r.raise_for_status()
280 entries = re.findall(r'href="([a-zA-Z0-9_.-]+/?)"', r.text)
282 files = sorted(set(subdir + fn for fn in entries
283 if not fn.endswith('/')))
285 if entries_wanted is not None:
                files = [fn for fn in files
                         if any(fn in wanted for wanted in entries_wanted)]
289 status['ntotal_files'] += len(files)
291 dirs = sorted(set(subdir + dn for dn in entries
292 if dn.endswith('/')
293 and dn not in ('./', '../')))
295 for dn in dirs:
296 files.extend(parse_directory_tree(
297 url, subdir=dn))
299 return files
301 def get_content_length(url):
302 if url not in url_to_size:
303 r = session.head(url, headers={'Accept-Encoding': ''})
305 content_length = r.headers.get('content-length', None)
306 if content_length is None:
307 logger.debug('Could not get HTTP header '
308 'Content-Length for %s' % url)
310 content_length = None
312 else:
313 content_length = int(content_length)
314 status['ntotal_bytes_all_files'] += content_length
315 if callable(status_callback):
316 status_callback(status)
318 url_to_size[url] = content_length
320 return url_to_size[url]
322 def download_file(url, fn):
323 logger.info('starting download of %s...' % url)
324 ensuredirs(fn)
326 fsize = get_content_length(url)
327 status['ntotal_bytes_current_file'] = fsize
328 status['nread_bytes_current_file'] = 0
329 if callable(status_callback):
330 status_callback(status)
332 r = session.get(url, stream=True, timeout=5)
333 r.raise_for_status()
335 frx = 0
336 fn_tmp = fn + '.%i.temp' % os.getpid()
337 with open(fn_tmp, 'wb') as f:
338 for d in r.iter_content(chunk_size=1024):
339 f.write(d)
340 frx += len(d)
342 status['nread_bytes_all_files'] += len(d)
343 status['nread_bytes_current_file'] += len(d)
344 if callable(status_callback):
345 status_callback(status)
347 os.rename(fn_tmp, fn)
349 if fsize is not None and frx != fsize:
350 logger.warning(
351 'HTTP header Content-Length: %i bytes does not match '
352 'download size %i bytes' % (fsize, frx))
354 logger.info('finished download of %s' % url)
356 status['nread_files'] += 1
357 if callable(status_callback):
358 status_callback(status)
360 if recursive:
361 if op.exists(fpath) and not force:
362 raise PathExists('path %s already exists' % fpath)
364 files = parse_directory_tree(url)
366 dsize = 0
367 for fn in files:
368 file_url = urljoin(url, fn)
369 dsize += get_content_length(file_url) or 0
371 if method == 'calcsize':
372 return dsize
374 else:
375 for fn in files:
376 file_url = urljoin(url, fn)
377 download_file(file_url, op.join(fpath, fn))
379 else:
380 status['ntotal_files'] += 1
381 if callable(status_callback):
382 status_callback(status)
384 fsize = get_content_length(url)
385 if method == 'calcsize':
386 return fsize
387 else:
388 download_file(url, fpath)
390 except req_HTTPError as e:
        logger.warning("http error: %s" % e)
392 raise DownloadError('could not download file(s) from: %s' % url)
394 finally:
395 status['finished'] = True
396 if callable(status_callback):
397 status_callback(status)
398 session.close()
401def download_file(
402 url, fpath, username=None, password=None, status_callback=None,
403 **kwargs):
404 return _download(
405 url, fpath, username, password,
406 recursive=False,
407 status_callback=status_callback,
408 **kwargs)
411def download_dir(
412 url, fpath, username=None, password=None, status_callback=None,
413 **kwargs):
415 return _download(
416 url, fpath, username, password,
417 recursive=True,
418 status_callback=status_callback,
419 **kwargs)
422class HPFloatUnavailable(Exception):
423 pass
426class dummy_hpfloat(object):
427 def __init__(self, *args, **kwargs):
428 raise HPFloatUnavailable(
429 'NumPy lacks support for float128 or float96 data type on this '
430 'platform.')
433if hasattr(num, 'float128'):
434 hpfloat = num.float128
436elif hasattr(num, 'float96'):
437 hpfloat = num.float96
439else:
440 hpfloat = dummy_hpfloat
443g_time_float = None
444g_time_dtype = None
447class TimeFloatSettingError(Exception):
448 pass
451def use_high_precision_time(enabled):
452 '''
453 Globally force a specific time handling mode.
455 See :ref:`High precision time handling mode <time-handling-mode>`.
457 :param enabled: enable/disable use of high precision time type
458 :type enabled: bool
460 This function should be called before handling/reading any time data.
461 It can only be called once.
463 Special attention is required when using multiprocessing on a platform
464 which does not use fork under the hood. In such cases, the desired setting
    must also be set in the subprocess.
466 '''
467 _setup_high_precision_time_mode(enabled_app=enabled)
470def _setup_high_precision_time_mode(enabled_app=False):
471 global g_time_float
472 global g_time_dtype
474 if not (g_time_float is None and g_time_dtype is None):
475 raise TimeFloatSettingError(
476 'Cannot set time handling mode: too late, it has already been '
477 'fixed by an earlier call.')
479 from pyrocko import config
481 conf = config.config()
482 enabled_config = conf.use_high_precision_time
484 enabled_env = os.environ.get('PYROCKO_USE_HIGH_PRECISION_TIME', None)
485 if enabled_env is not None:
486 try:
487 enabled_env = int(enabled_env) == 1
488 except ValueError:
489 raise TimeFloatSettingError(
490 'Environment variable PYROCKO_USE_HIGH_PRECISION_TIME '
491 'should be set to 0 or 1.')
493 enabled = enabled_config
494 mode_from = 'config variable `use_high_precision_time`'
495 notify = enabled
497 if enabled_env is not None:
498 if enabled_env != enabled:
499 notify = True
500 enabled = enabled_env
501 mode_from = 'environment variable `PYROCKO_USE_HIGH_PRECISION_TIME`'
503 if enabled_app is not None:
504 if enabled_app != enabled:
505 notify = True
506 enabled = enabled_app
507 mode_from = 'application override'
509 logger.debug('''
Pyrocko high precision time mode selection (later settings override earlier ones):
511 config: %s
512 env: %s
513 app: %s
514 -> enabled: %s'''.lstrip() % (
515 enabled_config, enabled_env, enabled_app, enabled))
517 if notify:
518 logger.info('Pyrocko high precision time mode %s by %s.' % (
519 'activated' if enabled else 'deactivated',
520 mode_from))
522 if enabled:
523 g_time_float = hpfloat
524 g_time_dtype = hpfloat
525 else:
526 g_time_float = float
527 g_time_dtype = num.float64
530def get_time_float():
531 '''
532 Get the effective float class for timestamps.
534 See :ref:`High precision time handling mode <time-handling-mode>`.
536 :returns: :py:class:`float` or :py:class:`hpfloat`, depending on the
537 current time handling mode
538 '''
539 global g_time_float
541 if g_time_float is None:
542 _setup_high_precision_time_mode()
544 return g_time_float
547def get_time_dtype():
548 '''
549 Get effective NumPy float class to handle timestamps.
551 See :ref:`High precision time handling mode <time-handling-mode>`.
552 '''
554 global g_time_dtype
556 if g_time_dtype is None:
557 _setup_high_precision_time_mode()
559 return g_time_dtype
562def to_time_float(t):
563 '''
564 Convert float to valid time stamp in the current time handling mode.
566 See :ref:`High precision time handling mode <time-handling-mode>`.
567 '''
568 return get_time_float()(t)
571class TimestampTypeError(ValueError):
572 pass
575def check_time_class(t, error='raise'):
576 '''
577 Type-check variable against current time handling mode.
579 See :ref:`High precision time handling mode <time-handling-mode>`.
580 '''
582 if t == 0.0:
583 return
585 if not isinstance(t, get_time_float()):
586 message = (
587 'Timestamp %g is of type %s but should be of type %s with '
588 'Pyrocko\'s currently selected time handling mode.\n\n'
589 'See https://pyrocko.org/docs/current/library/reference/util.html'
590 '#high-precision-time-handling-mode' % (
591 t, type(t), get_time_float()))
593 if error == 'raise':
594 raise TimestampTypeError(message)
595 elif error == 'warn':
596 logger.warning(message)
597 else:
598 assert False
601class Stopwatch(object):
602 '''
603 Simple stopwatch to measure elapsed wall clock time.
605 Usage::
607 s = Stopwatch()
608 time.sleep(1)
        print(s())
610 time.sleep(1)
        print(s())
612 '''
614 def __init__(self):
615 self.start = time.time()
617 def __call__(self):
618 return time.time() - self.start
621def wrap(text, line_length=80):
622 '''
623 Paragraph and list-aware wrapping of text.
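    Returns a list of output lines. A small usage sketch (the input text is
    arbitrary)::

        lines = wrap('A rather long paragraph of text to be wrapped.',
                     line_length=30)
        text = '\n'.join(lines)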
624 '''
626 text = text.strip('\n')
627 at_lineend = re.compile(r' *\n')
628 at_para = re.compile(r'((^|(\n\s*)?\n)(\s+[*] )|\n\s*\n)')
630 paragraphs = at_para.split(text)[::5]
631 listindents = at_para.split(text)[4::5]
632 newlist = at_para.split(text)[3::5]
634 listindents[0:0] = [None]
635 listindents.append(True)
636 newlist.append(None)
638 det_indent = re.compile(r'^ *')
640 outlines = []
641 for ip, p in enumerate(paragraphs):
642 if not p:
643 continue
645 if listindents[ip] is None:
646 _indent = det_indent.findall(p)[0]
647 findent = _indent
648 else:
649 findent = listindents[ip]
650 _indent = ' ' * len(findent)
652 ll = line_length - len(_indent)
653 llf = ll
655 oldlines = [s.strip() for s in at_lineend.split(p.rstrip())]
656 p1 = ' '.join(oldlines)
657 possible = re.compile(r'(^.{1,%i}|.{1,%i})( |$)' % (llf, ll))
658 for imatch, match in enumerate(possible.finditer(p1)):
659 parout = match.group(1)
660 if imatch == 0:
661 outlines.append(findent + parout)
662 else:
663 outlines.append(_indent + parout)
665 if ip != len(paragraphs)-1 and (
666 listindents[ip] is None
667 or newlist[ip] is not None
668 or listindents[ip+1] is None):
670 outlines.append('')
672 return outlines
675def ewrap(lines, width=80, indent=''):
676 lines = list(lines)
677 if not lines:
678 return ''
679 fwidth = max(len(s) for s in lines)
    nx = max(1, (width-len(indent)) // (fwidth+1))
681 i = 0
682 rows = []
683 while i < len(lines):
684 rows.append(indent + ' '.join(x.ljust(fwidth) for x in lines[i:i+nx]))
685 i += nx
687 return '\n'.join(rows)
690class BetterHelpFormatter(optparse.IndentedHelpFormatter):
692 def __init__(self, *args, **kwargs):
693 optparse.IndentedHelpFormatter.__init__(self, *args, **kwargs)
695 def format_option(self, option):
696 '''
697 From IndentedHelpFormatter but using a different wrap method.
698 '''
700 help_text_position = 4 + self.current_indent
701 help_text_width = self.width - help_text_position
703 opts = self.option_strings[option]
704 opts = "%*s%s" % (self.current_indent, "", opts)
705 if option.help:
706 help_text = self.expand_default(option)
708 if self.help_position + len(help_text) + 1 <= self.width:
709 lines = [
710 '',
711 '%-*s %s' % (self.help_position, opts, help_text),
712 '']
713 else:
714 lines = ['']
715 lines.append(opts)
716 lines.append('')
717 if option.help:
718 help_lines = wrap(help_text, help_text_width)
719 lines.extend(["%*s%s" % (help_text_position, "", line)
720 for line in help_lines])
721 lines.append('')
723 return "\n".join(lines)
725 def format_description(self, description):
726 if not description:
727 return ''
729 if self.current_indent == 0:
730 lines = []
731 else:
732 lines = ['']
734 lines.extend(wrap(description, self.width - self.current_indent))
735 if self.current_indent == 0:
736 lines.append('\n')
738 return '\n'.join(
739 ['%*s%s' % (self.current_indent, '', line) for line in lines])
742def progressbar(label, maxval):
743 progressbar_mod = progressbar_module()
744 if force_dummy_progressbar:
745 progressbar_mod = dummy_progressbar
747 widgets = [
748 label, ' ',
749 progressbar_mod.Bar(marker='-', left='[', right=']'), ' ',
750 progressbar_mod.Percentage(), ' ',
751 progressbar_mod.ETA()]
753 pbar = progressbar_mod.ProgressBar(widgets=widgets, maxval=maxval).start()
754 return pbar
757def progress_beg(label):
758 '''
759 Notify user that an operation has started.
761 :param label: name of the operation
    To be used in conjunction with :py:func:`progress_end`.
764 '''
766 sys.stderr.write(label)
767 sys.stderr.flush()
770def progress_end(label=''):
771 '''
772 Notify user that an operation has ended.
774 :param label: name of the operation
    To be used in conjunction with :py:func:`progress_beg`.
777 '''
779 sys.stderr.write(' done. %s\n' % label)
780 sys.stderr.flush()
783class ArangeError(Exception):
784 pass
787def arange2(start, stop, step, dtype=float, epsilon=1e-6, error='raise'):
788 '''
789 Return evenly spaced numbers over a specified interval.
791 Like :py:func:`numpy.arange` but returning floating point numbers by
792 default and with defined behaviour when stepsize is inconsistent with
793 interval bounds. It is considered inconsistent if the difference between
794 the closest multiple of ``step`` and ``stop`` is larger than ``epsilon *
795 step``. Inconsistencies are handled according to the ``error`` parameter.
796 If it is set to ``'raise'`` an exception of type :py:exc:`ArangeError` is
797 raised. If it is set to ``'round'``, ``'floor'``, or ``'ceil'``, ``stop``
798 is silently changed to the closest, the next smaller, or next larger
799 multiple of ``step``, respectively.
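    A short example (values shown in comments illustrate the behaviour)::

        from pyrocko import util

        x = util.arange2(0., 1., 0.25)
        # -> [0., 0.25, 0.5, 0.75, 1.], endpoint included

        y = util.arange2(0., 1., 0.3, error='round')
        # stop is silently rounded to the closest multiple of step (0.9)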
800 '''
802 assert error in ('raise', 'round', 'floor', 'ceil')
804 start = dtype(start)
805 stop = dtype(stop)
806 step = dtype(step)
808 rnd = {'floor': math.floor, 'ceil': math.ceil}.get(error, round)
810 n = int(rnd((stop - start) / step)) + 1
811 stop_check = start + (n-1) * step
813 if error == 'raise' and abs(stop_check - stop) > step * epsilon:
814 raise ArangeError(
815 'inconsistent range specification: start=%g, stop=%g, step=%g'
816 % (start, stop, step))
818 x = num.arange(n, dtype=dtype)
819 x *= step
820 x += start
821 return x
824def polylinefit(x, y, n_or_xnodes):
825 '''
826 Fit piece-wise linear function to data.
828 :param x,y: arrays with coordinates of data
829 :param n_or_xnodes: int, number of segments or x coordinates of polyline
831 :returns: `(xnodes, ynodes, rms_error)` arrays with coordinates of
832 polyline, root-mean-square error
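    For illustration (data and segment count chosen arbitrarily)::

        import numpy as num
        from pyrocko import util

        x = num.linspace(0., 10., 101)
        y = num.abs(x - 5.)   # V-shaped data

        xnodes, ynodes, rms_error = util.polylinefit(x, y, 2)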
833 '''
835 x = num.asarray(x)
836 y = num.asarray(y)
838 if isinstance(n_or_xnodes, int):
839 n = n_or_xnodes
840 xmin = x.min()
841 xmax = x.max()
842 xnodes = num.linspace(xmin, xmax, n+1)
843 else:
844 xnodes = num.asarray(n_or_xnodes)
845 n = xnodes.size - 1
847 assert len(x) == len(y)
848 assert n > 0
850 ndata = len(x)
851 a = num.zeros((ndata+(n-1), n*2))
852 for i in range(n):
853 xmin_block = xnodes[i]
854 xmax_block = xnodes[i+1]
        if i == n-1:  # don't lose the last point
856 indices = num.where(
857 num.logical_and(xmin_block <= x, x <= xmax_block))[0]
858 else:
859 indices = num.where(
860 num.logical_and(xmin_block <= x, x < xmax_block))[0]
862 a[indices, i*2] = x[indices]
863 a[indices, i*2+1] = 1.0
865 w = float(ndata)*100.
866 if i < n-1:
867 a[ndata+i, i*2] = xmax_block*w
868 a[ndata+i, i*2+1] = 1.0*w
869 a[ndata+i, i*2+2] = -xmax_block*w
870 a[ndata+i, i*2+3] = -1.0*w
872 d = num.concatenate((y, num.zeros(n-1)))
873 model = num.linalg.lstsq(a, d, rcond=-1)[0].reshape((n, 2))
875 ynodes = num.zeros(n+1)
876 ynodes[:n] = model[:, 0]*xnodes[:n] + model[:, 1]
877 ynodes[1:] += model[:, 0]*xnodes[1:] + model[:, 1]
878 ynodes[1:n] *= 0.5
880 rms_error = num.sqrt(num.mean((num.interp(x, xnodes, ynodes) - y)**2))
882 return xnodes, ynodes, rms_error
885def plf_integrate_piecewise(x_edges, x, y):
886 '''
887 Calculate definite integral of piece-wise linear function on intervals.
889 Use trapezoidal rule to calculate definite integral of a piece-wise linear
890 function for a series of consecutive intervals. ``x_edges`` and ``x`` must
891 be sorted.
893 :param x_edges: array with edges of the intervals
894 :param x,y: arrays with coordinates of piece-wise linear function's
895 control points
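    A small sketch (a triangle function integrated over two unit intervals)::

        import numpy as num
        from pyrocko import util

        x = num.array([0., 1., 2.])
        y = num.array([0., 1., 0.])

        areas = util.plf_integrate_piecewise(num.array([0., 1., 2.]), x, y)
        # -> [0.5, 0.5]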
896 '''
898 x_all = num.concatenate((x, x_edges))
899 ii = num.argsort(x_all)
900 y_edges = num.interp(x_edges, x, y)
901 y_all = num.concatenate((y, y_edges))
902 xs = x_all[ii]
903 ys = y_all[ii]
904 y_all[ii[1:]] = num.cumsum((xs[1:] - xs[:-1]) * 0.5 * (ys[1:] + ys[:-1]))
905 return num.diff(y_all[-len(y_edges):])
908def diff_fd_1d_4o(dt, data):
909 '''
    Approximate first derivative of an array (fourth order, central FD).
912 :param dt: sampling interval
913 :param data: NumPy array with data samples
915 :returns: NumPy array with same shape as input
917 Interior points are approximated to fourth order, edge points to first
918 order right- or left-sided respectively, points next to edge to second
919 order central.
920 '''
921 import scipy.signal
923 ddata = num.empty_like(data, dtype=float)
925 if data.size >= 5:
926 ddata[2:-2] = scipy.signal.lfilter(
927 [-1., +8., 0., -8., 1.], [1.], data)[4:] / (12.*dt)
929 if data.size >= 3:
930 ddata[1] = (data[2] - data[0]) / (2. * dt)
931 ddata[-2] = (data[-1] - data[-3]) / (2. * dt)
933 if data.size >= 2:
934 ddata[0] = (data[1] - data[0]) / dt
935 ddata[-1] = (data[-1] - data[-2]) / dt
937 if data.size == 1:
938 ddata[0] = 0.0
940 return ddata
943def diff_fd_1d_2o(dt, data):
944 '''
945 Approximate first derivative of an array (second order, central FD).
947 :param dt: sampling interval
948 :param data: NumPy array with data samples
950 :returns: NumPy array with same shape as input
952 Interior points are approximated to second order, edge points to first
953 order right- or left-sided respectively.
955 Uses :py:func:`numpy.gradient`.
956 '''
958 return num.gradient(data, dt)
961def diff_fd_2d_4o(dt, data):
962 '''
    Approximate second derivative of an array (fourth order, central FD).
965 :param dt: sampling interval
966 :param data: NumPy array with data samples
968 :returns: NumPy array with same shape as input
970 Interior points are approximated to fourth order, next-to-edge points to
971 second order, edge points repeated.
972 '''
973 import scipy.signal
975 ddata = num.empty_like(data, dtype=float)
977 if data.size >= 5:
978 ddata[2:-2] = scipy.signal.lfilter(
979 [-1., +16., -30., +16., -1.], [1.], data)[4:] / (12.*dt**2)
981 if data.size >= 3:
982 ddata[:2] = (data[2] - 2.0 * data[1] + data[0]) / dt**2
983 ddata[-2:] = (data[-1] - 2.0 * data[-2] + data[-3]) / dt**2
985 if data.size < 3:
986 ddata[:] = 0.0
988 return ddata
991def diff_fd_2d_2o(dt, data):
992 '''
993 Approximate second derivative of an array (second order, central FD).
995 :param dt: sampling interval
996 :param data: NumPy array with data samples
998 :returns: NumPy array with same shape as input
1000 Interior points are approximated to second order, edge points repeated.
1001 '''
1002 import scipy.signal
1004 ddata = num.empty_like(data, dtype=float)
1006 if data.size >= 3:
1007 ddata[1:-1] = scipy.signal.lfilter(
1008 [1., -2., 1.], [1.], data)[2:] / (dt**2)
1010 ddata[0] = ddata[1]
1011 ddata[-1] = ddata[-2]
1013 if data.size < 3:
1014 ddata[:] = 0.0
1016 return ddata
1019def diff_fd(n, order, dt, data):
1020 '''
1021 Approximate 1st or 2nd derivative of an array.
1023 :param n: 1 for first derivative, 2 for second
    :param order: order of the approximation; 2 and 4 are supported
1025 :param dt: sampling interval
1026 :param data: NumPy array with data samples
1028 :returns: NumPy array with same shape as input
1030 This is a frontend to the functions :py:func:`diff_fd_1d_2o`,
1031 :py:func:`diff_fd_1d_4o`, :py:func:`diff_fd_2d_2o`, and
1032 :py:func:`diff_fd_2d_4o`.
1034 Raises :py:exc:`ValueError` for unsupported `n` or `order`.
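    A usage sketch (``dt`` and ``data`` are assumed to be a sampling interval
    and a NumPy array of samples)::

        velocity = diff_fd(1, 4, dt, data)      # first derivative, 4th order
        acceleration = diff_fd(2, 2, dt, data)  # second derivative, 2nd order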
1035 '''
1037 funcs = {
1038 1: {2: diff_fd_1d_2o, 4: diff_fd_1d_4o},
1039 2: {2: diff_fd_2d_2o, 4: diff_fd_2d_4o}}
1041 try:
1042 funcs_n = funcs[n]
1043 except KeyError:
1044 raise ValueError(
1045 'pyrocko.util.diff_fd: '
            'Only 1st and 2nd derivatives are supported.')
1048 try:
1049 func = funcs_n[order]
1050 except KeyError:
1051 raise ValueError(
1052 'pyrocko.util.diff_fd: '
1053 'Order %i is not supported for %s derivative. Supported: %s' % (
1054 order, ['', '1st', '2nd'][n],
1055 ', '.join('%i' % order for order in sorted(funcs_n.keys()))))
1057 return func(dt, data)
1060class GlobalVars(object):
1061 reuse_store = dict()
1062 decitab_nmax = 0
1063 decitab = {}
1064 decimate_fir_coeffs = {}
1065 decimate_fir_remez_coeffs = {}
1066 decimate_iir_coeffs = {}
1067 re_frac = None
1070def decimate_coeffs(q, n=None, ftype='iir'):
1072 q = int(q)
1074 if n is None:
1075 if ftype == 'fir':
1076 n = 30
1077 elif ftype == 'fir-remez':
1078 n = 45*q
1079 else:
1080 n = 8
1082 if ftype == 'fir':
1083 coeffs = GlobalVars.decimate_fir_coeffs
1084 if (n, 1./q) not in coeffs:
1085 coeffs[n, 1./q] = signal.firwin(n+1, .75/q, window='hamming')
1087 b = coeffs[n, 1./q]
1088 return b, [1.], n
1090 elif ftype == 'fir-remez':
1091 coeffs = GlobalVars.decimate_fir_remez_coeffs
1092 if (n, 1./q) not in coeffs:
1093 coeffs[n, 1./q] = signal.remez(
1094 n+1, (0., .75/q, 1./q, 1.),
1095 (1., 0.), fs=2, weight=(1, 50))
1096 b = coeffs[n, 1./q]
1097 return b, [1.], n
1099 else:
1100 coeffs = GlobalVars.decimate_iir_coeffs
1101 if (n, 0.05, 0.8/q) not in coeffs:
1102 coeffs[n, 0.05, 0.8/q] = signal.cheby1(n, 0.05, 0.8/q)
1104 b, a = coeffs[n, 0.05, 0.8/q]
1105 return b, a, n
1108def decimate(x, q, n=None, ftype='iir', zi=None, ioff=0):
1109 '''
    Downsample the signal ``x`` by an integer factor ``q``, using an order
    ``n`` filter.

    By default, an order 8 Chebyshev type I filter is used. If ``ftype`` is
    ``'fir'``, a 30 point FIR filter with Hamming window is used instead.
1115 :param x: the signal to be downsampled (1D :class:`numpy.ndarray`)
1116 :param q: the downsampling factor
1117 :param n: order of the filter (1 less than the length of the filter for a
1118 `fir` filter)
1119 :param ftype: type of the filter; can be `iir`, `fir` or `fir-remez`
1121 :returns: the downsampled signal (1D :class:`numpy.ndarray`)
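    A minimal sketch (the input signal is arbitrary)::

        import numpy as num
        from pyrocko import util

        x = num.sin(num.linspace(0., 100., 1000))
        y = util.decimate(x, 5)   # downsample by a factor of 5 (IIR filter)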
1123 '''
1125 b, a, n = decimate_coeffs(q, n, ftype)
1127 if zi is None or zi is True:
1128 zi_ = num.zeros(max(len(a), len(b))-1, dtype=float)
1129 else:
1130 zi_ = zi
1132 y, zf = signal.lfilter(b, a, x, zi=zi_)
1134 if zi is not None:
1135 return y[n//2+ioff::q].copy(), zf
1136 else:
1137 return y[n//2+ioff::q].copy()
1140class UnavailableDecimation(Exception):
1141 '''
1142 Exception raised by :py:func:`decitab` for unavailable decimation factors.
1143 '''
1145 pass
1148def gcd(a, b, epsilon=1e-7):
1149 '''
1150 Greatest common divisor.
1151 '''
1153 while b > epsilon*a:
1154 a, b = b, a % b
1156 return a
1159def lcm(a, b):
1160 '''
1161 Least common multiple.
1162 '''
1164 return a*b // gcd(a, b)
1167def mk_decitab(nmax=100):
1168 '''
1169 Make table with decimation sequences.
1171 Decimation from one sampling rate to a lower one is achieved by a
    successive application of :py:func:`decimate` with small integer
    downsampling factors (because using large downsampling factors can make the
1174 decimation unstable or slow). This function sets up a table with downsample
1175 sequences for factors up to ``nmax``.
1176 '''
1178 tab = GlobalVars.decitab
1179 for i in range(1, 10):
1180 for j in range(1, i+1):
1181 for k in range(1, j+1):
1182 for l_ in range(1, k+1):
1183 for m in range(1, l_+1):
1184 p = i*j*k*l_*m
1185 if p > nmax:
1186 break
1187 if p not in tab:
1188 tab[p] = (i, j, k, l_, m)
1189 if i*j*k*l_ > nmax:
1190 break
1191 if i*j*k > nmax:
1192 break
1193 if i*j > nmax:
1194 break
1195 if i > nmax:
1196 break
1198 GlobalVars.decitab_nmax = nmax
1201def zfmt(n):
1202 return '%%0%ii' % (int(math.log10(n - 1)) + 1)
1205def _year_to_time(year):
1206 tt = (year, 1, 1, 0, 0, 0)
1207 return to_time_float(calendar.timegm(tt))
1210def _working_year(year):
1211 try:
1212 tt = (year, 1, 1, 0, 0, 0)
1213 t = calendar.timegm(tt)
1214 tt2_ = time.gmtime(t)
1215 tt2 = tuple(tt2_)[:6]
1216 if tt != tt2:
1217 return False
1219 s = '%i-01-01 00:00:00' % year
1220 s2 = time.strftime('%Y-%m-%d %H:%M:%S', tt2_)
1221 if s != s2:
1222 return False
1224 t3 = str_to_time(s2, format='%Y-%m-%d %H:%M:%S')
1225 s3 = time_to_str(t3, format='%Y-%m-%d %H:%M:%S')
1226 if s3 != s2:
1227 return False
1229 except Exception:
1230 return False
1232 return True
1235def working_system_time_range(year_min_lim=None, year_max_lim=None):
1236 '''
    Check the time range supported by the system's time conversion functions.

    Returns system time stamps for the start of the first and the last fully
    supported year. If these fall before 1900 or after 2100, the first/last
    fully supported century is returned.
1243 :returns: ``(tmin, tmax, year_min, year_max)``
1244 '''
1246 year0 = 2000
1247 year_min = year0
1248 year_max = year0
1250 itests = list(range(101))
1251 for i in range(19):
1252 itests.append(200 + i*100)
1254 for i in itests:
1255 year = year0 - i
1256 if year_min_lim is not None and year < year_min_lim:
1257 year_min = year_min_lim
1258 break
1259 elif not _working_year(year):
1260 break
1261 else:
1262 year_min = year
1264 for i in itests:
1265 year = year0 + i
1266 if year_max_lim is not None and year > year_max_lim:
1267 year_max = year_max_lim
1268 break
1269 elif not _working_year(year + 1):
1270 break
1271 else:
1272 year_max = year
1274 return (
1275 _year_to_time(year_min),
1276 _year_to_time(year_max),
1277 year_min, year_max)
1280g_working_system_time_range = None
1283def get_working_system_time_range():
1284 '''
1285 Caching variant of :py:func:`working_system_time_range`.
1286 '''
1288 global g_working_system_time_range
1289 if g_working_system_time_range is None:
1290 g_working_system_time_range = working_system_time_range()
1292 return g_working_system_time_range
1295def is_working_time(t):
1296 tmin, tmax, _, _ = get_working_system_time_range()
1297 return tmin <= t <= tmax
1300def julian_day_of_year(timestamp):
1301 '''
1302 Get the day number after the 1st of January of year in ``timestamp``.
1304 :returns: day number as int
1305 '''
1307 return time.gmtime(int(timestamp)).tm_yday
1310def hour_start(timestamp):
1311 '''
1312 Get beginning of hour for any point in time.
1314 :param timestamp: time instant as system timestamp (in seconds)
1316 :returns: instant of hour start as system timestamp
1317 '''
1319 tt = time.gmtime(int(timestamp))
1320 tts = tt[0:4] + (0, 0)
1321 return to_time_float(calendar.timegm(tts))
1324def day_start(timestamp):
1325 '''
1326 Get beginning of day for any point in time.
1328 :param timestamp: time instant as system timestamp (in seconds)
1330 :returns: instant of day start as system timestamp
1331 '''
1333 tt = time.gmtime(int(timestamp))
1334 tts = tt[0:3] + (0, 0, 0)
1335 return to_time_float(calendar.timegm(tts))
1338def month_start(timestamp):
1339 '''
1340 Get beginning of month for any point in time.
1342 :param timestamp: time instant as system timestamp (in seconds)
1344 :returns: instant of month start as system timestamp
1345 '''
1347 tt = time.gmtime(int(timestamp))
1348 tts = tt[0:2] + (1, 0, 0, 0)
1349 return to_time_float(calendar.timegm(tts))
1352def year_start(timestamp):
1353 '''
1354 Get beginning of year for any point in time.
1356 :param timestamp: time instant as system timestamp (in seconds)
1358 :returns: instant of year start as system timestamp
1359 '''
1361 tt = time.gmtime(int(timestamp))
1362 tts = tt[0:1] + (1, 1, 0, 0, 0)
1363 return to_time_float(calendar.timegm(tts))
1366def iter_days(tmin, tmax):
1367 '''
1368 Yields begin and end of days until given time span is covered.
1370 :param tmin,tmax: input time span
1372 :yields: tuples with (begin, end) of days as system timestamps
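    A usage sketch (time strings chosen arbitrarily)::

        from pyrocko import util

        tmin = util.str_to_time('2020-01-01 12:00:00')
        tmax = util.str_to_time('2020-01-03 12:00:00')

        for tday_min, tday_max in util.iter_days(tmin, tmax):
            print(util.time_to_str(tday_min), util.time_to_str(tday_max))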
1373 '''
1375 t = day_start(tmin)
1376 while t < tmax:
1377 tend = day_start(t + 26*60*60)
1378 yield t, tend
1379 t = tend
1382def iter_months(tmin, tmax):
1383 '''
1384 Yields begin and end of months until given time span is covered.
1386 :param tmin,tmax: input time span
1388 :yields: tuples with (begin, end) of months as system timestamps
1389 '''
1391 t = month_start(tmin)
1392 while t < tmax:
1393 tend = month_start(t + 24*60*60*33)
1394 yield t, tend
1395 t = tend
1398def iter_years(tmin, tmax):
1399 '''
1400 Yields begin and end of years until given time span is covered.
1402 :param tmin,tmax: input time span
1404 :yields: tuples with (begin, end) of years as system timestamps
1405 '''
1407 t = year_start(tmin)
1408 while t < tmax:
1409 tend = year_start(t + 24*60*60*369)
1410 yield t, tend
1411 t = tend
1414def today():
1415 return day_start(time.time())
1418def tomorrow():
1419 return day_start(time.time() + 24*60*60)
1422def decitab(n):
1423 '''
    Get integer decimation sequence for a given downsampling factor.
1426 :param n: target decimation factor
1428 :returns: tuple with downsampling sequence
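    For illustration (the exact decomposition returned may differ, but the
    factors always multiply to ``n``)::

        from pyrocko import util

        seq = util.decitab(6)   # e.g. (3, 2, 1, 1, 1)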
1429 '''
1431 if n > GlobalVars.decitab_nmax:
1432 mk_decitab(n*2)
1433 if n not in GlobalVars.decitab:
1434 raise UnavailableDecimation('ratio = %g' % n)
1435 return GlobalVars.decitab[n]
1438def ctimegm(s, format="%Y-%m-%d %H:%M:%S"):
1439 '''
1440 Convert string representing UTC time to system time.
1442 :param s: string to be interpreted
1443 :param format: format string passed to :py:func:`strptime`
1445 :returns: system time stamp
    Interprets a string with format ``'%Y-%m-%d %H:%M:%S'``, using strptime.
1449 .. note::
1450 This function is to be replaced by :py:func:`str_to_time`.
1451 '''
1453 return calendar.timegm(time.strptime(s, format))
1456def gmctime(t, format="%Y-%m-%d %H:%M:%S"):
1457 '''
1458 Get string representation from system time, UTC.
1460 Produces string with format ``'%Y-%m-%d %H:%M:%S'``, using strftime.
1462 .. note::
        This function is to be replaced by :py:func:`time_to_str`.
1464 '''
1466 return time.strftime(format, time.gmtime(t))
1469def gmctime_v(t, format="%a, %d %b %Y %H:%M:%S"):
1470 '''
1471 Get string representation from system time, UTC. Same as
1472 :py:func:`gmctime` but with a more verbose default format.
1474 .. note::
1475 This function is to be replaced by :py:func:`time_to_str`.
1476 '''
1478 return time.strftime(format, time.gmtime(t))
1481def gmctime_fn(t, format="%Y-%m-%d_%H-%M-%S"):
1482 '''
1483 Get string representation from system time, UTC. Same as
1484 :py:func:`gmctime` but with a default usable in filenames.
1486 .. note::
1487 This function is to be replaced by :py:func:`time_to_str`.
1488 '''
1490 return time.strftime(format, time.gmtime(t))
1493class TimeStrError(Exception):
1494 pass
1497class FractionalSecondsMissing(TimeStrError):
1498 '''
1499 Exception raised by :py:func:`str_to_time` when the given string lacks
1500 fractional seconds.
1501 '''
1503 pass
1506class FractionalSecondsWrongNumberOfDigits(TimeStrError):
1507 '''
1508 Exception raised by :py:func:`str_to_time` when the given string has an
1509 incorrect number of digits in the fractional seconds part.
1510 '''
1512 pass
1515def _endswith_n(s, endings):
1516 for ix, x in enumerate(endings):
1517 if s.endswith(x):
1518 return ix
1519 return -1
1522def str_to_time(s, format='%Y-%m-%d %H:%M:%S.OPTFRAC'):
1523 '''
1524 Convert string representing UTC time to floating point system time.
1526 :param s: string representing UTC time
1527 :param format: time string format
1528 :returns: system time stamp as floating point value
1530 Uses the semantics of :py:func:`time.strptime` but allows for fractional
1531 seconds. If the format ends with ``'.FRAC'``, anything after a dot is
1532 interpreted as fractional seconds. If the format ends with ``'.OPTFRAC'``,
    the fractional part, including the dot, is made optional. As a consequence,
    the time string and the format may not contain any other dots in that case.
    If the format ends with ``'.xFRAC'``, where x is 1, 2, or 3, it is ensured
    that exactly that number of digits is present in the fractional seconds.
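    Examples (the returned values are floating point system timestamps)::

        from pyrocko import util

        t1 = util.str_to_time('2020-08-27 10:22:00.111222')
        t2 = util.str_to_time('2020-08-27 10:22:00')
        t3 = util.str_to_time('2020-08-27 10:22', format='%Y-%m-%d %H:%M')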
1538 '''
1540 time_float = get_time_float()
1542 if util_ext is not None:
1543 try:
1544 t, tfrac = util_ext.stt(s, format)
1545 except util_ext.UtilExtError as e:
1546 raise TimeStrError(
1547 '%s, string=%s, format=%s' % (str(e), s, format))
1549 return time_float(t)+tfrac
1551 fracsec = 0.
1552 fixed_endings = '.FRAC', '.1FRAC', '.2FRAC', '.3FRAC'
1554 iend = _endswith_n(format, fixed_endings)
1555 if iend != -1:
1556 dotpos = s.rfind('.')
1557 if dotpos == -1:
1558 raise FractionalSecondsMissing(
1559 'string=%s, format=%s' % (s, format))
1561 if iend > 0 and iend != (len(s)-dotpos-1):
1562 raise FractionalSecondsWrongNumberOfDigits(
1563 'string=%s, format=%s' % (s, format))
1565 format = format[:-len(fixed_endings[iend])]
1566 fracsec = float(s[dotpos:])
1567 s = s[:dotpos]
1569 elif format.endswith('.OPTFRAC'):
1570 dotpos = s.rfind('.')
1571 format = format[:-8]
1572 if dotpos != -1 and len(s[dotpos:]) > 1:
1573 fracsec = float(s[dotpos:])
1575 if dotpos != -1:
1576 s = s[:dotpos]
1578 try:
1579 return time_float(calendar.timegm(time.strptime(s, format))) \
1580 + fracsec
1581 except ValueError as e:
1582 raise TimeStrError('%s, string=%s, format=%s' % (str(e), s, format))
1585stt = str_to_time
1588def str_to_time_fillup(s):
1589 '''
1590 Default :py:func:`str_to_time` with filling in of missing values.
    Allows abbreviated time strings, e.g. ``'2010-01-01 00:00'``,
    ``'2010-01-01 00'``, ..., or ``'2010'`` in place of
    ``'2010-01-01 00:00:00'``.
1594 '''
1596 if len(s) in (4, 7, 10, 13, 16):
1597 s += '0000-01-01 00:00:00'[len(s):]
1599 return str_to_time(s)
1602def time_to_str(t, format='%Y-%m-%d %H:%M:%S.3FRAC'):
1603 '''
1604 Get string representation for floating point system time.
1606 :param t: floating point system time
1607 :param format: time string format
1608 :returns: string representing UTC time
1610 Uses the semantics of :py:func:`time.strftime` but additionally allows for
1611 fractional seconds. If ``format`` contains ``'.xFRAC'``, where ``x`` is a
1612 digit between 1 and 9, this is replaced with the fractional part of ``t``
1613 with ``x`` digits precision.
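    Example (timestamp and formats chosen arbitrarily)::

        from pyrocko import util

        t = util.str_to_time('2020-08-27 10:22:00.111222')
        s = util.time_to_str(t)                       # -> '2020-08-27 10:22:00.111'
        s2 = util.time_to_str(t, format='%Y/%m/%d')   # -> '2020/08/27'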
1614 '''
1616 if pyrocko.grumpy > 0:
1617 check_time_class(t, 'warn' if pyrocko.grumpy == 1 else 'raise')
1619 if isinstance(format, int):
1620 if format > 0:
1621 format = '%Y-%m-%d %H:%M:%S.' + '%iFRAC' % format
1622 else:
1623 format = '%Y-%m-%d %H:%M:%S'
1625 if util_ext is not None:
1626 t0 = num.floor(t)
1627 try:
1628 return util_ext.tts(int(t0), float(t - t0), format)
1629 except util_ext.UtilExtError as e:
1630 raise TimeStrError(
1631 '%s, timestamp=%f, format=%s' % (str(e), t, format))
1633 if not GlobalVars.re_frac:
1634 GlobalVars.re_frac = re.compile(r'\.[1-9]FRAC')
1635 GlobalVars.frac_formats = dict(
1636 [('.%sFRAC' % x, '%.'+x+'f') for x in '123456789'])
1638 ts = float(num.floor(t))
1639 tfrac = t-ts
1641 m = GlobalVars.re_frac.search(format)
1642 if m:
1643 sfrac = (GlobalVars.frac_formats[m.group(0)] % tfrac)
1644 if sfrac[0] == '1':
1645 ts += 1.
1647 format, nsub = GlobalVars.re_frac.subn(sfrac[1:], format, 1)
1649 return time.strftime(format, time.gmtime(ts))
1652tts = time_to_str
1653_abbr_weekday = 'Mon Tue Wed Thu Fri Sat Sun'.split()
1654_abbr_month = 'Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec'.split()
1657def mystrftime(fmt=None, tt=None, milliseconds=0):
1658 # Needed by Snuffler for the time axis. In other cases `time_to_str`
1659 # should be used.
1661 if fmt is None:
1662 fmt = '%Y-%m-%d %H:%M:%S .%r'
1664 # Get these two locale independent, needed by Snuffler.
1665 # Setting the locale seems to have no effect.
1666 fmt = fmt.replace('%a', _abbr_weekday[tt.tm_wday])
1667 fmt = fmt.replace('%b', _abbr_month[tt.tm_mon-1])
1669 fmt = fmt.replace('%r', '%03i' % int(round(milliseconds)))
1670 fmt = fmt.replace('%u', '%06i' % int(round(milliseconds*1000)))
1671 fmt = fmt.replace('%n', '%09i' % int(round(milliseconds*1000000)))
1672 return time.strftime(fmt, tt)
1675def gmtime_x(timestamp):
1676 etimestamp = float(num.floor(timestamp))
1677 tt = time.gmtime(etimestamp)
1678 ms = (timestamp-etimestamp)*1000
1679 return tt, ms
1682def plural_s(n):
1683 if not isinstance(n, int):
1684 n = len(n)
1686 return 's' if n != 1 else ''
1689def ensuredirs(dst):
1690 '''
1691 Create all intermediate path components for a target path.
1693 :param dst: target path
1695 The leaf part of the target path is not created (use :py:func:`ensuredir`
    if the target path is a directory to be created).
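    A short sketch (the path is hypothetical)::

        from pyrocko import util

        util.ensuredirs('some/deep/directory/file.txt')
        # creates 'some', 'some/deep' and 'some/deep/directory',
        # but not the file itself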
1697 '''
1699 d, x = os.path.split(dst.rstrip(os.sep))
1700 dirs = []
1701 while d and not os.path.exists(d):
1702 dirs.append(d)
1703 d, x = os.path.split(d)
1705 dirs.reverse()
1707 for d in dirs:
1708 try:
1709 os.mkdir(d)
1710 except OSError as e:
1711 if not e.errno == errno.EEXIST:
1712 raise
1715def ensuredir(dst):
1716 '''
1717 Create directory and all intermediate path components to it as needed.
1719 :param dst: directory name
1721 Nothing is done if the given target already exists.
1722 '''
1724 if os.path.exists(dst):
1725 return
1727 dst.rstrip(os.sep)
1729 ensuredirs(dst)
1730 try:
1731 os.mkdir(dst)
1732 except OSError as e:
1733 if not e.errno == errno.EEXIST:
1734 raise
1737def reuse(x):
1738 '''
1739 Get unique instance of an object.
1741 :param x: hashable object
1742 :returns: reference to x or an equivalent object
1744 Cache object ``x`` in a global dict for reuse, or if x already
1745 is in that dict, return a reference to it.
1746 '''
1748 grs = GlobalVars.reuse_store
1749 if x not in grs:
1750 grs[x] = x
1751 return grs[x]
1754def deuse(x):
1755 grs = GlobalVars.reuse_store
1756 if x in grs:
1757 del grs[x]
1760class Anon(object):
1761 '''
1762 Dict-to-object utility.
1764 Any given arguments are stored as attributes.
1766 Example::
1768 a = Anon(x=1, y=2)
        print(a.x, a.y)
1770 '''
1772 def __init__(self, **dict):
1773 for k in dict:
1774 self.__dict__[k] = dict[k]
1777def iter_select_files(
1778 paths,
1779 include=None,
1780 exclude=None,
1781 selector=None,
1782 show_progress=True,
1783 pass_through=None):
1785 '''
1786 Recursively select files (generator variant).
1788 See :py:func:`select_files`.
1789 '''
1791 if show_progress:
1792 progress_beg('selecting files...')
1794 ngood = 0
1795 check_include = None
1796 if include is not None:
1797 rinclude = re.compile(include)
1799 def check_include(path):
1800 m = rinclude.search(path)
1801 if not m:
1802 return False
1804 if selector is None:
1805 return True
1807 infos = Anon(**m.groupdict())
1808 return selector(infos)
1810 check_exclude = None
1811 if exclude is not None:
1812 rexclude = re.compile(exclude)
1814 def check_exclude(path):
1815 return not bool(rexclude.search(path))
1817 if check_include and check_exclude:
1819 def check(path):
1820 return check_include(path) and check_exclude(path)
1822 elif check_include:
1823 check = check_include
1825 elif check_exclude:
1826 check = check_exclude
1828 else:
1829 check = None
1831 if isinstance(paths, str):
1832 paths = [paths]
1834 for path in paths:
1835 if pass_through and pass_through(path):
1836 if check is None or check(path):
1837 yield path
1839 elif os.path.isdir(path):
1840 for (dirpath, dirnames, filenames) in os.walk(path):
1841 dirnames.sort()
1842 filenames.sort()
1843 for filename in filenames:
1844 path = op.join(dirpath, filename)
1845 if check is None or check(path):
1846 yield os.path.abspath(path)
1847 ngood += 1
1848 else:
1849 if check is None or check(path):
1850 yield os.path.abspath(path)
1851 ngood += 1
1853 if show_progress:
1854 progress_end('%i file%s selected.' % (ngood, plural_s(ngood)))
1857def select_files(
1858 paths, include=None, exclude=None, selector=None, show_progress=True,
1859 regex=None):
1861 '''
1862 Recursively select files.
1864 :param paths: entry path names
1865 :param include: pattern for conditional inclusion
1866 :param exclude: pattern for conditional exclusion
1867 :param selector: callback for conditional inclusion
1868 :param show_progress: if True, indicate start and stop of processing
1869 :param regex: alias for ``include`` (backwards compatibility)
1870 :returns: list of path names
1872 Recursively finds all files under given entry points ``paths``. If
1873 parameter ``include`` is a regular expression, only files with matching
1874 path names are included. If additionally parameter ``selector`` is given a
1875 callback function, only files for which the callback returns ``True`` are
    included. The callback is called with a single argument, an object having
    as attributes any named groups given in ``include``.
1880 Examples
1882 To find all files ending in ``'.mseed'`` or ``'.msd'``::
1884 select_files(paths,
1885 include=r'\\.(mseed|msd)$')
1887 To find all files ending with ``'$Year.$DayOfYear'``, having set 2009 for
1888 the year::
1890 select_files(paths,
1891 include=r'(?P<year>\\d\\d\\d\\d)\\.(?P<doy>\\d\\d\\d)$',
1892 selector=(lambda x: int(x.year) == 2009))
1893 '''
1894 if regex is not None:
1895 assert include is None
1896 include = regex
1898 return list(iter_select_files(
1899 paths, include, exclude, selector, show_progress))
1902def base36encode(number, alphabet='0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'):
1903 '''
1904 Convert positive integer to a base36 string.
1905 '''
1907 if not isinstance(number, (int, long)):
1908 raise TypeError('number must be an integer')
1909 if number < 0:
1910 raise ValueError('number must be positive')
1912 # Special case for small numbers
1913 if number < 36:
1914 return alphabet[number]
1916 base36 = ''
1917 while number != 0:
1918 number, i = divmod(number, 36)
1919 base36 = alphabet[i] + base36
1921 return base36
1924def base36decode(number):
1925 '''
    Decode a base36 encoded positive integer.
1927 '''
1929 return int(number, 36)
1932class UnpackError(Exception):
1933 '''
1934 Exception raised when :py:func:`unpack_fixed` encounters an error.
1935 '''
1937 pass
1940ruler = ''.join(['%-10i' % i for i in range(8)]) \
1941 + '\n' + '0123456789' * 8 + '\n'
1944def unpack_fixed(format, line, *callargs):
1945 '''
1946 Unpack fixed format string, as produced by many fortran codes.
1948 :param format: format specification
1949 :param line: string to be processed
1950 :param callargs: callbacks for callback fields in the format
1952 The format is described by a string of comma-separated fields. Each field
1953 is defined by a character for the field type followed by the field width. A
    question mark may be appended to the field description to mark the field as
    optional (the corresponding part of the data string may then be filled with
    blanks, in which case ``None`` is returned).
1958 The following field types are available:
1960 ==== ================================================================
1961 Type Description
1962 ==== ================================================================
1963 A string (full field width is extracted)
1964 a string (whitespace at the beginning and the end is removed)
1965 i integer value
1966 f floating point value
1967 @ special type, a callback must be given for the conversion
1968 x special field type to skip parts of the string
1969 ==== ================================================================
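    A small sketch (format and data line are made up)::

        values = unpack_fixed('a4,i2,x1,f5', 'AB  12 3.14')
        # -> ['AB', 12, 3.14]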
1970 '''
1972 ipos = 0
1973 values = []
1974 icall = 0
1975 for form in format.split(','):
1976 form = form.strip()
1977 optional = form[-1] == '?'
1978 form = form.rstrip('?')
1979 typ = form[0]
1980 ln = int(form[1:])
1981 s = line[ipos:ipos+ln]
1982 cast = {
1983 'x': None,
1984 'A': str,
1985 'a': lambda x: x.strip(),
1986 'i': int,
1987 'f': float,
1988 '@': 'extra'}[typ]
1990 if cast == 'extra':
1991 cast = callargs[icall]
1992 icall += 1
1994 if cast is not None:
1995 if optional and s.strip() == '':
1996 values.append(None)
1997 else:
1998 try:
1999 values.append(cast(s))
2000 except Exception:
2001 mark = [' '] * 80
2002 mark[ipos:ipos+ln] = ['^'] * ln
2003 mark = ''.join(mark)
2004 raise UnpackError(
2005 'Invalid cast to type "%s" at position [%i:%i] of '
2006 'line: \n%s%s\n%s' % (
2007 typ, ipos, ipos+ln, ruler, line.rstrip(), mark))
2009 ipos += ln
2011 return values
2014_pattern_cache = {}
2017def _nslc_pattern(pattern):
2018 if pattern not in _pattern_cache:
2019 rpattern = re.compile(fnmatch.translate(pattern), re.I)
2020 _pattern_cache[pattern] = rpattern
2021 else:
2022 rpattern = _pattern_cache[pattern]
2024 return rpattern
2027def match_nslc(patterns, nslc):
2028 '''
2029 Match network-station-location-channel code against pattern or list of
2030 patterns.
2032 :param patterns: pattern or list of patterns
2033 :param nslc: tuple with (network, station, location, channel) as strings
2035 :returns: ``True`` if the pattern matches or if any of the given patterns
2036 match; or ``False``.
2038 The patterns may contain shell-style wildcards: \\*, ?, [seq], [!seq].
2040 Example::
2042 match_nslc('*.HAM3.*.BH?', ('GR', 'HAM3', '', 'BHZ')) # -> True
2043 '''
2045 if isinstance(patterns, str):
2046 patterns = [patterns]
2048 if not isinstance(nslc, str):
2049 s = '.'.join(nslc)
2050 else:
2051 s = nslc
2053 for pattern in patterns:
2054 if _nslc_pattern(pattern).match(s):
2055 return True
2057 return False
2060def match_nslcs(patterns, nslcs):
2061 '''
2062 Get network-station-location-channel codes that match given pattern or any
2063 of several given patterns.
2065 :param patterns: pattern or list of patterns
2066 :param nslcs: list of (network, station, location, channel) tuples
2068 See also :py:func:`match_nslc`
2069 '''
2071 matching = []
2072 for nslc in nslcs:
2073 if match_nslc(patterns, nslc):
2074 matching.append(nslc)
2076 return matching
2079class Timeout(Exception):
2080 pass
2083def create_lockfile(fn, timeout=None, timewarn=10.):
2084 t0 = time.time()
2086 while True:
2087 try:
2088 f = os.open(fn, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
2089 os.close(f)
2090 return
2092 except OSError as e:
2093 if e.errno == errno.EEXIST:
2094 tnow = time.time()
2096 if timeout is not None and tnow - t0 > timeout:
2097 raise Timeout(
                    'Timeout (%gs) occurred while waiting to get exclusive '
2099 'access to: %s' % (timeout, fn))
2101 if timewarn is not None and tnow - t0 > timewarn:
2102 logger.warning(
2103 'Waiting since %gs to get exclusive access to: %s' % (
2104 timewarn, fn))
2106 timewarn *= 2
2108 time.sleep(0.01)
2109 else:
2110 raise
2113def delete_lockfile(fn):
2114 os.unlink(fn)
class Lockfile(object):
2119 def __init__(self, path, timeout=5, timewarn=10.):
2120 self._path = path
2121 self._timeout = timeout
2122 self._timewarn = timewarn
2124 def __enter__(self):
2125 create_lockfile(
2126 self._path, timeout=self._timeout, timewarn=self._timewarn)
2127 return None
2129 def __exit__(self, type, value, traceback):
2130 delete_lockfile(self._path)
2133class SoleError(Exception):
2134 '''
    Exception raised by objects of type :py:class:`Sole`, when a concurrent
2136 instance is running.
2137 '''
2139 pass
2142class Sole(object):
2143 '''
2144 Use POSIX advisory file locking to ensure that only a single instance of a
2145 program is running.
2147 :param pid_path: path to lockfile to be used
2149 Usage::
2151 from pyrocko.util import Sole, SoleError, setup_logging
2152 import os
2154 setup_logging('my_program')
2156 pid_path = os.path.join(os.environ['HOME'], '.my_program_lock')
2157 try:
2158 sole = Sole(pid_path)
        except SoleError as e:
            logger.fatal(str(e))
2162 sys.exit(1)
2163 '''
2165 def __init__(self, pid_path):
2166 self._pid_path = pid_path
2167 self._other_running = False
2168 ensuredirs(self._pid_path)
2169 self._lockfile = None
2170 self._os = os
2172 if not fcntl:
2173 raise SoleError(
2174 'Python standard library module "fcntl" not available on '
2175 'this platform.')
2177 self._fcntl = fcntl
2179 try:
2180 self._lockfile = os.open(self._pid_path, os.O_CREAT | os.O_WRONLY)
2181 except Exception:
2182 raise SoleError(
2183 'Cannot open lockfile (path = %s)' % self._pid_path)
2185 try:
2186 fcntl.lockf(self._lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
2188 except IOError:
2189 self._other_running = True
2190 try:
2191 f = open(self._pid_path, 'r')
2192 pid = f.read().strip()
2193 f.close()
2194 except Exception:
2195 pid = '?'
2197 raise SoleError('Other instance is running (pid = %s)' % pid)
2199 try:
2200 os.ftruncate(self._lockfile, 0)
            os.write(self._lockfile, ('%i\n' % os.getpid()).encode())
2202 os.fsync(self._lockfile)
2204 except Exception:
2205 # the pid is only stored for user information, so this is allowed
2206 # to fail
2207 pass
2209 def __del__(self):
2210 if not self._other_running:
2211 if self._lockfile is not None:
2212 self._fcntl.lockf(self._lockfile, self._fcntl.LOCK_UN)
2213 self._os.close(self._lockfile)
2214 try:
2215 self._os.unlink(self._pid_path)
2216 except Exception:
2217 pass
2220re_escapequotes = re.compile(r"(['\\])")
2223def escapequotes(s):
2224 return re_escapequotes.sub(r"\\\1", s)
2227class TableWriter(object):
2228 '''
2229 Write table of space separated values to a file.
2231 :param f: file like object
2233 Strings containing spaces are quoted on output.
2234 '''
2236 def __init__(self, f):
2237 self._f = f
2239 def writerow(self, row, minfieldwidths=None):
2241 '''
2242 Write one row of values to underlying file.
2244 :param row: iterable of values
2245 :param minfieldwidths: minimum field widths for the values
        Each value in ``row`` is converted to a string and optionally padded
2248 with blanks. The resulting strings are output separated with blanks. If
2249 any values given are strings and if they contain whitespace, they are
2250 quoted with single quotes, and any internal single quotes are
2251 backslash-escaped.
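        A usage sketch (values are made up)::

            import io

            f = io.StringIO()
            writer = TableWriter(f)
            writer.writerow(['STA1', 11.245, 'a value with spaces'], (5, 9))
            # the string containing spaces is written quoted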
2252 '''
2254 out = []
2256 for i, x in enumerate(row):
2257 w = 0
2258 if minfieldwidths and i < len(minfieldwidths):
2259 w = minfieldwidths[i]
2261 if isinstance(x, str):
2262 if re.search(r"\s|'", x):
2263 x = "'%s'" % escapequotes(x)
2265 x = x.ljust(w)
2266 else:
2267 x = str(x).rjust(w)
2269 out.append(x)
2271 self._f.write(' '.join(out).rstrip() + '\n')
2274class TableReader(object):
2276 '''
2277 Read table of space separated values from a file.
2279 :param f: file-like object
2281 This uses Pythons shlex module to tokenize lines. Should deal correctly
2282 with quoted strings.
2283 '''
2285 def __init__(self, f):
2286 self._f = f
2287 self.eof = False
2289 def readrow(self):
2290 '''
2291 Read one row from the underlying file and tokenize it with :py:func:`qsplit`.
2293 :returns: tokenized line as a list of strings.
2294 '''
2296 line = self._f.readline()
2297 if not line:
2298 self.eof = True
2299 return []
2300 line = line.strip()
2301 if line.startswith('#'):
2302 return []
2304 return qsplit(line)
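# Illustrative sketch (not part of the original module): writing a table with
# TableWriter and reading it back with TableReader. The file name used here is
# hypothetical and the values shown are indicative.
#
#     with open('/tmp/example.table', 'w') as f:
#         TableWriter(f).writerow(['STA.N', 55.3, 'Sparrow Island'])
#
#     with open('/tmp/example.table', 'r') as f:
#         row = TableReader(f).readrow()
#         # row -> ['STA.N', '55.3', 'Sparrow Island']  (values come back as strings)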
2307def gform(number, significant_digits=3):
2308 '''
2309 Pretty print floating point numbers.
2311 Align floating point numbers at the decimal dot.
2313 ::
2315 | -d.dde+xxx|
2316 | -d.dde+xx |
2317 |-ddd. |
2318 | -dd.d |
2319 | -d.dd |
2320 | -0.ddd |
2321 | -0.0ddd |
2322 | -0.00ddd |
2323 | -d.dde-xx |
2324 | -d.dde-xxx|
2325 | nan|
2328 The formatted string has length ``significant_digits * 2 + 6``.
2329 '''
2331 no_exp_range = (pow(10., -1),
2332 pow(10., significant_digits))
2333 width = significant_digits+significant_digits-1+1+1+5
2335 if (no_exp_range[0] <= abs(number) < no_exp_range[1]) or number == 0.:
2336 s = ('%#.*g' % (significant_digits, number)).rstrip('0')
2337 else:
2338 s = '%.*E' % (significant_digits-1, number)
2339 s = (' '*(-s.find('.')+(significant_digits+1))+s).ljust(width)
2340 if s.strip().lower() == 'nan':
2341 s = 'nan'.rjust(width)
2342 return s
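# Illustration (not part of the original module): with the default of 3
# significant digits, gform() returns 12-character strings aligned at the
# decimal dot. Output shown is indicative:
#
#     gform(0.5)     # -> '   0.5      '
#     gform(1234.5)  # -> '   1.23E+03 '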
2345def human_bytesize(value):
2347 exts = 'Bytes kB MB GB TB PB EB ZB YB'.split()
2349 if value == 1:
2350 return '1 Byte'
2352 for i, ext in enumerate(exts):
2353 x = float(value) / 1000**i
2354 if round(x) < 10. and not value < 1000:
2355 return '%.1f %s' % (x, ext)
2356 if round(x) < 1000.:
2357 return '%.0f %s' % (x, ext)
2359 return '%i Bytes' % value
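# Illustration (not part of the original module): human_bytesize() uses
# decimal (SI) prefixes, e.g.:
#
#     human_bytesize(1)     # -> '1 Byte'
#     human_bytesize(123)   # -> '123 Bytes'
#     human_bytesize(5000)  # -> '5.0 kB'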
2362re_compatibility = re.compile(
2363 r'!pyrocko\.(trace|gf\.(meta|seismosizer)|fomosto\.' +
2364 r'(dummy|poel|qseis|qssp))\.'
2365)
2368def pf_is_old(fn):
2369 oldstyle = False
2370 with open(fn, 'r') as f:
2371 for line in f:
2372 if re_compatibility.search(line):
2373 oldstyle = True
2375 return oldstyle
2378def pf_upgrade(fn):
2379 need = pf_is_old(fn)
2380 if need:
2381 fn_temp = fn + '.temp'
2383 with open(fn, 'r') as fin:
2384 with open(fn_temp, 'w') as fout:
2385 for line in fin:
2386 line = re_compatibility.sub('!pf.', line)
2387 fout.write(line)
2389 os.rename(fn_temp, fn)
2391 return need
2394def read_leap_seconds(tzfile='/usr/share/zoneinfo/right/UTC'):
2395 '''
2396 Extract leap second information from tzdata.
2398 Based on example at http://stackoverflow.com/questions/19332902/\
2399 extract-historic-leap-seconds-from-tzdata
2401 See also 'man 5 tzfile'.
2402 '''
2404 from struct import unpack, calcsize
2405 out = []
2406 with open(tzfile, 'rb') as f:
2407 # read header
2408 fmt = '>4s c 15x 6l'
2409 (magic, format, ttisgmtcnt, ttisstdcnt, leapcnt, timecnt,
2410 typecnt, charcnt) = unpack(fmt, f.read(calcsize(fmt)))
2411 assert magic == 'TZif'.encode('US-ASCII'), 'Not a timezone file'
2413 # skip over some uninteresting data
2414 fmt = '>%(timecnt)dl %(timecnt)dB %(ttinfo)s %(charcnt)ds' % dict(
2415 timecnt=timecnt, ttinfo='lBB'*typecnt, charcnt=charcnt)
2416 f.read(calcsize(fmt))
2418 # read leap-seconds
2419 fmt = '>2l'
2420 for i in range(leapcnt):
2421 tleap, nleap = unpack(fmt, f.read(calcsize(fmt)))
2422 out.append((tleap-nleap+1, nleap))
2424 return out
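# Illustration (not part of the original module): read_leap_seconds() returns
# (timestamp, leap_second_count) pairs; the exact entries depend on the
# installed tzdata:
#
#     for t, nleap in read_leap_seconds():
#         print(time_to_str(t), nleap)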
2427class LeapSecondsError(Exception):
2428 pass
2431class LeapSecondsOutdated(LeapSecondsError):
2432 pass
2435class InvalidLeapSecondsFile(LeapSecondsOutdated):
2436 pass
2439def parse_leap_seconds_list(fn):
2440 data = []
2441 texpires = None
2442 try:
2443 t0 = int(round(str_to_time('1900-01-01 00:00:00')))
2444 except TimeStrError:
2445 t0 = int(round(str_to_time('1970-01-01 00:00:00'))) - 2208988800
2447 tnow = int(round(time.time()))
2449 if not op.exists(fn):
2450 raise LeapSecondsOutdated('no leap seconds file found')
2452 try:
2453 with open(fn, 'rb') as f:
2454 for line in f:
2455 if line.strip().startswith(b'<!DOCTYPE'):
2456 raise InvalidLeapSecondsFile('invalid leap seconds file')
2458 if line.startswith(b'#@'):
2459 texpires = int(line.split()[1]) + t0
2460 elif line.startswith(b'#') or len(line) < 5:
2461 pass
2462 else:
2463 toks = line.split()
2464 t = int(toks[0]) + t0
2465 nleap = int(toks[1]) - 10
2466 data.append((t, nleap))
2468 except IOError:
2469 raise LeapSecondsError('cannot read leap seconds file %s' % fn)
2471 if texpires is None or tnow > texpires:
2472 raise LeapSecondsOutdated('leap seconds list is outdated')
2474 return data
2477def read_leap_seconds2():
2478 from pyrocko import config
2479 conf = config.config()
2480 fn = conf.leapseconds_path
2481 url = conf.leapseconds_url
2482 # check for outdated default URL
2483 if url == 'http://www.ietf.org/timezones/data/leap-seconds.list':
2484 url = 'https://www.ietf.org/timezones/data/leap-seconds.list'
2485 logger.info(
2486 'Leap seconds default URL is now: %s\nUsing new default.' % url)
2488 for i in range(3):
2489 try:
2490 return parse_leap_seconds_list(fn)
2492 except LeapSecondsOutdated:
2493 try:
2494 logger.info('updating leap seconds list...')
2495 download_file(url, fn)
2497 except Exception as e:
2498 raise LeapSecondsError(
2499 'cannot download leap seconds list from %s to %s (%s)'
2500 % (url, fn, e))
2502 raise LeapSecondsError('Could not retrieve/read leap seconds file.')
2505def gps_utc_offset(t_utc):
2506 '''
2507 Time offset ``t_gps - t_utc`` (in seconds) for a given ``t_utc``.
2508 '''
2509 ls = read_leap_seconds2()
2510 i = 0
2511 if t_utc < ls[0][0]:
2512 return ls[0][1] - 1 - 9
2514 while i < len(ls) - 1:
2515 if ls[i][0] <= t_utc and t_utc < ls[i+1][0]:
2516 return ls[i][1] - 9
2517 i += 1
2519 return ls[-1][1] - 9
2522def utc_gps_offset(t_gps):
2523 '''
2524 Time offset ``t_utc - t_gps`` (in seconds) for a given ``t_gps``.
2525 '''
2526 ls = read_leap_seconds2()
2528 if t_gps < ls[0][0] + ls[0][1] - 9:
2529 return - (ls[0][1] - 1 - 9)
2531 i = 0
2532 while i < len(ls) - 1:
2533 if ls[i][0] + ls[i][1] - 9 <= t_gps \
2534 and t_gps < ls[i+1][0] + ls[i+1][1] - 9:
2535 return - (ls[i][1] - 9)
2536 i += 1
2538 return - (ls[-1][1] - 9)
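# Illustration (not part of the original module): for recent dates the
# GPS-UTC offset is a small positive number of seconds. Requires an
# up-to-date leap seconds file; values shown are indicative:
#
#     t_utc = str_to_time('2020-01-01 00:00:00')
#     gps_utc_offset(t_utc)       # -> 18
#     utc_gps_offset(t_utc + 18)  # -> -18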
2541def make_iload_family(iload_fh, doc_fmt='FMT', doc_yielded_objects='FMT'):
2542 import itertools
2543 import glob
2544 from pyrocko.io.io_common import FileLoadError
2546 def iload_filename(filename, **kwargs):
2547 try:
2548 with open(filename, 'rb') as f:
2549 for cr in iload_fh(f, **kwargs):
2550 yield cr
2552 except FileLoadError as e:
2553 e.set_context('filename', filename)
2554 raise
2556 def iload_dirname(dirname, **kwargs):
2557 for entry in os.listdir(dirname):
2558 fpath = op.join(dirname, entry)
2559 if op.isfile(fpath):
2560 for cr in iload_filename(fpath, **kwargs):
2561 yield cr
2563 def iload_glob(pattern, **kwargs):
2565 for fn in glob.iglob(pattern):
2566 for cr in iload_filename(fn, **kwargs):
2567 yield cr
2569 def iload(source, **kwargs):
2570 if isinstance(source, str):
2571 if op.isdir(source):
2572 return iload_dirname(source, **kwargs)
2573 elif op.isfile(source):
2574 return iload_filename(source, **kwargs)
2575 else:
2576 return iload_glob(source, **kwargs)
2578 elif hasattr(source, 'read'):
2579 return iload_fh(source, **kwargs)
2580 else:
2581 return itertools.chain.from_iterable(
2582 iload(subsource, **kwargs) for subsource in source)
2584 iload_filename.__doc__ = '''
2585 Read %s information from named file.
2586 ''' % doc_fmt
2588 iload_dirname.__doc__ = '''
2589 Read %s information from directory of %s files.
2590 ''' % (doc_fmt, doc_fmt)
2592 iload_glob.__doc__ = '''
2593 Read %s information from files matching a glob pattern.
2594 ''' % doc_fmt
2596 iload.__doc__ = '''
2597 Load %s information from given source(s).
2599 The ``source`` can be specified as the name of a %s file, the name of a
2600 directory containing %s files, a glob pattern of %s files, an open
2601 file handle or an iterator yielding any of the aforementioned sources.
2603 This function behaves as a generator yielding %s objects.
2604 ''' % (doc_fmt, doc_fmt, doc_fmt, doc_fmt, doc_yielded_objects)
2606 for f in iload_filename, iload_dirname, iload_glob, iload:
2607 f.__module__ = iload_fh.__module__
2609 return iload_filename, iload_dirname, iload_glob, iload
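# Illustrative sketch (not part of the original module): building a loader
# family from a single file-handle based reader. The reader and the glob
# pattern shown here are hypothetical.
#
#     def iload_fh_example(f):
#         # files are opened in binary mode by iload_filename()
#         for line in f:
#             yield line.decode('utf-8').rstrip()
#
#     iload_filename, iload_dirname, iload_glob, iload = \
#         make_iload_family(iload_fh_example, 'example', 'str')
#
#     # for obj in iload('data/*.txt'):
#     #     ...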
2612class Inconsistency(Exception):
2613 pass
2616def consistency_check(list_of_tuples, message='values differ:'):
2617 '''
2618 Check for inconsistencies.
2620 Given a list of tuples, check that all tuple elements except for the first one
2621 match. E.g. ``[('STA.N', 55.3, 103.2), ('STA.E', 55.3, 103.2)]`` would be
2622 valid because the coordinates at the two channels are the same.
2623 '''
2625 if len(list_of_tuples) >= 2:
2626 if any(t[1:] != list_of_tuples[0][1:] for t in list_of_tuples[1:]):
2627 raise Inconsistency('%s\n' % message + '\n'.join(
2628 ' %s: %s' % (t[0], ', '.join(str(x) for x in t[1:]))
2629 for t in list_of_tuples))
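# Illustration (not part of the original module): the first tuple element is
# treated as a label and ignored in the comparison.
#
#     consistency_check([('STA.N', 55.3, 103.2), ('STA.E', 55.3, 103.2)])
#     # passes silently
#
#     consistency_check([('STA.N', 55.3, 103.2), ('STA.E', 55.4, 103.2)])
#     # raises Inconsistency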
2632class defaultzerodict(dict):
2633 def __missing__(self, k):
2634 return 0
2637def mostfrequent(x):
2638 c = defaultzerodict()
2639 for e in x:
2640 c[e] += 1
2642 return sorted(list(c.keys()), key=lambda k: c[k])[-1]
2645def consistency_merge(list_of_tuples,
2646 message='values differ:',
2647 error='raise',
2648 merge=mostfrequent):
2650 assert error in ('raise', 'warn', 'ignore')
2652 if len(list_of_tuples) == 0:
2653 raise Exception('cannot merge empty sequence')
2655 try:
2656 consistency_check(list_of_tuples, message)
2657 return list_of_tuples[0][1:]
2658 except Inconsistency as e:
2659 if error == 'raise':
2660 raise
2662 elif error == 'warn':
2663 logger.warning(str(e))
2665 return tuple([merge(x) for x in list(zip(*list_of_tuples))[1:]])
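# Illustration (not part of the original module): with error='warn', differing
# values are merged column-wise with the given merge function (by default
# mostfrequent()):
#
#     consistency_merge(
#         [('STA.N', 55.3, 103.2),
#          ('STA.E', 55.3, 103.2),
#          ('STA.Z', 55.4, 103.2)],
#         error='warn')
#     # -> (55.3, 103.2), after logging a warning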
2668def short_to_list(nmax, it):
2669 import itertools
2671 if isinstance(it, list):
2672 return it
2674 li = []
2675 for i in range(nmax+1):
2676 try:
2677 li.append(next(it))
2678 except StopIteration:
2679 return li
2681 return itertools.chain(li, it)
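# Illustration (not part of the original module): short_to_list() materializes
# iterators with at most nmax elements into a list; longer iterators are
# returned as a lazy chain:
#
#     short_to_list(5, iter(range(3)))    # -> [0, 1, 2]
#     short_to_list(5, iter(range(100)))  # -> itertools.chain object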
2684def parse_md(f):
2685 try:
2686 with open(op.join(
2687 op.dirname(op.abspath(f)),
2688 'README.md'), 'r') as readme:
2689 mdstr = readme.read()
2690 except IOError as e:
2691 return 'Failed to get README.md: %s' % e
2693 # Remove the title
2694 mdstr = re.sub(r'^# .*\n?', '', mdstr)
2695 # Append sphinx reference to `pyrocko.` modules
2696 mdstr = re.sub(r'`pyrocko\.(.*)`', r':py:mod:`pyrocko.\1`', mdstr)
2697 # Convert Subsections to toc-less rubrics
2698 mdstr = re.sub(r'## (.*)\n', r'.. rubric:: \1\n', mdstr)
2699 return mdstr
2702def mpl_show(plt):
2703 import matplotlib
2704 if matplotlib.get_backend().lower() == 'agg':
2705 logger.warning('Cannot show() when using matplotlib "agg" backend')
2706 else:
2707 plt.show()
2710g_re_qsplit = re.compile(
2711 r'"([^"\\]*(?:\\.[^"\\]*)*)"|\'([^\'\\]*(?:\\.[^\'\\]*)*)\'|(\S+)')
2712g_re_qsplit_sep = {}
2715def get_re_qsplit(sep):
2716 if sep is None:
2717 return g_re_qsplit
2718 else:
2719 if sep not in g_re_qsplit_sep:
2720 assert len(sep) == 1
2721 assert sep not in '\'"'
2722 esep = re.escape(sep)
2723 g_re_qsplit_sep[sep] = re.compile(
2724 r'"([^"\\]*(?:\\.[^"\\]*)*)"|\'([^\'\\]*(?:\\.[^\'\\]*)*)\'|'
2725 + r'([^' + esep + r']+|(?<=' + esep + r')(?=' + esep + r')|^(?=' + esep + r')|(?<=' + esep + r')$)') # noqa
2726 return g_re_qsplit_sep[sep]
2729g_re_trivial = re.compile(r'\A[^\'"\s]+\Z')
2730g_re_trivial_sep = {}
2733def get_re_trivial(sep):
2734 if sep is None:
2735 return g_re_trivial
2736 else:
2737 if sep not in g_re_trivial_sep:
2738 assert len(sep) == 1
2739 assert sep not in '\'"'
2740 esep = re.escape(sep)
2741 g_re_trivial_sep[sep] = re.compile(r'\A[^\'"' + esep + r']+\Z')
2743 return g_re_trivial_sep[sep]
2746g_re_escape_s = re.compile(r'([\\\'])')
2747g_re_unescape_s = re.compile(r'\\([\\\'])')
2748g_re_escape_d = re.compile(r'([\\"])')
2749g_re_unescape_d = re.compile(r'\\([\\"])')
2752def escape_s(s):
2753 '''
2754 Backslash-escape single-quotes and backslashes.
2756 Example: ``Jack's`` => ``Jack\\'s``
2758 '''
2759 return g_re_escape_s.sub(r'\\\1', s)
2762def unescape_s(s):
2763 '''
2764 Unescape backslash-escaped single-quotes and backslashes.
2766 Example: ``Jack\\'s`` => ``Jack's``
2767 '''
2768 return g_re_unescape_s.sub(r'\1', s)
2771def escape_d(s):
2772 '''
2773 Backslash-escape double-quotes and backslashes.
2775 Example: ``"Hello \\O/"`` => ``\\"Hello \\\\O/\\"``
2776 '''
2777 return g_re_escape_d.sub(r'\\\1', s)
2780def unescape_d(s):
2781 '''
2782 Unescape backslash-escaped double-quotes and backslashes.
2784 Example: ``\\"Hello \\\\O/\\"`` => ``"Hello \\O/"``
2785 '''
2786 return g_re_unescape_d.sub(r'\1', s)
2789def qjoin_s(it, sep=None):
2790 '''
2791 Join sequence of strings into a line, single-quoting non-trivial strings.
2793 Example: ``["55", "Sparrow's Island"]`` => ``"55 'Sparrow\\\\'s Island'"``
2794 '''
2795 re_trivial = get_re_trivial(sep)
2797 if sep is None:
2798 sep = ' '
2800 return sep.join(
2801 w if re_trivial.search(w) else "'%s'" % escape_s(w) for w in it)
2804def qjoin_d(it, sep=None):
2805 '''
2806 Join sequence of strings into a line, double-quoting non-trivial strings.
2808 Example: ``['55', 'Pete "The Robot" Smith']`` =>
2809 ``'55 "Pete \\\\"The Robot\\\\" Smith"'``
2810 '''
2811 re_trivial = get_re_trivial(sep)
2812 if sep is None:
2813 sep = ' '
2815 return sep.join(
2816 w if re_trivial.search(w) else '"%s"' % escape_d(w) for w in it)
2819def qsplit(s, sep=None):
2820 '''
2821 Split line into list of strings, allowing for quoted strings.
2823 Example: ``"55 'Sparrow\\\\'s Island'"`` =>
2824 ``["55", "Sparrow's Island"]``,
2825 ``'55 "Pete \\\\"The Robot\\\\" Smith"'`` =>
2826 ``['55', 'Pete "The Robot" Smith']``
2827 '''
2828 re_qsplit = get_re_qsplit(sep)
2829 return [
2830 (unescape_d(x[0]) or unescape_s(x[1]) or x[2])
2831 for x in re_qsplit.findall(s)]
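# Illustration (not part of the original module): qjoin_s()/qjoin_d() and
# qsplit() are inverse operations:
#
#     line = qjoin_s(['55', "Sparrow's Island"])
#     qsplit(line)   # -> ['55', "Sparrow's Island"]
#
#     # A custom single-character separator is supported by both:
#     qsplit(qjoin_s(['a b', 'c'], sep=','), sep=',')   # -> ['a b', 'c']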
2834g_have_warned_threadpoolctl = False
2837class threadpool_limits_dummy(object):
2839 def __init__(self, *args, **kwargs):
2840 pass
2842 def __enter__(self):
2843 global g_have_warned_threadpoolctl
2845 if not g_have_warned_threadpoolctl:
2846 logger.warning(
2847 'Cannot control number of BLAS threads because '
2848 '`threadpoolctl` module is not available. You may want to '
2849 'install `threadpoolctl`.')
2851 g_have_warned_threadpoolctl = True
2853 return self
2855 def __exit__(self, type, value, traceback):
2856 pass
2859def get_threadpool_limits():
2860 '''
2861 Try to import threadpoolctl.threadpool_limits; provide a dummy if unavailable.
2862 '''
2864 try:
2865 from threadpoolctl import threadpool_limits
2866 return threadpool_limits
2868 except ImportError:
2869 return threadpool_limits_dummy
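# Illustrative usage (not part of the original module): limit the number of
# BLAS threads within a block, whether or not threadpoolctl is installed:
#
#     threadpool_limits = get_threadpool_limits()
#     with threadpool_limits(limits=1, user_api='blas'):
#         pass  # numerically heavy code here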