1# http://pyrocko.org - GPLv3
2#
3# The Pyrocko Developers, 21st Century
4# ---|P------/S----------~Lg----------
5'''
6Utility functions for Pyrocko.
8.. _time-handling-mode:
10High precision time handling mode
11.................................
13Pyrocko can treat timestamps either as standard double precision (64 bit)
14floating point values, or as high precision float (:py:class:`numpy.float128`
15or :py:class:`numpy.float96`, whichever is available), aliased here as
16:py:class:`pyrocko.util.hpfloat`. High precision time stamps are required when
17handling data with sub-millisecond precision, i.e. kHz/MHz data streams and
18event catalogs derived from such data.
Not all functions in Pyrocko and in programs depending on Pyrocko work
correctly with high precision times. Therefore, Pyrocko's high precision time
handling mode must be activated explicitly, either through the user config, a
command line option, or from within a script/program.
25The default high precision time handling mode can be configured globally with
26the user config variable
27:py:gattr:`~pyrocko.config.Config.use_high_precision_time`. Calling the
28function :py:func:`use_high_precision_time` overrides the default from the
29config file. This function may be called at startup of a program/script
30requiring a specific time handling mode.
32To create a valid time stamp for use in Pyrocko (e.g. in
33:py:class:`~pyrocko.model.Event` or :py:class:`~pyrocko.trace.Trace` objects),
34use:
36.. code-block :: python
38 import time
39 from pyrocko import util
41 # By default using mode selected in user config, override with:
42 # util.use_high_precision_time(True) # force high precision mode
43 # util.use_high_precision_time(False) # force low precision mode
45 t1 = util.str_to_time('2020-08-27 10:22:00')
46 t2 = util.str_to_time('2020-08-27 10:22:00.111222')
47 t3 = util.to_time_float(time.time())
49 # To get the appropriate float class, use:
51 time_float = util.get_time_float()
52 # -> float, numpy.float128 or numpy.float96
53 [isinstance(t, time_float) for t in [t1, t2, t3]]
54 # -> [True, True, True]
56 # Shortcut:
57 util.check_time_class(t1)
59Module content
60..............
61'''
63import time
64import logging
65import os
66import sys
67import re
68import calendar
69import math
70import fnmatch
71try:
72 import fcntl
73except ImportError:
74 fcntl = None
75import optparse
76import os.path as op
77import errno
79import numpy as num
80from scipy import signal
81import pyrocko
82from pyrocko import dummy_progressbar
85from urllib.parse import urlencode, quote, unquote # noqa
86from urllib.request import Request, build_opener, HTTPDigestAuthHandler # noqa
87from urllib.request import urlopen as _urlopen # noqa
88from urllib.error import HTTPError, URLError # noqa
91try:
92 import certifi
93 import ssl
94 g_ssl_context = ssl.create_default_context(cafile=certifi.where())
95except ImportError:
96 g_ssl_context = None
99class URLErrorSSL(URLError):
101 def __init__(self, urlerror):
102 self.__dict__ = urlerror.__dict__.copy()
104 def __str__(self):
105 return (
106 'Requesting web resource failed and the problem could be '
107 'related to SSL. Python standard libraries on some older '
108 'systems (like Ubuntu 14.04) are known to have trouble '
109 'with some SSL setups of today\'s servers: %s'
110 % URLError.__str__(self))
113def urlopen_ssl_check(*args, **kwargs):
114 try:
115 return urlopen(*args, **kwargs)
116 except URLError as e:
117 if str(e).find('SSL') != -1:
118 raise URLErrorSSL(e)
119 else:
120 raise
123def urlopen(*args, **kwargs):
125 if 'context' not in kwargs and g_ssl_context is not None:
126 kwargs['context'] = g_ssl_context
128 return _urlopen(*args, **kwargs)
131try:
132 long
133except NameError:
134 long = int
137force_dummy_progressbar = False
140try:
141 from pyrocko import util_ext
142except ImportError:
143 util_ext = None
146logger = logging.getLogger('pyrocko.util')
149try:
150 num_full = num.full
151except AttributeError:
152 def num_full(shape, fill_value, dtype=None, order='C'):
153 a = num.empty(shape, dtype=dtype, order=order)
154 a.fill(fill_value)
155 return a
157try:
158 num_full_like = num.full_like
159except AttributeError:
160 def num_full_like(arr, fill_value, dtype=None, order='K', subok=True):
161 a = num.empty_like(arr, dtype=dtype, order=order, subok=subok)
162 a.fill(fill_value)
163 return a
166g_setup_logging_args = 'pyrocko', 'warning'
169def setup_logging(programname='pyrocko', levelname='warning'):
170 '''
171 Initialize logging.
173 :param programname: program name to be written in log
174 :param levelname: string indicating the logging level ('debug', 'info',
175 'warning', 'error', 'critical')
177 This function is called at startup by most pyrocko programs to set up a
178 consistent logging format. This is simply a shortcut to a call to
179 :py:func:`logging.basicConfig()`.
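
    A minimal usage sketch (program name and level are illustrative)::

        from pyrocko import util

        util.setup_logging('my_program', 'info')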
180 '''
182 global g_setup_logging_args
183 g_setup_logging_args = (programname, levelname)
185 levels = {'debug': logging.DEBUG,
186 'info': logging.INFO,
187 'warning': logging.WARNING,
188 'error': logging.ERROR,
189 'critical': logging.CRITICAL}
191 logging.basicConfig(
192 level=levels[levelname],
193 format=programname+':%(name)-25s - %(levelname)-8s - %(message)s')
196def subprocess_setup_logging_args():
197 '''
198 Get arguments from previous call to setup_logging.
    These can be sent down to a worker process so it can set up its logging
201 in the same way as the main process.
202 '''
203 return g_setup_logging_args
206def data_file(fn):
207 return os.path.join(os.path.split(__file__)[0], 'data', fn)
210class DownloadError(Exception):
211 pass
214class PathExists(DownloadError):
215 pass
218def _download(url, fpath, username=None, password=None,
219 force=False, method='download', stats=None,
220 status_callback=None, entries_wanted=None,
221 recursive=False, header=None):
223 import requests
224 from requests.auth import HTTPBasicAuth
225 from requests.exceptions import HTTPError as req_HTTPError
227 requests.adapters.DEFAULT_RETRIES = 5
228 urljoin = requests.compat.urljoin
230 session = requests.Session()
    if header:
        # apply custom request headers if provided
        session.headers.update(header)
232 session.auth = None if username is None\
233 else HTTPBasicAuth(username, password)
235 status = {
236 'ntotal_files': 0,
237 'nread_files': 0,
238 'ntotal_bytes_all_files': 0,
239 'nread_bytes_all_files': 0,
240 'ntotal_bytes_current_file': 0,
241 'nread_bytes_current_file': 0,
242 'finished': False
243 }
245 try:
246 url_to_size = {}
248 if callable(status_callback):
249 status_callback(status)
251 if not recursive and url.endswith('/'):
252 raise DownloadError(
253 'URL: %s appears to be a directory'
                ' but recursive download is False' % url)
256 if recursive and not url.endswith('/'):
257 url += '/'
259 def parse_directory_tree(url, subdir=''):
260 r = session.get(urljoin(url, subdir))
261 r.raise_for_status()
263 entries = re.findall(r'href="([a-zA-Z0-9_.-]+/?)"', r.text)
265 files = sorted(set(subdir + fn for fn in entries
266 if not fn.endswith('/')))
268 if entries_wanted is not None:
269 files = [fn for fn in files
                         if any(fn in wanted for wanted in entries_wanted)]
272 status['ntotal_files'] += len(files)
274 dirs = sorted(set(subdir + dn for dn in entries
275 if dn.endswith('/')
276 and dn not in ('./', '../')))
278 for dn in dirs:
279 files.extend(parse_directory_tree(
280 url, subdir=dn))
282 return files
284 def get_content_length(url):
285 if url not in url_to_size:
286 r = session.head(url, headers={'Accept-Encoding': ''})
288 content_length = r.headers.get('content-length', None)
289 if content_length is None:
290 logger.debug('Could not get HTTP header '
291 'Content-Length for %s' % url)
293 content_length = None
295 else:
296 content_length = int(content_length)
297 status['ntotal_bytes_all_files'] += content_length
298 if callable(status_callback):
299 status_callback(status)
301 url_to_size[url] = content_length
303 return url_to_size[url]
305 def download_file(url, fn):
306 logger.info('starting download of %s...' % url)
307 ensuredirs(fn)
309 fsize = get_content_length(url)
310 status['ntotal_bytes_current_file'] = fsize
311 status['nread_bytes_current_file'] = 0
312 if callable(status_callback):
313 status_callback(status)
315 r = session.get(url, stream=True, timeout=5)
316 r.raise_for_status()
318 frx = 0
319 fn_tmp = fn + '.%i.temp' % os.getpid()
320 with open(fn_tmp, 'wb') as f:
321 for d in r.iter_content(chunk_size=1024):
322 f.write(d)
323 frx += len(d)
325 status['nread_bytes_all_files'] += len(d)
326 status['nread_bytes_current_file'] += len(d)
327 if callable(status_callback):
328 status_callback(status)
330 os.rename(fn_tmp, fn)
332 if fsize is not None and frx != fsize:
333 logger.warning(
334 'HTTP header Content-Length: %i bytes does not match '
335 'download size %i bytes' % (fsize, frx))
337 logger.info('finished download of %s' % url)
339 status['nread_files'] += 1
340 if callable(status_callback):
341 status_callback(status)
343 if recursive:
344 if op.exists(fpath) and not force:
345 raise PathExists('path %s already exists' % fpath)
347 files = parse_directory_tree(url)
349 dsize = 0
350 for fn in files:
351 file_url = urljoin(url, fn)
352 dsize += get_content_length(file_url) or 0
354 if method == 'calcsize':
355 return dsize
357 else:
358 for fn in files:
359 file_url = urljoin(url, fn)
360 download_file(file_url, op.join(fpath, fn))
362 else:
363 status['ntotal_files'] += 1
364 if callable(status_callback):
365 status_callback(status)
367 fsize = get_content_length(url)
368 if method == 'calcsize':
369 return fsize
370 else:
371 download_file(url, fpath)
373 except req_HTTPError as e:
        logger.warning('http error: %s' % e)
375 raise DownloadError('could not download file(s) from: %s' % url)
377 finally:
378 status['finished'] = True
379 if callable(status_callback):
380 status_callback(status)
381 session.close()
384def download_file(
385 url, fpath, username=None, password=None, status_callback=None,
386 **kwargs):
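    '''
    Download a single file via HTTP(S).

    Thin wrapper around the internal download machinery with
    ``recursive=False``. A usage sketch (URL and target path are
    illustrative)::

        from pyrocko import util

        util.download_file(
            'https://data.example.org/archive/file.bin', '/tmp/file.bin')
    '''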
387 return _download(
388 url, fpath, username, password,
389 recursive=False,
390 status_callback=status_callback,
391 **kwargs)
394def download_dir(
395 url, fpath, username=None, password=None, status_callback=None,
396 **kwargs):
398 return _download(
399 url, fpath, username, password,
400 recursive=True,
401 status_callback=status_callback,
402 **kwargs)
405class HPFloatUnavailable(Exception):
406 pass
409class dummy_hpfloat(object):
410 def __init__(self, *args, **kwargs):
411 raise HPFloatUnavailable(
412 'NumPy lacks support for float128 or float96 data type on this '
413 'platform.')
416if hasattr(num, 'float128'):
417 hpfloat = num.float128
419elif hasattr(num, 'float96'):
420 hpfloat = num.float96
422else:
423 hpfloat = dummy_hpfloat
426g_time_float = None
427g_time_dtype = None
430class TimeFloatSettingError(Exception):
431 pass
434def use_high_precision_time(enabled):
435 '''
436 Globally force a specific time handling mode.
438 See :ref:`High precision time handling mode <time-handling-mode>`.
440 :param enabled: enable/disable use of high precision time type
441 :type enabled: bool
443 This function should be called before handling/reading any time data.
444 It can only be called once.
446 Special attention is required when using multiprocessing on a platform
447 which does not use fork under the hood. In such cases, the desired setting
    must also be set in the subprocess.
449 '''
450 _setup_high_precision_time_mode(enabled_app=enabled)
453def _setup_high_precision_time_mode(enabled_app=False):
454 global g_time_float
455 global g_time_dtype
457 if not (g_time_float is None and g_time_dtype is None):
458 raise TimeFloatSettingError(
459 'Cannot set time handling mode: too late, it has already been '
460 'fixed by an earlier call.')
462 from pyrocko import config
464 conf = config.config()
465 enabled_config = conf.use_high_precision_time
467 enabled_env = os.environ.get('PYROCKO_USE_HIGH_PRECISION_TIME', None)
468 if enabled_env is not None:
469 try:
470 enabled_env = int(enabled_env) == 1
471 except ValueError:
472 raise TimeFloatSettingError(
473 'Environment variable PYROCKO_USE_HIGH_PRECISION_TIME '
474 'should be set to 0 or 1.')
476 enabled = enabled_config
477 mode_from = 'config variable `use_high_precision_time`'
478 notify = enabled
480 if enabled_env is not None:
481 if enabled_env != enabled:
482 notify = True
483 enabled = enabled_env
484 mode_from = 'environment variable `PYROCKO_USE_HIGH_PRECISION_TIME`'
486 if enabled_app is not None:
487 if enabled_app != enabled:
488 notify = True
489 enabled = enabled_app
490 mode_from = 'application override'
492 logger.debug('''
Pyrocko high precision time mode selection (later settings override earlier ones):
494 config: %s
495 env: %s
496 app: %s
497 -> enabled: %s'''.lstrip() % (
498 enabled_config, enabled_env, enabled_app, enabled))
500 if notify:
501 logger.info('Pyrocko high precision time mode %s by %s.' % (
502 'activated' if enabled else 'deactivated',
503 mode_from))
505 if enabled:
506 g_time_float = hpfloat
507 g_time_dtype = hpfloat
508 else:
509 g_time_float = float
510 g_time_dtype = num.float64
513def get_time_float():
514 '''
515 Get the effective float class for timestamps.
517 See :ref:`High precision time handling mode <time-handling-mode>`.
519 :returns: :py:class:`float` or :py:class:`hpfloat`, depending on the
520 current time handling mode
521 '''
522 global g_time_float
524 if g_time_float is None:
525 _setup_high_precision_time_mode()
527 return g_time_float
530def get_time_dtype():
531 '''
532 Get effective NumPy float class to handle timestamps.
534 See :ref:`High precision time handling mode <time-handling-mode>`.
535 '''
537 global g_time_dtype
539 if g_time_dtype is None:
540 _setup_high_precision_time_mode()
542 return g_time_dtype
545def to_time_float(t):
546 '''
547 Convert float to valid time stamp in the current time handling mode.
549 See :ref:`High precision time handling mode <time-handling-mode>`.
550 '''
551 return get_time_float()(t)
554class TimestampTypeError(ValueError):
555 pass
558def check_time_class(t, error='raise'):
559 '''
560 Type-check variable against current time handling mode.
562 See :ref:`High precision time handling mode <time-handling-mode>`.
563 '''
565 if t == 0.0:
566 return
568 if not isinstance(t, get_time_float()):
569 message = (
570 'Timestamp %g is of type %s but should be of type %s with '
571 'Pyrocko\'s currently selected time handling mode.\n\n'
572 'See https://pyrocko.org/docs/current/library/reference/util.html'
573 '#high-precision-time-handling-mode' % (
574 t, type(t), get_time_float()))
576 if error == 'raise':
577 raise TimestampTypeError(message)
578 elif error == 'warn':
579 logger.warning(message)
580 else:
581 assert False
584class Stopwatch(object):
585 '''
586 Simple stopwatch to measure elapsed wall clock time.
588 Usage::
        s = Stopwatch()
        time.sleep(1)
        print(s())
        time.sleep(1)
        print(s())
595 '''
597 def __init__(self):
598 self.start = time.time()
600 def __call__(self):
601 return time.time() - self.start
604def wrap(text, line_length=80):
605 '''
606 Paragraph and list-aware wrapping of text.
607 '''
609 text = text.strip('\n')
610 at_lineend = re.compile(r' *\n')
611 at_para = re.compile(r'((^|(\n\s*)?\n)(\s+[*] )|\n\s*\n)')
613 paragraphs = at_para.split(text)[::5]
614 listindents = at_para.split(text)[4::5]
615 newlist = at_para.split(text)[3::5]
617 listindents[0:0] = [None]
618 listindents.append(True)
619 newlist.append(None)
621 det_indent = re.compile(r'^ *')
623 outlines = []
624 for ip, p in enumerate(paragraphs):
625 if not p:
626 continue
628 if listindents[ip] is None:
629 _indent = det_indent.findall(p)[0]
630 findent = _indent
631 else:
632 findent = listindents[ip]
633 _indent = ' ' * len(findent)
635 ll = line_length - len(_indent)
636 llf = ll
638 oldlines = [s.strip() for s in at_lineend.split(p.rstrip())]
639 p1 = ' '.join(oldlines)
640 possible = re.compile(r'(^.{1,%i}|.{1,%i})( |$)' % (llf, ll))
641 for imatch, match in enumerate(possible.finditer(p1)):
642 parout = match.group(1)
643 if imatch == 0:
644 outlines.append(findent + parout)
645 else:
646 outlines.append(_indent + parout)
648 if ip != len(paragraphs)-1 and (
649 listindents[ip] is None
650 or newlist[ip] is not None
651 or listindents[ip+1] is None):
653 outlines.append('')
655 return outlines
658def ewrap(lines, width=80, indent=''):
659 lines = list(lines)
660 if not lines:
661 return ''
662 fwidth = max(len(s) for s in lines)
    nx = max(1, (width - len(indent)) // (fwidth + 1))
664 i = 0
665 rows = []
666 while i < len(lines):
667 rows.append(indent + ' '.join(x.ljust(fwidth) for x in lines[i:i+nx]))
668 i += nx
670 return '\n'.join(rows)
673class BetterHelpFormatter(optparse.IndentedHelpFormatter):
675 def __init__(self, *args, **kwargs):
676 optparse.IndentedHelpFormatter.__init__(self, *args, **kwargs)
678 def format_option(self, option):
679 '''
680 From IndentedHelpFormatter but using a different wrap method.
681 '''
683 help_text_position = 4 + self.current_indent
684 help_text_width = self.width - help_text_position
686 opts = self.option_strings[option]
687 opts = "%*s%s" % (self.current_indent, "", opts)
688 if option.help:
689 help_text = self.expand_default(option)
691 if self.help_position + len(help_text) + 1 <= self.width:
692 lines = [
693 '',
694 '%-*s %s' % (self.help_position, opts, help_text),
695 '']
696 else:
697 lines = ['']
698 lines.append(opts)
699 lines.append('')
700 if option.help:
701 help_lines = wrap(help_text, help_text_width)
702 lines.extend(["%*s%s" % (help_text_position, "", line)
703 for line in help_lines])
704 lines.append('')
706 return "\n".join(lines)
708 def format_description(self, description):
709 if not description:
710 return ''
712 if self.current_indent == 0:
713 lines = []
714 else:
715 lines = ['']
717 lines.extend(wrap(description, self.width - self.current_indent))
718 if self.current_indent == 0:
719 lines.append('\n')
721 return '\n'.join(
722 ['%*s%s' % (self.current_indent, '', line) for line in lines])
725class ProgressBar:
726 def __init__(self, label, n):
727 from pyrocko.progress import progress
728 self._context = progress.view()
729 self._context.__enter__()
730 self._task = progress.task(label, n)
732 def update(self, i):
733 self._task.update(i)
735 def finish(self):
736 self._task.done()
737 if self._context:
            self._context.__exit__(None, None, None)
739 self._context = None
742def progressbar(label, maxval):
743 if force_dummy_progressbar:
744 return dummy_progressbar.ProgressBar(maxval=maxval).start()
746 return ProgressBar(label, maxval)
749def progress_beg(label):
750 '''
751 Notify user that an operation has started.
753 :param label: name of the operation
    To be used in conjunction with :py:func:`progress_end`.
756 '''
758 sys.stderr.write(label)
759 sys.stderr.flush()
762def progress_end(label=''):
763 '''
764 Notify user that an operation has ended.
766 :param label: name of the operation
    To be used in conjunction with :py:func:`progress_beg`.
769 '''
771 sys.stderr.write(' done. %s\n' % label)
772 sys.stderr.flush()
775class ArangeError(Exception):
776 pass
779def arange2(start, stop, step, dtype=float, epsilon=1e-6, error='raise'):
780 '''
781 Return evenly spaced numbers over a specified interval.
783 Like :py:func:`numpy.arange` but returning floating point numbers by
784 default and with defined behaviour when stepsize is inconsistent with
785 interval bounds. It is considered inconsistent if the difference between
786 the closest multiple of ``step`` and ``stop`` is larger than ``epsilon *
787 step``. Inconsistencies are handled according to the ``error`` parameter.
788 If it is set to ``'raise'`` an exception of type :py:exc:`ArangeError` is
789 raised. If it is set to ``'round'``, ``'floor'``, or ``'ceil'``, ``stop``
790 is silently changed to the closest, the next smaller, or next larger
791 multiple of ``step``, respectively.
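
    Sketch of typical usage (values are illustrative)::

        from pyrocko.util import arange2

        arange2(0., 1., 0.25)                  # -> [0., 0.25, 0.5, 0.75, 1.]
        arange2(0., 1.1, 0.25, error='ceil')   # stop rounded up to 1.25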
792 '''
794 assert error in ('raise', 'round', 'floor', 'ceil')
796 start = dtype(start)
797 stop = dtype(stop)
798 step = dtype(step)
800 rnd = {'floor': math.floor, 'ceil': math.ceil}.get(error, round)
802 n = int(rnd((stop - start) / step)) + 1
803 stop_check = start + (n-1) * step
805 if error == 'raise' and abs(stop_check - stop) > step * epsilon:
806 raise ArangeError(
807 'inconsistent range specification: start=%g, stop=%g, step=%g'
808 % (start, stop, step))
810 x = num.arange(n, dtype=dtype)
811 x *= step
812 x += start
813 return x
816def polylinefit(x, y, n_or_xnodes):
817 '''
818 Fit piece-wise linear function to data.
820 :param x,y: arrays with coordinates of data
821 :param n_or_xnodes: int, number of segments or x coordinates of polyline
823 :returns: `(xnodes, ynodes, rms_error)` arrays with coordinates of
824 polyline, root-mean-square error
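
    A small usage sketch (synthetic data, two segments assumed)::

        import numpy as num
        from pyrocko.util import polylinefit

        x = num.linspace(0., 10., 100)
        y = num.abs(x - 5.)   # V-shaped data
        xnodes, ynodes, rms_error = polylinefit(x, y, 2)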
825 '''
827 x = num.asarray(x)
828 y = num.asarray(y)
830 if isinstance(n_or_xnodes, int):
831 n = n_or_xnodes
832 xmin = x.min()
833 xmax = x.max()
834 xnodes = num.linspace(xmin, xmax, n+1)
835 else:
836 xnodes = num.asarray(n_or_xnodes)
837 n = xnodes.size - 1
839 assert len(x) == len(y)
840 assert n > 0
842 ndata = len(x)
843 a = num.zeros((ndata+(n-1), n*2))
844 for i in range(n):
845 xmin_block = xnodes[i]
846 xmax_block = xnodes[i+1]
        if i == n-1:  # don't lose the last point
848 indices = num.where(
849 num.logical_and(xmin_block <= x, x <= xmax_block))[0]
850 else:
851 indices = num.where(
852 num.logical_and(xmin_block <= x, x < xmax_block))[0]
854 a[indices, i*2] = x[indices]
855 a[indices, i*2+1] = 1.0
857 w = float(ndata)*100.
858 if i < n-1:
859 a[ndata+i, i*2] = xmax_block*w
860 a[ndata+i, i*2+1] = 1.0*w
861 a[ndata+i, i*2+2] = -xmax_block*w
862 a[ndata+i, i*2+3] = -1.0*w
864 d = num.concatenate((y, num.zeros(n-1)))
865 model = num.linalg.lstsq(a, d, rcond=-1)[0].reshape((n, 2))
867 ynodes = num.zeros(n+1)
868 ynodes[:n] = model[:, 0]*xnodes[:n] + model[:, 1]
869 ynodes[1:] += model[:, 0]*xnodes[1:] + model[:, 1]
870 ynodes[1:n] *= 0.5
872 rms_error = num.sqrt(num.mean((num.interp(x, xnodes, ynodes) - y)**2))
874 return xnodes, ynodes, rms_error
877def plf_integrate_piecewise(x_edges, x, y):
878 '''
879 Calculate definite integral of piece-wise linear function on intervals.
881 Use trapezoidal rule to calculate definite integral of a piece-wise linear
882 function for a series of consecutive intervals. ``x_edges`` and ``x`` must
883 be sorted.
885 :param x_edges: array with edges of the intervals
886 :param x,y: arrays with coordinates of piece-wise linear function's
887 control points
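
    Sketch of usage (illustrative values)::

        import numpy as num
        from pyrocko.util import plf_integrate_piecewise

        x = num.array([0., 1., 2.])
        y = num.array([0., 1., 0.])              # triangle of total area 1
        x_edges = num.array([0., 1., 2.])
        plf_integrate_piecewise(x_edges, x, y)   # -> [0.5, 0.5]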
888 '''
890 x_all = num.concatenate((x, x_edges))
891 ii = num.argsort(x_all)
892 y_edges = num.interp(x_edges, x, y)
893 y_all = num.concatenate((y, y_edges))
894 xs = x_all[ii]
895 ys = y_all[ii]
896 y_all[ii[1:]] = num.cumsum((xs[1:] - xs[:-1]) * 0.5 * (ys[1:] + ys[:-1]))
897 return num.diff(y_all[-len(y_edges):])
900def diff_fd_1d_4o(dt, data):
901 '''
    Approximate first derivative of an array (fourth order, central FD).
904 :param dt: sampling interval
905 :param data: NumPy array with data samples
907 :returns: NumPy array with same shape as input
909 Interior points are approximated to fourth order, edge points to first
910 order right- or left-sided respectively, points next to edge to second
911 order central.
912 '''
913 import scipy.signal
915 ddata = num.empty_like(data, dtype=float)
917 if data.size >= 5:
918 ddata[2:-2] = scipy.signal.lfilter(
919 [-1., +8., 0., -8., 1.], [1.], data)[4:] / (12.*dt)
921 if data.size >= 3:
922 ddata[1] = (data[2] - data[0]) / (2. * dt)
923 ddata[-2] = (data[-1] - data[-3]) / (2. * dt)
925 if data.size >= 2:
926 ddata[0] = (data[1] - data[0]) / dt
927 ddata[-1] = (data[-1] - data[-2]) / dt
929 if data.size == 1:
930 ddata[0] = 0.0
932 return ddata
935def diff_fd_1d_2o(dt, data):
936 '''
937 Approximate first derivative of an array (second order, central FD).
939 :param dt: sampling interval
940 :param data: NumPy array with data samples
942 :returns: NumPy array with same shape as input
944 Interior points are approximated to second order, edge points to first
945 order right- or left-sided respectively.
947 Uses :py:func:`numpy.gradient`.
948 '''
950 return num.gradient(data, dt)
953def diff_fd_2d_4o(dt, data):
954 '''
    Approximate second derivative of an array (fourth order, central FD).
957 :param dt: sampling interval
958 :param data: NumPy array with data samples
960 :returns: NumPy array with same shape as input
962 Interior points are approximated to fourth order, next-to-edge points to
963 second order, edge points repeated.
964 '''
965 import scipy.signal
967 ddata = num.empty_like(data, dtype=float)
969 if data.size >= 5:
970 ddata[2:-2] = scipy.signal.lfilter(
971 [-1., +16., -30., +16., -1.], [1.], data)[4:] / (12.*dt**2)
973 if data.size >= 3:
974 ddata[:2] = (data[2] - 2.0 * data[1] + data[0]) / dt**2
975 ddata[-2:] = (data[-1] - 2.0 * data[-2] + data[-3]) / dt**2
977 if data.size < 3:
978 ddata[:] = 0.0
980 return ddata
983def diff_fd_2d_2o(dt, data):
984 '''
985 Approximate second derivative of an array (second order, central FD).
987 :param dt: sampling interval
988 :param data: NumPy array with data samples
990 :returns: NumPy array with same shape as input
992 Interior points are approximated to second order, edge points repeated.
993 '''
994 import scipy.signal
996 ddata = num.empty_like(data, dtype=float)
998 if data.size >= 3:
999 ddata[1:-1] = scipy.signal.lfilter(
1000 [1., -2., 1.], [1.], data)[2:] / (dt**2)
1002 ddata[0] = ddata[1]
1003 ddata[-1] = ddata[-2]
1005 if data.size < 3:
1006 ddata[:] = 0.0
1008 return ddata
1011def diff_fd(n, order, dt, data):
1012 '''
1013 Approximate 1st or 2nd derivative of an array.
1015 :param n: 1 for first derivative, 2 for second
1016 :param order: order of the approximation 2 and 4 are supported
1017 :param dt: sampling interval
1018 :param data: NumPy array with data samples
1020 :returns: NumPy array with same shape as input
1022 This is a frontend to the functions :py:func:`diff_fd_1d_2o`,
1023 :py:func:`diff_fd_1d_4o`, :py:func:`diff_fd_2d_2o`, and
1024 :py:func:`diff_fd_2d_4o`.
1026 Raises :py:exc:`ValueError` for unsupported `n` or `order`.
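
    A minimal usage sketch (sampling interval and data are illustrative)::

        import numpy as num
        from pyrocko.util import diff_fd

        dt = 0.01
        t = num.arange(100) * dt
        data = num.sin(2. * num.pi * t)
        ddata = diff_fd(1, 4, dt, data)    # first derivative, 4th order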
1027 '''
1029 funcs = {
1030 1: {2: diff_fd_1d_2o, 4: diff_fd_1d_4o},
1031 2: {2: diff_fd_2d_2o, 4: diff_fd_2d_4o}}
1033 try:
1034 funcs_n = funcs[n]
1035 except KeyError:
1036 raise ValueError(
1037 'pyrocko.util.diff_fd: '
            'Only 1st and 2nd derivatives are supported.')
1040 try:
1041 func = funcs_n[order]
1042 except KeyError:
1043 raise ValueError(
1044 'pyrocko.util.diff_fd: '
1045 'Order %i is not supported for %s derivative. Supported: %s' % (
1046 order, ['', '1st', '2nd'][n],
1047 ', '.join('%i' % order for order in sorted(funcs_n.keys()))))
1049 return func(dt, data)
1052class GlobalVars(object):
1053 reuse_store = dict()
1054 decitab_nmax = 0
1055 decitab = {}
1056 decimate_fir_coeffs = {}
1057 decimate_fir_remez_coeffs = {}
1058 decimate_iir_coeffs = {}
1059 re_frac = None
1062def decimate_coeffs(q, n=None, ftype='iir'):
1064 q = int(q)
1066 if n is None:
1067 if ftype == 'fir':
1068 n = 30
1069 elif ftype == 'fir-remez':
1070 n = 45*q
1071 else:
1072 n = 8
1074 if ftype == 'fir':
1075 coeffs = GlobalVars.decimate_fir_coeffs
1076 if (n, 1./q) not in coeffs:
1077 coeffs[n, 1./q] = signal.firwin(n+1, .75/q, window='hamming')
1079 b = coeffs[n, 1./q]
1080 return b, [1.], n
1082 elif ftype == 'fir-remez':
1083 coeffs = GlobalVars.decimate_fir_remez_coeffs
1084 if (n, 1./q) not in coeffs:
1085 coeffs[n, 1./q] = signal.remez(
1086 n+1, (0., .75/q, 1./q, 1.),
1087 (1., 0.), fs=2, weight=(1, 50))
1088 b = coeffs[n, 1./q]
1089 return b, [1.], n
1091 else:
1092 coeffs = GlobalVars.decimate_iir_coeffs
1093 if (n, 0.05, 0.8/q) not in coeffs:
1094 coeffs[n, 0.05, 0.8/q] = signal.cheby1(n, 0.05, 0.8/q)
1096 b, a = coeffs[n, 0.05, 0.8/q]
1097 return b, a, n
1100def decimate(x, q, n=None, ftype='iir', zi=None, ioff=0):
1101 '''
    Downsample the signal ``x`` by an integer factor ``q``, using an order
    ``n`` filter.

    By default, an order 8 Chebyshev type I filter is used, or a 30 point FIR
    filter with Hamming window if ``ftype`` is ``'fir'``.
1107 :param x: the signal to be downsampled (1D :class:`numpy.ndarray`)
1108 :param q: the downsampling factor
1109 :param n: order of the filter (1 less than the length of the filter for a
1110 `fir` filter)
1111 :param ftype: type of the filter; can be `iir`, `fir` or `fir-remez`
1113 :returns: the downsampled signal (1D :class:`numpy.ndarray`)
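
    Minimal usage sketch (synthetic data, downsampling factor 5 assumed)::

        import numpy as num
        from pyrocko.util import decimate

        x = num.random.normal(size=1000)
        y = decimate(x, 5)                   # IIR (Chebyshev I) antialiasing
        y_fir = decimate(x, 5, ftype='fir')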
1115 '''
1117 b, a, n = decimate_coeffs(q, n, ftype)
1119 if zi is None or zi is True:
1120 zi_ = num.zeros(max(len(a), len(b))-1, dtype=float)
1121 else:
1122 zi_ = zi
1124 y, zf = signal.lfilter(b, a, x, zi=zi_)
1126 if zi is not None:
1127 return y[n//2+ioff::q].copy(), zf
1128 else:
1129 return y[n//2+ioff::q].copy()
1132class UnavailableDecimation(Exception):
1133 '''
1134 Exception raised by :py:func:`decitab` for unavailable decimation factors.
1135 '''
1137 pass
1140def gcd(a, b, epsilon=1e-7):
1141 '''
1142 Greatest common divisor.
1143 '''
1145 while b > epsilon*a:
1146 a, b = b, a % b
1148 return a
1151def lcm(a, b):
1152 '''
1153 Least common multiple.
1154 '''
1156 return a*b // gcd(a, b)
1159def mk_decitab(nmax=100):
1160 '''
1161 Make table with decimation sequences.
    Decimation from one sampling rate to a lower one is achieved by a
    successive application of :py:func:`decimate` with small integer
    downsampling factors (because using large downsampling factors can make
    the decimation unstable or slow). This function sets up a table with
    downsampling sequences for factors up to ``nmax``.
1168 '''
1170 tab = GlobalVars.decitab
1171 for i in range(1, 10):
1172 for j in range(1, i+1):
1173 for k in range(1, j+1):
1174 for l_ in range(1, k+1):
1175 for m in range(1, l_+1):
1176 p = i*j*k*l_*m
1177 if p > nmax:
1178 break
1179 if p not in tab:
1180 tab[p] = (i, j, k, l_, m)
1181 if i*j*k*l_ > nmax:
1182 break
1183 if i*j*k > nmax:
1184 break
1185 if i*j > nmax:
1186 break
1187 if i > nmax:
1188 break
1190 GlobalVars.decitab_nmax = nmax
1193def zfmt(n):
1194 return '%%0%ii' % (int(math.log10(n - 1)) + 1)
1197def _year_to_time(year):
1198 tt = (year, 1, 1, 0, 0, 0)
1199 return to_time_float(calendar.timegm(tt))
1202def _working_year(year):
1203 try:
1204 tt = (year, 1, 1, 0, 0, 0)
1205 t = calendar.timegm(tt)
1206 tt2_ = time.gmtime(t)
1207 tt2 = tuple(tt2_)[:6]
1208 if tt != tt2:
1209 return False
1211 s = '%i-01-01 00:00:00' % year
1212 s2 = time.strftime('%Y-%m-%d %H:%M:%S', tt2_)
1213 if s != s2:
1214 return False
1216 t3 = str_to_time(s2, format='%Y-%m-%d %H:%M:%S')
1217 s3 = time_to_str(t3, format='%Y-%m-%d %H:%M:%S')
1218 if s3 != s2:
1219 return False
1221 except Exception:
1222 return False
1224 return True
1227def working_system_time_range(year_min_lim=None, year_max_lim=None):
1228 '''
    Check time range supported by the system's time conversion functions.
1231 Returns system time stamps of start of year of first/last fully supported
1232 year span. If this is before 1900 or after 2100, return first/last century
1233 which is fully supported.
1235 :returns: ``(tmin, tmax, year_min, year_max)``
1236 '''
1238 year0 = 2000
1239 year_min = year0
1240 year_max = year0
1242 itests = list(range(101))
1243 for i in range(19):
1244 itests.append(200 + i*100)
1246 for i in itests:
1247 year = year0 - i
1248 if year_min_lim is not None and year < year_min_lim:
1249 year_min = year_min_lim
1250 break
1251 elif not _working_year(year):
1252 break
1253 else:
1254 year_min = year
1256 for i in itests:
1257 year = year0 + i
1258 if year_max_lim is not None and year > year_max_lim:
1259 year_max = year_max_lim
1260 break
1261 elif not _working_year(year + 1):
1262 break
1263 else:
1264 year_max = year
1266 return (
1267 _year_to_time(year_min),
1268 _year_to_time(year_max),
1269 year_min, year_max)
1272g_working_system_time_range = None
1275def get_working_system_time_range():
1276 '''
1277 Caching variant of :py:func:`working_system_time_range`.
1278 '''
1280 global g_working_system_time_range
1281 if g_working_system_time_range is None:
1282 g_working_system_time_range = working_system_time_range()
1284 return g_working_system_time_range
1287def is_working_time(t):
1288 tmin, tmax, _, _ = get_working_system_time_range()
1289 return tmin <= t <= tmax
1292def julian_day_of_year(timestamp):
1293 '''
    Get the ordinal day of the year (1 for January 1st) of ``timestamp``.
1296 :returns: day number as int
1297 '''
1299 return time.gmtime(int(timestamp)).tm_yday
1302def hour_start(timestamp):
1303 '''
1304 Get beginning of hour for any point in time.
1306 :param timestamp: time instant as system timestamp (in seconds)
1308 :returns: instant of hour start as system timestamp
1309 '''
1311 tt = time.gmtime(int(timestamp))
1312 tts = tt[0:4] + (0, 0)
1313 return to_time_float(calendar.timegm(tts))
1316def day_start(timestamp):
1317 '''
1318 Get beginning of day for any point in time.
1320 :param timestamp: time instant as system timestamp (in seconds)
1322 :returns: instant of day start as system timestamp
1323 '''
1325 tt = time.gmtime(int(timestamp))
1326 tts = tt[0:3] + (0, 0, 0)
1327 return to_time_float(calendar.timegm(tts))
1330def month_start(timestamp):
1331 '''
1332 Get beginning of month for any point in time.
1334 :param timestamp: time instant as system timestamp (in seconds)
1336 :returns: instant of month start as system timestamp
1337 '''
1339 tt = time.gmtime(int(timestamp))
1340 tts = tt[0:2] + (1, 0, 0, 0)
1341 return to_time_float(calendar.timegm(tts))
1344def year_start(timestamp):
1345 '''
1346 Get beginning of year for any point in time.
1348 :param timestamp: time instant as system timestamp (in seconds)
1350 :returns: instant of year start as system timestamp
1351 '''
1353 tt = time.gmtime(int(timestamp))
1354 tts = tt[0:1] + (1, 1, 0, 0, 0)
1355 return to_time_float(calendar.timegm(tts))
1358def iter_days(tmin, tmax):
1359 '''
1360 Yields begin and end of days until given time span is covered.
1362 :param tmin,tmax: input time span
1364 :yields: tuples with (begin, end) of days as system timestamps
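
    Usage sketch (time strings are illustrative)::

        from pyrocko import util

        tmin = util.str_to_time('2020-01-01 12:00:00')
        tmax = util.str_to_time('2020-01-03 12:00:00')
        for tday_min, tday_max in util.iter_days(tmin, tmax):
            print(util.time_to_str(tday_min), util.time_to_str(tday_max))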
1365 '''
1367 t = day_start(tmin)
1368 while t < tmax:
1369 tend = day_start(t + 26*60*60)
1370 yield t, tend
1371 t = tend
1374def iter_months(tmin, tmax):
1375 '''
1376 Yields begin and end of months until given time span is covered.
1378 :param tmin,tmax: input time span
1380 :yields: tuples with (begin, end) of months as system timestamps
1381 '''
1383 t = month_start(tmin)
1384 while t < tmax:
1385 tend = month_start(t + 24*60*60*33)
1386 yield t, tend
1387 t = tend
1390def iter_years(tmin, tmax):
1391 '''
1392 Yields begin and end of years until given time span is covered.
1394 :param tmin,tmax: input time span
1396 :yields: tuples with (begin, end) of years as system timestamps
1397 '''
1399 t = year_start(tmin)
1400 while t < tmax:
1401 tend = year_start(t + 24*60*60*369)
1402 yield t, tend
1403 t = tend
1406def today():
1407 return day_start(time.time())
1410def tomorrow():
1411 return day_start(time.time() + 24*60*60)
1414def decitab(n):
1415 '''
    Get integer decimation sequence for given downsampling factor.
1418 :param n: target decimation factor
1420 :returns: tuple with downsampling sequence
1421 '''
1423 if n > GlobalVars.decitab_nmax:
1424 mk_decitab(n*2)
1425 if n not in GlobalVars.decitab:
1426 raise UnavailableDecimation('ratio = %g' % n)
1427 return GlobalVars.decitab[n]
1430def ctimegm(s, format="%Y-%m-%d %H:%M:%S"):
1431 '''
1432 Convert string representing UTC time to system time.
1434 :param s: string to be interpreted
1435 :param format: format string passed to :py:func:`strptime`
1437 :returns: system time stamp
    Interprets string with format ``'%Y-%m-%d %H:%M:%S'``, using strptime.
1441 .. note::
1442 This function is to be replaced by :py:func:`str_to_time`.
1443 '''
1445 return calendar.timegm(time.strptime(s, format))
1448def gmctime(t, format="%Y-%m-%d %H:%M:%S"):
1449 '''
1450 Get string representation from system time, UTC.
1452 Produces string with format ``'%Y-%m-%d %H:%M:%S'``, using strftime.
1454 .. note::
        This function is to be replaced by :py:func:`time_to_str`.
1456 '''
1458 return time.strftime(format, time.gmtime(t))
1461def gmctime_v(t, format="%a, %d %b %Y %H:%M:%S"):
1462 '''
1463 Get string representation from system time, UTC. Same as
1464 :py:func:`gmctime` but with a more verbose default format.
1466 .. note::
1467 This function is to be replaced by :py:func:`time_to_str`.
1468 '''
1470 return time.strftime(format, time.gmtime(t))
1473def gmctime_fn(t, format="%Y-%m-%d_%H-%M-%S"):
1474 '''
1475 Get string representation from system time, UTC. Same as
1476 :py:func:`gmctime` but with a default usable in filenames.
1478 .. note::
1479 This function is to be replaced by :py:func:`time_to_str`.
1480 '''
1482 return time.strftime(format, time.gmtime(t))
1485class TimeStrError(Exception):
1486 pass
1489class FractionalSecondsMissing(TimeStrError):
1490 '''
1491 Exception raised by :py:func:`str_to_time` when the given string lacks
1492 fractional seconds.
1493 '''
1495 pass
1498class FractionalSecondsWrongNumberOfDigits(TimeStrError):
1499 '''
1500 Exception raised by :py:func:`str_to_time` when the given string has an
1501 incorrect number of digits in the fractional seconds part.
1502 '''
1504 pass
1507def _endswith_n(s, endings):
1508 for ix, x in enumerate(endings):
1509 if s.endswith(x):
1510 return ix
1511 return -1
1514def str_to_time(s, format='%Y-%m-%d %H:%M:%S.OPTFRAC'):
1515 '''
1516 Convert string representing UTC time to floating point system time.
1518 :param s: string representing UTC time
1519 :param format: time string format
1520 :returns: system time stamp as floating point value
1522 Uses the semantics of :py:func:`time.strptime` but allows for fractional
1523 seconds. If the format ends with ``'.FRAC'``, anything after a dot is
1524 interpreted as fractional seconds. If the format ends with ``'.OPTFRAC'``,
    the fractional part, including the dot, is optional. As a consequence,
    neither the time string nor the format may contain any other dots in this
    case. If the format ends with ``'.xFRAC'``, where x is 1, 2, or 3, it is
    ensured that exactly that number of digits is present in the fractional
    seconds.
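
    Examples (the default format accepts optional fractional seconds; values
    are illustrative)::

        from pyrocko import util

        t1 = util.str_to_time('2020-08-27 10:22:00')
        t2 = util.str_to_time('2020-08-27 10:22:00.111222')
        t3 = util.str_to_time('2020-08-27', format='%Y-%m-%d')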
1530 '''
1532 time_float = get_time_float()
1534 if util_ext is not None:
1535 try:
1536 t, tfrac = util_ext.stt(s, format)
1537 except util_ext.UtilExtError as e:
1538 raise TimeStrError(
1539 '%s, string=%s, format=%s' % (str(e), s, format))
1541 return time_float(t)+tfrac
1543 fracsec = 0.
1544 fixed_endings = '.FRAC', '.1FRAC', '.2FRAC', '.3FRAC'
1546 iend = _endswith_n(format, fixed_endings)
1547 if iend != -1:
1548 dotpos = s.rfind('.')
1549 if dotpos == -1:
1550 raise FractionalSecondsMissing(
1551 'string=%s, format=%s' % (s, format))
1553 if iend > 0 and iend != (len(s)-dotpos-1):
1554 raise FractionalSecondsWrongNumberOfDigits(
1555 'string=%s, format=%s' % (s, format))
1557 format = format[:-len(fixed_endings[iend])]
1558 fracsec = float(s[dotpos:])
1559 s = s[:dotpos]
1561 elif format.endswith('.OPTFRAC'):
1562 dotpos = s.rfind('.')
1563 format = format[:-8]
1564 if dotpos != -1 and len(s[dotpos:]) > 1:
1565 fracsec = float(s[dotpos:])
1567 if dotpos != -1:
1568 s = s[:dotpos]
1570 try:
1571 return time_float(calendar.timegm(time.strptime(s, format))) \
1572 + fracsec
1573 except ValueError as e:
1574 raise TimeStrError('%s, string=%s, format=%s' % (str(e), s, format))
1577stt = str_to_time
1580def str_to_time_fillup(s):
1581 '''
1582 Default :py:func:`str_to_time` with filling in of missing values.
    Allows full time strings like `'2010-01-01 00:00:00'` to be abbreviated
    as `'2010-01-01 00:00'`, `'2010-01-01 00'`, ..., or `'2010'`.
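
    For example (illustrative)::

        from pyrocko import util

        t = util.str_to_time_fillup('2010-07')   # 2010-07-01 00:00:00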
1586 '''
1588 if len(s) in (4, 7, 10, 13, 16):
1589 s += '0000-01-01 00:00:00'[len(s):]
1591 return str_to_time(s)
1594def time_to_str(t, format='%Y-%m-%d %H:%M:%S.3FRAC'):
1595 '''
1596 Get string representation for floating point system time.
1598 :param t: floating point system time
1599 :param format: time string format
1600 :returns: string representing UTC time
1602 Uses the semantics of :py:func:`time.strftime` but additionally allows for
1603 fractional seconds. If ``format`` contains ``'.xFRAC'``, where ``x`` is a
1604 digit between 1 and 9, this is replaced with the fractional part of ``t``
1605 with ``x`` digits precision.
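
    Usage sketch (timestamp value is illustrative)::

        from pyrocko import util

        t = util.str_to_time('2020-08-27 10:22:00.111222')
        util.time_to_str(t)                               # millisecond default
        util.time_to_str(t, format='%Y-%m-%d %H:%M:%S.6FRAC')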
1606 '''
1608 if pyrocko.grumpy > 0:
1609 check_time_class(t, 'warn' if pyrocko.grumpy == 1 else 'raise')
1611 if isinstance(format, int):
1612 if format > 0:
1613 format = '%Y-%m-%d %H:%M:%S.' + '%iFRAC' % format
1614 else:
1615 format = '%Y-%m-%d %H:%M:%S'
1617 if util_ext is not None:
1618 t0 = num.floor(t)
1619 try:
1620 return util_ext.tts(int(t0), float(t - t0), format)
1621 except util_ext.UtilExtError as e:
1622 raise TimeStrError(
1623 '%s, timestamp=%f, format=%s' % (str(e), t, format))
1625 if not GlobalVars.re_frac:
1626 GlobalVars.re_frac = re.compile(r'\.[1-9]FRAC')
1627 GlobalVars.frac_formats = dict(
1628 [('.%sFRAC' % x, '%.'+x+'f') for x in '123456789'])
1630 ts = float(num.floor(t))
1631 tfrac = t-ts
1633 m = GlobalVars.re_frac.search(format)
1634 if m:
1635 sfrac = (GlobalVars.frac_formats[m.group(0)] % tfrac)
1636 if sfrac[0] == '1':
1637 ts += 1.
1639 format, nsub = GlobalVars.re_frac.subn(sfrac[1:], format, 1)
1641 return time.strftime(format, time.gmtime(ts))
1644tts = time_to_str
1645_abbr_weekday = 'Mon Tue Wed Thu Fri Sat Sun'.split()
1646_abbr_month = 'Jan Feb Mar Apr May Jun Jul Aug Sep Oct Nov Dec'.split()
1649def mystrftime(fmt=None, tt=None, milliseconds=0):
1650 # Needed by Snuffler for the time axis. In other cases `time_to_str`
1651 # should be used.
1653 if fmt is None:
1654 fmt = '%Y-%m-%d %H:%M:%S .%r'
1656 # Get these two locale independent, needed by Snuffler.
1657 # Setting the locale seems to have no effect.
1658 fmt = fmt.replace('%a', _abbr_weekday[tt.tm_wday])
1659 fmt = fmt.replace('%b', _abbr_month[tt.tm_mon-1])
1661 fmt = fmt.replace('%r', '%03i' % int(round(milliseconds)))
1662 fmt = fmt.replace('%u', '%06i' % int(round(milliseconds*1000)))
1663 fmt = fmt.replace('%n', '%09i' % int(round(milliseconds*1000000)))
1664 return time.strftime(fmt, tt)
1667def gmtime_x(timestamp):
1668 etimestamp = float(num.floor(timestamp))
1669 tt = time.gmtime(etimestamp)
1670 ms = (timestamp-etimestamp)*1000
1671 return tt, ms
1674def plural_s(n):
1675 if not isinstance(n, int):
1676 n = len(n)
1678 return 's' if n != 1 else ''
1681def ensuredirs(dst):
1682 '''
1683 Create all intermediate path components for a target path.
1685 :param dst: target path
1687 The leaf part of the target path is not created (use :py:func:`ensuredir`
    if the target path is a directory to be created).
1689 '''
1691 d, x = os.path.split(dst.rstrip(os.sep))
1692 dirs = []
1693 while d and not os.path.exists(d):
1694 dirs.append(d)
1695 d, x = os.path.split(d)
1697 dirs.reverse()
1699 for d in dirs:
1700 try:
1701 os.mkdir(d)
1702 except OSError as e:
1703 if not e.errno == errno.EEXIST:
1704 raise
1707def ensuredir(dst):
1708 '''
1709 Create directory and all intermediate path components to it as needed.
1711 :param dst: directory name
1713 Nothing is done if the given target already exists.
1714 '''
1716 if os.path.exists(dst):
1717 return
    dst = dst.rstrip(os.sep)
1721 ensuredirs(dst)
1722 try:
1723 os.mkdir(dst)
1724 except OSError as e:
1725 if not e.errno == errno.EEXIST:
1726 raise
1729def reuse(x):
1730 '''
1731 Get unique instance of an object.
1733 :param x: hashable object
1734 :returns: reference to x or an equivalent object
1736 Cache object ``x`` in a global dict for reuse, or if x already
1737 is in that dict, return a reference to it.
1738 '''
1740 grs = GlobalVars.reuse_store
1741 if x not in grs:
1742 grs[x] = x
1743 return grs[x]
1746def deuse(x):
1747 grs = GlobalVars.reuse_store
1748 if x in grs:
1749 del grs[x]
1752class Anon(object):
1753 '''
1754 Dict-to-object utility.
1756 Any given arguments are stored as attributes.
1758 Example::
1760 a = Anon(x=1, y=2)
        print(a.x, a.y)
1762 '''
1764 def __init__(self, **dict):
1765 for k in dict:
1766 self.__dict__[k] = dict[k]
1769def iter_select_files(
1770 paths,
1771 include=None,
1772 exclude=None,
1773 selector=None,
1774 show_progress=True,
1775 pass_through=None):
1777 '''
1778 Recursively select files (generator variant).
1780 See :py:func:`select_files`.
1781 '''
1783 if show_progress:
1784 progress_beg('selecting files...')
1786 ngood = 0
1787 check_include = None
1788 if include is not None:
1789 rinclude = re.compile(include)
1791 def check_include(path):
1792 m = rinclude.search(path)
1793 if not m:
1794 return False
1796 if selector is None:
1797 return True
1799 infos = Anon(**m.groupdict())
1800 return selector(infos)
1802 check_exclude = None
1803 if exclude is not None:
1804 rexclude = re.compile(exclude)
1806 def check_exclude(path):
1807 return not bool(rexclude.search(path))
1809 if check_include and check_exclude:
1811 def check(path):
1812 return check_include(path) and check_exclude(path)
1814 elif check_include:
1815 check = check_include
1817 elif check_exclude:
1818 check = check_exclude
1820 else:
1821 check = None
1823 if isinstance(paths, str):
1824 paths = [paths]
1826 for path in paths:
1827 if pass_through and pass_through(path):
1828 if check is None or check(path):
1829 yield path
1831 elif os.path.isdir(path):
1832 for (dirpath, dirnames, filenames) in os.walk(path):
1833 dirnames.sort()
1834 filenames.sort()
1835 for filename in filenames:
1836 path = op.join(dirpath, filename)
1837 if check is None or check(path):
1838 yield os.path.abspath(path)
1839 ngood += 1
1840 else:
1841 if check is None or check(path):
1842 yield os.path.abspath(path)
1843 ngood += 1
1845 if show_progress:
1846 progress_end('%i file%s selected.' % (ngood, plural_s(ngood)))
1849def select_files(
1850 paths, include=None, exclude=None, selector=None, show_progress=True,
1851 regex=None):
1853 '''
1854 Recursively select files.
1856 :param paths: entry path names
1857 :param include: pattern for conditional inclusion
1858 :param exclude: pattern for conditional exclusion
1859 :param selector: callback for conditional inclusion
1860 :param show_progress: if True, indicate start and stop of processing
1861 :param regex: alias for ``include`` (backwards compatibility)
1862 :returns: list of path names
1864 Recursively finds all files under given entry points ``paths``. If
1865 parameter ``include`` is a regular expression, only files with matching
1866 path names are included. If additionally parameter ``selector`` is given a
1867 callback function, only files for which the callback returns ``True`` are
    included. The callback is called with a single argument, an object having
    as attributes any named groups given in ``include``.
1872 Examples
1874 To find all files ending in ``'.mseed'`` or ``'.msd'``::
1876 select_files(paths,
1877 include=r'\\.(mseed|msd)$')
1879 To find all files ending with ``'$Year.$DayOfYear'``, having set 2009 for
1880 the year::
1882 select_files(paths,
1883 include=r'(?P<year>\\d\\d\\d\\d)\\.(?P<doy>\\d\\d\\d)$',
1884 selector=(lambda x: int(x.year) == 2009))
1885 '''
1886 if regex is not None:
1887 assert include is None
1888 include = regex
1890 return list(iter_select_files(
1891 paths, include, exclude, selector, show_progress))
1894def base36encode(number, alphabet='0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'):
1895 '''
1896 Convert positive integer to a base36 string.
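
    For example (illustrative)::

        from pyrocko.util import base36encode, base36decode

        base36encode(12345)        # -> '9IX'
        base36decode('9IX')        # -> 12345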
1897 '''
1899 if not isinstance(number, (int, long)):
1900 raise TypeError('number must be an integer')
1901 if number < 0:
1902 raise ValueError('number must be positive')
1904 # Special case for small numbers
1905 if number < 36:
1906 return alphabet[number]
1908 base36 = ''
1909 while number != 0:
1910 number, i = divmod(number, 36)
1911 base36 = alphabet[i] + base36
1913 return base36
1916def base36decode(number):
1917 '''
    Decode base36 encoded positive integer.
1919 '''
1921 return int(number, 36)
1924class UnpackError(Exception):
1925 '''
1926 Exception raised when :py:func:`unpack_fixed` encounters an error.
1927 '''
1929 pass
1932ruler = ''.join(['%-10i' % i for i in range(8)]) \
1933 + '\n' + '0123456789' * 8 + '\n'
1936def unpack_fixed(format, line, *callargs):
1937 '''
1938 Unpack fixed format string, as produced by many fortran codes.
1940 :param format: format specification
1941 :param line: string to be processed
1942 :param callargs: callbacks for callback fields in the format
1944 The format is described by a string of comma-separated fields. Each field
1945 is defined by a character for the field type followed by the field width. A
    question mark may be appended to the field description to make the field
    optional (the data string is then allowed to be filled with blanks and
    ``None`` is returned in this case).
1950 The following field types are available:
1952 ==== ================================================================
1953 Type Description
1954 ==== ================================================================
1955 A string (full field width is extracted)
1956 a string (whitespace at the beginning and the end is removed)
1957 i integer value
1958 f floating point value
1959 @ special type, a callback must be given for the conversion
1960 x special field type to skip parts of the string
1961 ==== ================================================================
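
    A small sketch of typical usage (format and line are illustrative)::

        from pyrocko.util import unpack_fixed

        station, lat, lon = unpack_fixed(
            'a5,f9,f10', 'HAM3  53.4613  9.968572')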
1962 '''
1964 ipos = 0
1965 values = []
1966 icall = 0
1967 for form in format.split(','):
1968 form = form.strip()
1969 optional = form[-1] == '?'
1970 form = form.rstrip('?')
1971 typ = form[0]
1972 ln = int(form[1:])
1973 s = line[ipos:ipos+ln]
1974 cast = {
1975 'x': None,
1976 'A': str,
1977 'a': lambda x: x.strip(),
1978 'i': int,
1979 'f': float,
1980 '@': 'extra'}[typ]
1982 if cast == 'extra':
1983 cast = callargs[icall]
1984 icall += 1
1986 if cast is not None:
1987 if optional and s.strip() == '':
1988 values.append(None)
1989 else:
1990 try:
1991 values.append(cast(s))
1992 except Exception:
1993 mark = [' '] * 80
1994 mark[ipos:ipos+ln] = ['^'] * ln
1995 mark = ''.join(mark)
1996 raise UnpackError(
1997 'Invalid cast to type "%s" at position [%i:%i] of '
1998 'line: \n%s%s\n%s' % (
1999 typ, ipos, ipos+ln, ruler, line.rstrip(), mark))
2001 ipos += ln
2003 return values
2006_pattern_cache = {}
2009def _nslc_pattern(pattern):
2010 if pattern not in _pattern_cache:
2011 rpattern = re.compile(fnmatch.translate(pattern), re.I)
2012 _pattern_cache[pattern] = rpattern
2013 else:
2014 rpattern = _pattern_cache[pattern]
2016 return rpattern
2019def match_nslc(patterns, nslc):
2020 '''
2021 Match network-station-location-channel code against pattern or list of
2022 patterns.
2024 :param patterns: pattern or list of patterns
2025 :param nslc: tuple with (network, station, location, channel) as strings
2027 :returns: ``True`` if the pattern matches or if any of the given patterns
2028 match; or ``False``.
2030 The patterns may contain shell-style wildcards: \\*, ?, [seq], [!seq].
2032 Example::
2034 match_nslc('*.HAM3.*.BH?', ('GR', 'HAM3', '', 'BHZ')) # -> True
2035 '''
2037 if isinstance(patterns, str):
2038 patterns = [patterns]
2040 if not isinstance(nslc, str):
2041 s = '.'.join(nslc)
2042 else:
2043 s = nslc
2045 for pattern in patterns:
2046 if _nslc_pattern(pattern).match(s):
2047 return True
2049 return False
2052def match_nslcs(patterns, nslcs):
2053 '''
2054 Get network-station-location-channel codes that match given pattern or any
2055 of several given patterns.
2057 :param patterns: pattern or list of patterns
2058 :param nslcs: list of (network, station, location, channel) tuples
2060 See also :py:func:`match_nslc`
2061 '''
2063 matching = []
2064 for nslc in nslcs:
2065 if match_nslc(patterns, nslc):
2066 matching.append(nslc)
2068 return matching
2071class Timeout(Exception):
2072 pass
2075def create_lockfile(fn, timeout=None, timewarn=10.):
2076 t0 = time.time()
2078 while True:
2079 try:
2080 f = os.open(fn, os.O_CREAT | os.O_WRONLY | os.O_EXCL)
2081 os.close(f)
2082 return
2084 except OSError as e:
2085 if e.errno == errno.EEXIST:
2086 tnow = time.time()
2088 if timeout is not None and tnow - t0 > timeout:
2089 raise Timeout(
                        'Timeout (%gs) occurred while waiting to get exclusive '
2091 'access to: %s' % (timeout, fn))
2093 if timewarn is not None and tnow - t0 > timewarn:
2094 logger.warning(
2095 'Waiting since %gs to get exclusive access to: %s' % (
2096 timewarn, fn))
2098 timewarn *= 2
2100 time.sleep(0.01)
2101 else:
2102 raise
2105def delete_lockfile(fn):
2106 os.unlink(fn)
2109class Lockfile(Exception):
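
    '''
    Context manager guarding a section with an exclusively created lock file.

    Only a usage sketch (the path is illustrative); the lock is implemented
    via :py:func:`create_lockfile` and :py:func:`delete_lockfile`::

        from pyrocko.util import Lockfile

        with Lockfile('/tmp/my_program.lock'):
            ...  # exclusive access while inside the block
    '''
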
2111 def __init__(self, path, timeout=5, timewarn=10.):
2112 self._path = path
2113 self._timeout = timeout
2114 self._timewarn = timewarn
2116 def __enter__(self):
2117 create_lockfile(
2118 self._path, timeout=self._timeout, timewarn=self._timewarn)
2119 return None
2121 def __exit__(self, type, value, traceback):
2122 delete_lockfile(self._path)
2125class SoleError(Exception):
2126 '''
    Exception raised by objects of type :py:class:`Sole`, when a concurrent
2128 instance is running.
2129 '''
2131 pass
2134class Sole(object):
2135 '''
2136 Use POSIX advisory file locking to ensure that only a single instance of a
2137 program is running.
2139 :param pid_path: path to lockfile to be used
2141 Usage::
2143 from pyrocko.util import Sole, SoleError, setup_logging
2144 import os
2146 setup_logging('my_program')
2148 pid_path = os.path.join(os.environ['HOME'], '.my_program_lock')
2149 try:
2150 sole = Sole(pid_path)
        except SoleError as e:
            logger.fatal(str(e))
2154 sys.exit(1)
2155 '''
2157 def __init__(self, pid_path):
2158 self._pid_path = pid_path
2159 self._other_running = False
2160 ensuredirs(self._pid_path)
2161 self._lockfile = None
2162 self._os = os
2164 if not fcntl:
2165 raise SoleError(
2166 'Python standard library module "fcntl" not available on '
2167 'this platform.')
2169 self._fcntl = fcntl
2171 try:
2172 self._lockfile = os.open(self._pid_path, os.O_CREAT | os.O_WRONLY)
2173 except Exception:
2174 raise SoleError(
2175 'Cannot open lockfile (path = %s)' % self._pid_path)
2177 try:
2178 fcntl.lockf(self._lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
2180 except IOError:
2181 self._other_running = True
2182 try:
2183 f = open(self._pid_path, 'r')
2184 pid = f.read().strip()
2185 f.close()
2186 except Exception:
2187 pid = '?'
2189 raise SoleError('Other instance is running (pid = %s)' % pid)
2191 try:
2192 os.ftruncate(self._lockfile, 0)
            os.write(self._lockfile, ('%i\n' % os.getpid()).encode('ascii'))
2194 os.fsync(self._lockfile)
2196 except Exception:
2197 # the pid is only stored for user information, so this is allowed
2198 # to fail
2199 pass
2201 def __del__(self):
2202 if not self._other_running:
2203 if self._lockfile is not None:
2204 self._fcntl.lockf(self._lockfile, self._fcntl.LOCK_UN)
2205 self._os.close(self._lockfile)
2206 try:
2207 self._os.unlink(self._pid_path)
2208 except Exception:
2209 pass
2212re_escapequotes = re.compile(r"(['\\])")
2215def escapequotes(s):
2216 return re_escapequotes.sub(r"\\\1", s)
2219class TableWriter(object):
2220 '''
2221 Write table of space separated values to a file.
2223 :param f: file like object
2225 Strings containing spaces are quoted on output.
2226 '''
2228 def __init__(self, f):
2229 self._f = f
2231 def writerow(self, row, minfieldwidths=None):
2233 '''
2234 Write one row of values to underlying file.
2236 :param row: iterable of values
2237 :param minfieldwidths: minimum field widths for the values
        Each value in ``row`` is converted to a string and optionally padded
2240 with blanks. The resulting strings are output separated with blanks. If
2241 any values given are strings and if they contain whitespace, they are
2242 quoted with single quotes, and any internal single quotes are
2243 backslash-escaped.
2244 '''
2246 out = []
2248 for i, x in enumerate(row):
2249 w = 0
2250 if minfieldwidths and i < len(minfieldwidths):
2251 w = minfieldwidths[i]
2253 if isinstance(x, str):
2254 if re.search(r"\s|'", x):
2255 x = "'%s'" % escapequotes(x)
2257 x = x.ljust(w)
2258 else:
2259 x = str(x).rjust(w)
2261 out.append(x)
2263 self._f.write(' '.join(out).rstrip() + '\n')
2266class TableReader(object):
2268 '''
2269 Read table of space separated values from a file.
2271 :param f: file-like object
    This uses Python's shlex module to tokenize lines. Should deal correctly
2274 with quoted strings.
2275 '''
2277 def __init__(self, f):
2278 self._f = f
2279 self.eof = False
2281 def readrow(self):
2282 '''
2283 Read one row from the underlying file, tokenize it with shlex.
2285 :returns: tokenized line as a list of strings.
2286 '''
2288 line = self._f.readline()
2289 if not line:
2290 self.eof = True
2291 return []
        line = line.strip()
2293 if line.startswith('#'):
2294 return []
2296 return qsplit(line)
2299def gform(number, significant_digits=3):
2300 '''
2301 Pretty print floating point numbers.
2303 Align floating point numbers at the decimal dot.
2305 ::
2307 | -d.dde+xxx|
2308 | -d.dde+xx |
2309 |-ddd. |
2310 | -dd.d |
2311 | -d.dd |
2312 | -0.ddd |
2313 | -0.0ddd |
2314 | -0.00ddd |
2315 | -d.dde-xx |
2316 | -d.dde-xxx|
2317 | nan|
2320 The formatted string has length ``significant_digits * 2 + 6``.
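    For example (illustrative)::

        gform(0.00123)  # -> '   1.23E-03 '
        gform(5.5)      # -> '   5.5      '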
2321 '''
2323 no_exp_range = (pow(10., -1),
2324 pow(10., significant_digits))
2325 width = significant_digits+significant_digits-1+1+1+5
2327 if (no_exp_range[0] <= abs(number) < no_exp_range[1]) or number == 0.:
2328 s = ('%#.*g' % (significant_digits, number)).rstrip('0')
2329 else:
2330 s = '%.*E' % (significant_digits-1, number)
2331 s = (' '*(-s.find('.')+(significant_digits+1))+s).ljust(width)
2332 if s.strip().lower() == 'nan':
2333 s = 'nan'.rjust(width)
2334 return s
2337def human_bytesize(value):
2339 exts = 'Bytes kB MB GB TB PB EB ZB YB'.split()
2341 if value == 1:
2342 return '1 Byte'
2344 for i, ext in enumerate(exts):
2345 x = float(value) / 1000**i
2346 if round(x) < 10. and not value < 1000:
2347 return '%.1f %s' % (x, ext)
2348 if round(x) < 1000.:
2349 return '%.0f %s' % (x, ext)
2351 return '%i Bytes' % value
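# Illustrative examples for human_bytesize (values chosen arbitrarily):
#
#     human_bytesize(1)        # -> '1 Byte'
#     human_bytesize(950)      # -> '950 Bytes'
#     human_bytesize(5000000)  # -> '5.0 MB'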
2354re_compatibility = re.compile(
2355 r'!pyrocko\.(trace|gf\.(meta|seismosizer)|fomosto\.' +
2356 r'(dummy|poel|qseis|qssp))\.'
2357)
2360def pf_is_old(fn):
2361 oldstyle = False
2362 with open(fn, 'r') as f:
2363 for line in f:
2364 if re_compatibility.search(line):
2365 oldstyle = True
2367 return oldstyle
2370def pf_upgrade(fn):
2371 need = pf_is_old(fn)
2372 if need:
2373 fn_temp = fn + '.temp'
2375 with open(fn, 'r') as fin:
2376 with open(fn_temp, 'w') as fout:
2377 for line in fin:
2378 line = re_compatibility.sub('!pf.', line)
2379 fout.write(line)
2381 os.rename(fn_temp, fn)
2383 return need
2386def read_leap_seconds(tzfile='/usr/share/zoneinfo/right/UTC'):
2387 '''
2388 Extract leap second information from tzdata.
2390 Based on example at http://stackoverflow.com/questions/19332902/\
2391 extract-historic-leap-seconds-from-tzdata
2393 See also 'man 5 tzfile'.
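    Returns a list of ``(timestamp, cumulative_leap_seconds)`` pairs.
    Illustrative use (assumes the tzdata 'right' files are installed)::

        for t, n in read_leap_seconds():
            print(t, n)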
2394 '''
2396 from struct import unpack, calcsize
2397 out = []
2398 with open(tzfile, 'rb') as f:
2399 # read header
2400 fmt = '>4s c 15x 6l'
2401 (magic, format, ttisgmtcnt, ttisstdcnt, leapcnt, timecnt,
2402 typecnt, charcnt) = unpack(fmt, f.read(calcsize(fmt)))
2403 assert magic == 'TZif'.encode('US-ASCII'), 'Not a timezone file'
2405 # skip over some uninteresting data
2406 fmt = '>%(timecnt)dl %(timecnt)dB %(ttinfo)s %(charcnt)ds' % dict(
2407 timecnt=timecnt, ttinfo='lBB'*typecnt, charcnt=charcnt)
2408 f.read(calcsize(fmt))
2410 # read leap-seconds
2411 fmt = '>2l'
2412 for i in range(leapcnt):
2413 tleap, nleap = unpack(fmt, f.read(calcsize(fmt)))
2414 out.append((tleap-nleap+1, nleap))
2416 return out
2419class LeapSecondsError(Exception):
2420 pass
2423class LeapSecondsOutdated(LeapSecondsError):
2424 pass
2427class InvalidLeapSecondsFile(LeapSecondsOutdated):
2428 pass
2431def parse_leap_seconds_list(fn):
2432 data = []
2433 texpires = None
2434 try:
2435 t0 = int(round(str_to_time('1900-01-01 00:00:00')))
2436 except TimeStrError:
2437 t0 = int(round(str_to_time('1970-01-01 00:00:00'))) - 2208988800
2439 tnow = int(round(time.time()))
2441 if not op.exists(fn):
2442 raise LeapSecondsOutdated('no leap seconds file found')
2444 try:
2445 with open(fn, 'rb') as f:
2446 for line in f:
2447 if line.strip().startswith(b'<!DOCTYPE'):
2448 raise InvalidLeapSecondsFile('invalid leap seconds file')
2450 if line.startswith(b'#@'):
2451 texpires = int(line.split()[1]) + t0
2452 elif line.startswith(b'#') or len(line) < 5:
2453 pass
2454 else:
2455 toks = line.split()
2456 t = int(toks[0]) + t0
2457 nleap = int(toks[1]) - 10
2458 data.append((t, nleap))
2460 except IOError:
2461 raise LeapSecondsError('cannot read leap seconds file %s' % fn)
2463 if texpires is None or tnow > texpires:
2464 raise LeapSecondsOutdated('leap seconds list is outdated')
2466 return data
2469def read_leap_seconds2():
2470 from pyrocko import config
2471 conf = config.config()
2472 fn = conf.leapseconds_path
2473 url = conf.leapseconds_url
2474 # check for outdated default URL
2475 if url == 'http://www.ietf.org/timezones/data/leap-seconds.list':
2476 url = 'https://www.ietf.org/timezones/data/leap-seconds.list'
2477 logger.info(
2478 'Leap seconds default URL is now: %s\nUsing new default.' % url)
2480 for i in range(3):
2481 try:
2482 return parse_leap_seconds_list(fn)
2484 except LeapSecondsOutdated:
2485 try:
2486 logger.info('updating leap seconds list...')
2487 download_file(url, fn)
2489 except Exception as e:
2490 raise LeapSecondsError(
2491 'cannot download leap seconds list from %s to %s (%s)'
2492 % (url, fn, e))
2494 raise LeapSecondsError('Could not retrieve/read leap seconds file.')
2497def gps_utc_offset(t_utc):
2498 '''
2499 Time offset t_gps - t_utc for a given t_utc.
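    Example (illustrative; requires an up-to-date leap seconds list; the
    GPS-UTC offset has been 18 s since 2017-01-01)::

        t = str_to_time('2017-01-01 00:00:00')
        gps_utc_offset(t)  # -> 18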
2500 '''
2501 ls = read_leap_seconds2()
2502 i = 0
2503 if t_utc < ls[0][0]:
2504 return ls[0][1] - 1 - 9
2506 while i < len(ls) - 1:
2507 if ls[i][0] <= t_utc and t_utc < ls[i+1][0]:
2508 return ls[i][1] - 9
2509 i += 1
2511 return ls[-1][1] - 9
2514def utc_gps_offset(t_gps):
2515 '''
2516 Time offset t_utc - t_gps for a given t_gps.
2517 '''
2518 ls = read_leap_seconds2()
2520 if t_gps < ls[0][0] + ls[0][1] - 9:
2521 return - (ls[0][1] - 1 - 9)
2523 i = 0
2524 while i < len(ls) - 1:
2525 if ls[i][0] + ls[i][1] - 9 <= t_gps \
2526 and t_gps < ls[i+1][0] + ls[i+1][1] - 9:
2527 return - (ls[i][1] - 9)
2528 i += 1
2530 return - (ls[-1][1] - 9)
2533def make_iload_family(iload_fh, doc_fmt='FMT', doc_yielded_objects='FMT'):
2534 import itertools
2535 import glob
2536 from pyrocko.io.io_common import FileLoadError
2538 def iload_filename(filename, **kwargs):
2539 try:
2540 with open(filename, 'rb') as f:
2541 for cr in iload_fh(f, **kwargs):
2542 yield cr
2544 except FileLoadError as e:
2545 e.set_context('filename', filename)
2546 raise
2548 def iload_dirname(dirname, **kwargs):
2549 for entry in os.listdir(dirname):
2550 fpath = op.join(dirname, entry)
2551 if op.isfile(fpath):
2552 for cr in iload_filename(fpath, **kwargs):
2553 yield cr
2555 def iload_glob(pattern, **kwargs):
2557 for fn in glob.iglob(pattern):
2558 for cr in iload_filename(fn, **kwargs):
2559 yield cr
2561 def iload(source, **kwargs):
2562 if isinstance(source, str):
2563 if op.isdir(source):
2564 return iload_dirname(source, **kwargs)
2565 elif op.isfile(source):
2566 return iload_filename(source, **kwargs)
2567 else:
2568 return iload_glob(source, **kwargs)
2570 elif hasattr(source, 'read'):
2571 return iload_fh(source, **kwargs)
2572 else:
2573 return itertools.chain.from_iterable(
2574 iload(subsource, **kwargs) for subsource in source)
2576 iload_filename.__doc__ = '''
2577 Read %s information from named file.
2578 ''' % doc_fmt
2580 iload_dirname.__doc__ = '''
2581 Read %s information from directory of %s files.
2582 ''' % (doc_fmt, doc_fmt)
2584 iload_glob.__doc__ = '''
2585 Read %s information from files matching a glob pattern.
2586 ''' % doc_fmt
2588 iload.__doc__ = '''
2589     Load %s information from given source(s).
2591     The ``source`` can be specified as the name of a %s file, the name of a
2592     directory containing %s files, a glob pattern of %s files, an open
2593     filehandle or an iterator yielding any of the aforementioned sources.
2595 This function behaves as a generator yielding %s objects.
2596 ''' % (doc_fmt, doc_fmt, doc_fmt, doc_fmt, doc_yielded_objects)
2598 for f in iload_filename, iload_dirname, iload_glob, iload:
2599 f.__module__ = iload_fh.__module__
2601 return iload_filename, iload_dirname, iload_glob, iload
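# Illustrative sketch of how a format reader module might use
# make_iload_family (`iload_fh` and the format name are hypothetical):
#
#     def iload_fh(f, **kwargs):
#         ...  # parse an open file handle, yield objects
#
#     iload_filename, iload_dirname, iload_glob, iload = make_iload_family(
#         iload_fh, 'MYFORMAT', ':py:class:`MyObject`')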
2604class Inconsistency(Exception):
2605 pass
2608def consistency_check(list_of_tuples, message='values differ:'):
2609 '''
2610 Check for inconsistencies.
2612 Given a list of tuples, check that all tuple elements except for first one
2613 match. E.g. ``[('STA.N', 55.3, 103.2), ('STA.E', 55.3, 103.2)]`` would be
2614 valid because the coordinates at the two channels are the same.
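    For example (illustrative)::

        consistency_check([('STA.N', 55.3, 103.2), ('STA.E', 55.3, 103.2)])
        # passes silently

        consistency_check([('STA.N', 55.3, 103.2), ('STA.E', 55.4, 103.2)])
        # raises Inconsistency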
2615 '''
2617 if len(list_of_tuples) >= 2:
2618 if any(t[1:] != list_of_tuples[0][1:] for t in list_of_tuples[1:]):
2619 raise Inconsistency('%s\n' % message + '\n'.join(
2620 ' %s: %s' % (t[0], ', '.join(str(x) for x in t[1:]))
2621 for t in list_of_tuples))
2624class defaultzerodict(dict):
2625 def __missing__(self, k):
2626 return 0
2629def mostfrequent(x):
2630 c = defaultzerodict()
2631 for e in x:
2632 c[e] += 1
2634 return sorted(list(c.keys()), key=lambda k: c[k])[-1]
2637def consistency_merge(list_of_tuples,
2638 message='values differ:',
2639 error='raise',
2640 merge=mostfrequent):
2642 assert error in ('raise', 'warn', 'ignore')
2644 if len(list_of_tuples) == 0:
2645 raise Exception('cannot merge empty sequence')
2647 try:
2648 consistency_check(list_of_tuples, message)
2649 return list_of_tuples[0][1:]
2650 except Inconsistency as e:
2651 if error == 'raise':
2652 raise
2654 elif error == 'warn':
2655 logger.warning(str(e))
2657 return tuple([merge(x) for x in list(zip(*list_of_tuples))[1:]])
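# Illustrative example for consistency_merge: with error='warn', inconsistent
# values are merged column-wise using `mostfrequent`:
#
#     consistency_merge(
#         [('STA.N', 55.3), ('STA.E', 55.3), ('STA.Z', 55.4)],
#         error='warn')
#     # -> (55.3,) after logging a warning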
2660def short_to_list(nmax, it):
2661 import itertools
2663 if isinstance(it, list):
2664 return it
2666 li = []
2667 for i in range(nmax+1):
2668 try:
2669 li.append(next(it))
2670 except StopIteration:
2671 return li
2673 return itertools.chain(li, it)
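# Illustrative behaviour of short_to_list: short iterators are materialized,
# longer ones are returned as a chained iterator with the already consumed
# elements re-attached in front:
#
#     short_to_list(3, iter(range(2)))   # -> [0, 1]
#     short_to_list(3, iter(range(10)))  # -> iterator yielding 0, 1, ..., 9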
2676def parse_md(f):
2677 try:
2678 with open(op.join(
2679 op.dirname(op.abspath(f)),
2680 'README.md'), 'r') as readme:
2681 mdstr = readme.read()
2682 except IOError as e:
2683 return 'Failed to get README.md: %s' % e
2685     # Remove the title
2686 mdstr = re.sub(r'^# .*\n?', '', mdstr)
2687 # Append sphinx reference to `pyrocko.` modules
2688 mdstr = re.sub(r'`pyrocko\.(.*)`', r':py:mod:`pyrocko.\1`', mdstr)
2689 # Convert Subsections to toc-less rubrics
2690 mdstr = re.sub(r'## (.*)\n', r'.. rubric:: \1\n', mdstr)
2691 return mdstr
2694def mpl_show(plt):
2695 import matplotlib
2696 if matplotlib.get_backend().lower() == 'agg':
2697 logger.warning('Cannot show() when using matplotlib "agg" backend')
2698 else:
2699 plt.show()
2702g_re_qsplit = re.compile(
2703 r'"([^"\\]*(?:\\.[^"\\]*)*)"|\'([^\'\\]*(?:\\.[^\'\\]*)*)\'|(\S+)')
2704g_re_qsplit_sep = {}
2707def get_re_qsplit(sep):
2708 if sep is None:
2709 return g_re_qsplit
2710 else:
2711 if sep not in g_re_qsplit_sep:
2712 assert len(sep) == 1
2713 assert sep not in '\'"'
2714 esep = re.escape(sep)
2715 g_re_qsplit_sep[sep] = re.compile(
2716 r'"([^"\\]*(?:\\.[^"\\]*)*)"|\'([^\'\\]*(?:\\.[^\'\\]*)*)\'|'
2717 + r'([^' + esep + r']+|(?<=' + esep + r')(?=' + esep + r')|^(?=' + esep + r')|(?<=' + esep + r')$)') # noqa
2718 return g_re_qsplit_sep[sep]
2721g_re_trivial = re.compile(r'\A[^\'"\s]+\Z')
2722g_re_trivial_sep = {}
2725def get_re_trivial(sep):
2726 if sep is None:
2727 return g_re_trivial
2728 else:
2729         if sep not in g_re_trivial_sep:
2730 assert len(sep) == 1
2731 assert sep not in '\'"'
2732 esep = re.escape(sep)
2733 g_re_trivial_sep[sep] = re.compile(r'\A[^\'"' + esep + r']+\Z')
2735 return g_re_trivial_sep[sep]
2738g_re_escape_s = re.compile(r'([\\\'])')
2739g_re_unescape_s = re.compile(r'\\([\\\'])')
2740g_re_escape_d = re.compile(r'([\\"])')
2741g_re_unescape_d = re.compile(r'\\([\\"])')
2744def escape_s(s):
2745 '''
2746 Backslash-escape single-quotes and backslashes.
2748 Example: ``Jack's`` => ``Jack\\'s``
2750 '''
2751 return g_re_escape_s.sub(r'\\\1', s)
2754def unescape_s(s):
2755 '''
2756 Unescape backslash-escaped single-quotes and backslashes.
2758 Example: ``Jack\\'s`` => ``Jack's``
2759 '''
2760 return g_re_unescape_s.sub(r'\1', s)
2763def escape_d(s):
2764 '''
2765 Backslash-escape double-quotes and backslashes.
2767 Example: ``"Hello \\O/"`` => ``\\"Hello \\\\O/\\"``
2768 '''
2769 return g_re_escape_d.sub(r'\\\1', s)
2772def unescape_d(s):
2773 '''
2774 Unescape backslash-escaped double-quotes and backslashes.
2776 Example: ``\\"Hello \\\\O/\\"`` => ``"Hello \\O/"``
2777 '''
2778 return g_re_unescape_d.sub(r'\1', s)
2781def qjoin_s(it, sep=None):
2782 '''
2783 Join sequence of strings into a line, single-quoting non-trivial strings.
2785 Example: ``["55", "Sparrow's Island"]`` => ``"55 'Sparrow\\\\'s Island'"``
2786 '''
2787 re_trivial = get_re_trivial(sep)
2789 if sep is None:
2790 sep = ' '
2792 return sep.join(
2793 w if re_trivial.search(w) else "'%s'" % escape_s(w) for w in it)
2796def qjoin_d(it, sep=None):
2797 '''
2798 Join sequence of strings into a line, double-quoting non-trivial strings.
2800 Example: ``['55', 'Pete "The Robot" Smith']`` =>
2801     ``'55 "Pete \\\\"The Robot\\\\" Smith"'``
2802 '''
2803 re_trivial = get_re_trivial(sep)
2804 if sep is None:
2805 sep = ' '
2807 return sep.join(
2808 w if re_trivial.search(w) else '"%s"' % escape_d(w) for w in it)
2811def qsplit(s, sep=None):
2812 '''
2813 Split line into list of strings, allowing for quoted strings.
2815 Example: ``"55 'Sparrow\\\\'s Island'"`` =>
2816 ``["55", "Sparrow's Island"]``,
2817     ``'55 "Pete \\\\"The Robot\\\\" Smith"'`` =>
2818 ``['55', 'Pete "The Robot" Smith']``
2819 '''
2820 re_qsplit = get_re_qsplit(sep)
2821 return [
2822 (unescape_d(x[0]) or unescape_s(x[1]) or x[2])
2823 for x in re_qsplit.findall(s)]
2826g_have_warned_threadpoolctl = False
2829class threadpool_limits_dummy(object):
2831 def __init__(self, *args, **kwargs):
2832 pass
2834 def __enter__(self):
2835 global g_have_warned_threadpoolctl
2837 if not g_have_warned_threadpoolctl:
2838 logger.warning(
2839 'Cannot control number of BLAS threads because '
2840 '`threadpoolctl` module is not available. You may want to '
2841 'install `threadpoolctl`.')
2843 g_have_warned_threadpoolctl = True
2845 return self
2847 def __exit__(self, type, value, traceback):
2848 pass
2851def get_threadpool_limits():
2852 '''
2853     Try to import threadpoolctl.threadpool_limits, provide a dummy if unavailable.
2854 '''
2856 try:
2857 from threadpoolctl import threadpool_limits
2858 return threadpool_limits
2860 except ImportError:
2861 return threadpool_limits_dummy
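# Illustrative usage (limiting BLAS to a single thread; `limits` and
# `user_api` are parameters of threadpoolctl.threadpool_limits):
#
#     threadpool_limits = get_threadpool_limits()
#     with threadpool_limits(limits=1, user_api='blas'):
#         pass  # numpy/BLAS operations here run single-threaded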