# http://pyrocko.org - GPLv3
#
# The Pyrocko Developers, 21st Century
# ---|P------/S----------~Lg----------
        Request, build_opener, HTTPDigestAuthHandler, urlopen)  # noqa
except ImportError:
    from urllib import urlencode, quote, unquote  # noqa
    from urllib2 import (Request, build_opener, HTTPDigestAuthHandler,  # noqa
                         HTTPError, URLError, urlopen)  # noqa
self.__dict__ = urlerror.__dict__.copy()
    def __str__(self):
        return (
            'Requesting web resource failed and the problem could be '
            'related to SSL. Python standard libraries on some older '
            'systems (like Ubuntu 14.04) are known to have trouble '
            'with some SSL setups of today\'s servers: %s'
            % URLError.__str__(self))
    except URLError as e:
        if str(e).find('SSL') != -1:
            raise URLErrorSSL(e)
        else:
            raise
else:
    util_ext = None
except ImportError:
    from pyrocko import dummy_progressbar as progressbar_mod
except AttributeError:
    def num_full(shape, fill_value, dtype=None, order='C'):
        a = num.empty(shape, dtype=dtype, order=order)
        a.fill(fill_value)
        return a
except AttributeError:
    def num_full_like(arr, fill_value, dtype=None, order='K', subok=True):
        a = num.empty_like(arr, dtype=dtype, order=order, subok=subok)
        a.fill(fill_value)
        return a
''' Initialize logging.
    :param programname: program name to be written in log
    :param levelname: string indicating the logging level ('debug', 'info',
        'warning', 'error', 'critical')
This function is called at startup by most pyrocko programs to set up a consistent logging format. This is simply a shortcut to a call to :py:func:`logging.basicConfig()`. '''
global g_setup_logging_args
        'info': logging.INFO,
        'warning': logging.WARNING,
        'error': logging.ERROR,
        'critical': logging.CRITICAL}
        level=levels[levelname],
        format=programname+':%(name)-20s - %(levelname)-8s - %(message)s')
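# Illustrative usage (a sketch, not part of the original module; assumes it
# is importable as ``pyrocko.util``):
#
#     from pyrocko import util
#     util.setup_logging('my_program', 'info')
#     # subsequent log lines are prefixed like:
#     # my_program:pyrocko.pile           - WARNING  - ...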
''' Get arguments from previous call to setup_logging.
    These can be sent down to a worker process so it can set up its logging
    in the same way as the main process.
    '''
        force=False, method='download', stats=None,
        status_callback=None, entries_wanted=None,
        recursive=False, header=None):
else HTTPBasicAuth(username, password)
        'ntotal_files': 0,
        'nread_files': 0,
        'ntotal_bytes_all_files': 0,
        'nread_bytes_all_files': 0,
        'ntotal_bytes_current_file': 0,
        'nread_bytes_current_file': 0,
        'finished': False}
        raise DownloadError(
            'URL: %s appears to be a directory'
            ' but recursive download is False' % url)
r.raise_for_status()
if not fn.endswith('/')))
            if any(fn in wanted for wanted in entries_wanted)]
if dn.endswith('/') and dn not in ('./', '../')))
url, subdir=dn))
'Content-Length for %s' % url)
else:
r.raise_for_status()
                'HTTP header Content-Length: %i bytes does not match '
                'download size %i bytes' % (fsize, frx))
raise PathExists('path %s already exists' % fpath)
else:
else:
            return fsize
        else:
    except req_HTTPError as e:
        logging.warning("http error: %s" % e)
        raise DownloadError('could not download file(s) from: %s' % url)
finally:
        url, fpath, username=None, password=None, status_callback=None,
        **kwargs):
        url, fpath, username, password, recursive=False,
        status_callback=status_callback, **kwargs)
url, fpath, username=None, password=None, status_callback=None, **kwargs):
url, fpath, username, password, recursive=True, status_callback=status_callback, **kwargs)
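# Usage sketch for the two wrappers above (the names ``download_file`` and
# ``download_dir`` are assumed here; recursive=False/True is the only
# difference between them):
#
#     download_file('https://example.org/data/waveforms.mseed',
#                   '/tmp/waveforms.mseed')
#     download_dir('https://example.org/data/', '/tmp/data/')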
elif hasattr(num, 'float96'):
    hpfloat = num.float96
else:
    def hpfloat(x):
        raise Exception(
            'NumPy lacks support for float128 or float96 data type on this '
            'platform.')
''' Simple stopwatch to measure elapsed wall clock time.
Usage::
        s = Stopwatch()
        time.sleep(1)
        print(s())
        time.sleep(1)
        print(s())
    '''
self.start = time.time()
return time.time() - self.start
''' Paragraph and list-aware wrapping of text. '''
continue
        else:
            findent = listindents[ip]
            _indent = ' ' * len(findent)
else:
listindents[ip] is None or newlist[ip] is not None or listindents[ip+1] is None):
outlines.append('')
''' From IndentedHelpFormatter but using a different wrap method. '''
                '',
                '%-*s %s' % (self.help_position, opts, help_text),
                '']
        else:
                for line in help_lines])
return ''
        else:
            lines = ['']
['%*s%s' % (self.current_indent, '', line) for line in lines])
        label, ' ',
        progressbar_mod.Bar(marker='-', left='[', right=']'), ' ',
        progressbar_mod.Percentage(), ' ',
        progressbar_mod.ETA()]
''' Notify user that an operation has started.
:param label: name of the operation
    To be used in conjunction with :py:func:`progress_end`.
    '''
    sys.stderr.write(label)
    sys.stderr.flush()
''' Notify user that an operation has ended.
:param label: name of the operation
    To be used in conjunction with :py:func:`progress_beg`.
    '''
    sys.stderr.write(' done. %s\n' % label)
    sys.stderr.flush()
def arange2(start, stop, step, dtype=num.float, epsilon=1e-6, error='raise'):
    ''' Return evenly spaced numbers over a specified interval.
Like :py:func:`numpy.arange` but returning floating point numbers by default and with defined behaviour when stepsize is inconsistent with interval bounds. It is considered inconsistent if the difference between the closest multiple of ``step`` and ``stop`` is larger than ``epsilon * step``. Inconsistencies are handled according to the ``error`` parameter. If it is set to ``'raise'`` an exception of type :py:exc:`ArangeError` is raised. If it is set to ``'round'``, ``'floor'``, or ``'ceil'``, ``stop`` is silently changed to the closest, the next smaller, or next larger multiple of ``step``, respectively. '''
assert error in ('raise', 'round', 'floor', 'ceil')
    start = dtype(start)
    stop = dtype(stop)
    step = dtype(step)
rnd = {'floor': math.floor, 'ceil': math.ceil}.get(error, round)
    n = int(rnd((stop - start) / step)) + 1
    stop_check = start + (n-1) * step
    if error == 'raise' and abs(stop_check - stop) > step * epsilon:
        raise ArangeError(
            'inconsistent range specification: start=%g, stop=%g, step=%g'
            % (start, stop, step))
    x = num.arange(n, dtype=dtype)
    x *= step
    x += start
    return x
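# Examples (following the logic above):
#
#     arange2(0., 1., 0.25)                # -> [0., 0.25, 0.5, 0.75, 1.]
#     arange2(0., 1., 0.3, error='round')  # stop is snapped to 0.9
#     arange2(0., 1., 0.3)                 # raises ArangeError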
''' Fit piece-wise linear function to data.
    :param x,y: arrays with coordinates of data
    :param n_or_xnodes: int, number of segments or x coordinates of polyline
:returns: `(xnodes, ynodes, rms_error)` arrays with coordinates of polyline, root-mean-square error '''
        n = n_or_xnodes
        xmin = x.min()
        xmax = x.max()
        xnodes = num.linspace(xmin, xmax, n+1)
    else:
            num.logical_and(xmin_block <= x, x <= xmax_block))[0]
        else:
            num.logical_and(xmin_block <= x, x < xmax_block))[0]
''' Calculate definite integral of piece-wise linear function on intervals.
Use trapezoidal rule to calculate definite integral of a piece-wise linear function for a series of consecutive intervals. ``x_edges`` and ``x`` must be sorted.
    :param x_edges: array with edges of the intervals
    :param x,y: arrays with coordinates of piece-wise linear function's
        control points
    '''
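# Conceptual sketch of the trapezoidal rule described above, in plain NumPy
# (not a call into this module; all names below are illustrative):
#
#     import numpy as num
#     x = num.array([0., 1., 2.])        # control points of the polyline
#     y = num.array([0., 2., 0.])
#     x_edges = num.array([0., 1., 2.])  # two consecutive intervals
#     xs = num.union1d(x, x_edges)
#     ys = num.interp(xs, x, y)
#     parts = [
#         num.trapz(ys[(xs >= a) & (xs <= b)], xs[(xs >= a) & (xs <= b)])
#         for a, b in zip(x_edges[:-1], x_edges[1:])]
#     # parts == [1.0, 1.0] for this symmetric triangle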
    ''' Approximate first derivative of an array (fourth order, central FD).
    :param dt: sampling interval
    :param data: NumPy array with data samples
:returns: NumPy array with same shape as input
Interior points are approximated to fourth order, edge points to first order right- or left-sided respectively, points next to edge to second order central. '''
[-1., +8., 0., -8., 1.], [1.], data)[4:] / (12.*dt)
ddata[0] = 0.0
''' Approximate first derivative of an array (second order, central FD).
    :param dt: sampling interval
    :param data: NumPy array with data samples
:returns: NumPy array with same shape as input
Interior points are approximated to second order, edge points to first order right- or left-sided respectively.
Uses :py:func:`numpy.gradient`. '''
    ''' Approximate second derivative of an array (fourth order, central FD).
    :param dt: sampling interval
    :param data: NumPy array with data samples
:returns: NumPy array with same shape as input
Interior points are approximated to fourth order, next-to-edge points to second order, edge points repeated. '''
[-1., +16., -30., +16., -1.], [1.], data)[4:] / (12.*dt**2)
ddata[:] = 0.0
''' Approximate second derivative of an array (second order, central FD).
    :param dt: sampling interval
    :param data: NumPy array with data samples
:returns: NumPy array with same shape as input
Interior points are approximated to second order, edge points repeated. '''
[1., -2., 1.], [1.], data)[2:] / (dt**2)
ddata[:] = 0.0
''' Approximate 1st or 2nd derivative of an array.
    :param n: 1 for first derivative, 2 for second
    :param order: order of the approximation; 2 and 4 are supported
    :param dt: sampling interval
    :param data: NumPy array with data samples
:returns: NumPy array with same shape as input
This is a frontend to the functions :py:func:`diff_fd_1d_2o`, :py:func:`diff_fd_1d_4o`, :py:func:`diff_fd_2d_2o`, and :py:func:`diff_fd_2d_4o`.
Raises :py:exc:`ValueError` for unsupported `n` or `order`. '''
        1: {2: diff_fd_1d_2o, 4: diff_fd_1d_4o},
        2: {2: diff_fd_2d_2o, 4: diff_fd_2d_4o}}
    except KeyError:
        raise ValueError(
            'pyrocko.util.diff_fd: '
            'Only 1st and 2nd derivatives are supported.')
    except KeyError:
        raise ValueError(
            'pyrocko.util.diff_fd: '
            'Order %i is not supported for %s derivative. Supported: %s' % (
                order, ['', '1st', '2nd'][n],
                ', '.join('%i' % order for order in sorted(funcs_n.keys()))))
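# Example (using the dispatch above): differentiate a sampled sine and
# compare with its analytic derivative.
#
#     import numpy as num
#     dt = 0.01
#     t = num.arange(1000) * dt
#     ddata = diff_fd(1, 4, dt, num.sin(t))  # ~ num.cos(t) away from edges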
        else:
            n = 8
    else:
        coeffs = GlobalVars.decimate_iir_coeffs
        if (n, 0.05, 0.8/q) not in coeffs:
            coeffs[n, 0.05, 0.8/q] = signal.cheby1(n, 0.05, 0.8/q)
        b, a = coeffs[n, 0.05, 0.8/q]
        return b, a, n
    ''' Downsample the signal x by an integer factor q, using an order n
    filter.

    By default, an order 8 Chebyshev type I filter is used, or a 30 point FIR
    filter with Hamming window if ftype is 'fir'.
    :param x: the signal to be downsampled (1D NumPy array)
    :param q: the downsampling factor
    :param n: order of the filter (1 less than the length of the filter for a
        'fir' filter)
    :param ftype: type of the filter; can be 'iir' or 'fir'
:returns: the downsampled signal (1D NumPy array)
'''
    else:
        zi_ = zi
        return y[n//2+ioff::q].copy(), zf
    else:
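# Example (sketch): downsample a 100 Hz trace to 20 Hz with the default
# order-8 Chebyshev type I IIR filter:
#
#     import numpy as num
#     x = num.random.random(1000)   # 100 Hz samples
#     y = decimate(x, 5)            # -> roughly len(x)//5 samples at 20 Hz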
''' Exception raised by :py:func:`decitab` for unavailable decimation factors. '''
''' Greatest common divisor. '''
    while b > epsilon*a:
        a, b = b, a % b
return a
''' Least common multiple. '''
return a*b // gcd(a, b)
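# Quick checks of the two helpers above:
#
#     gcd(12, 18)   # -> 6
#     lcm(4, 6)     # -> 12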
''' Make table with decimation sequences.
    Decimation from one sampling rate to a lower one is achieved by a
    successive application of :py:func:`decimate` with small integer
    downsampling factors (because using large downsampling factors can make
    the decimation unstable or slow). This function sets up a table with
    downsample sequences for factors up to ``nmax``.
    '''
break
return False
return False
    ''' Check time range supported by the system's time conversion functions.
Returns system time stamps of start of year of first/last fully supported year span. If this is before 1900 or after 2100, return first/last century which is fully supported.
:returns: ``(tmin, tmax, year_min, year_max)`` '''
global g_working_system_time_range
else:
            break
    else:
_year_to_time(year_min), _year_to_time(year_max), year_min, year_max)
    tmin, tmax, _, _ = get_working_system_time_range()
    return tmin <= t <= tmax
''' Get the day number after the 1st of January of year in ``timestamp``.
:returns: day number as int '''
''' Get beginning of day for any point in time.
:param timestamp: time instant as system timestamp (in seconds)
:returns: instant of day start as system timestamp '''
''' Get beginning of month for any point in time.
:param timestamp: time instant as system timestamp (in seconds)
:returns: instant of month start as system timestamp '''
''' Get beginning of year for any point in time.
:param timestamp: time instant as system timestamp (in seconds)
:returns: instant of year start as system timestamp '''
''' Yields begin and end of days until given time span is covered.
:param tmin,tmax: input time span
:yields: tuples with (begin, end) of days as system timestamps '''
    t = day_start(tmin)
    while t < tmax:
        tend = day_start(t + 26*60*60)
        yield t, tend
        t = tend
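# Example (sketch, assuming the generator above is exposed as ``iter_days``
# and the time helpers defined further below):
#
#     tmin = str_to_time('2020-01-01 12:00:00')
#     tmax = str_to_time('2020-01-03 06:00:00')
#     for tb, te in iter_days(tmin, tmax):
#         print(time_to_str(tb), time_to_str(te))
#     # yields three day spans covering 2020-01-01 through 2020-01-03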
''' Yields begin and end of months until given time span is covered.
:param tmin,tmax: input time span
:yields: tuples with (begin, end) of months as system timestamps '''
''' Yields begin and end of years until given time span is covered.
:param tmin,tmax: input time span
:yields: tuples with (begin, end) of years as system timestamps '''
    ''' Get integer decimation sequence for a given downsampling factor.
:param n: target decimation factor
:returns: tuple with downsampling sequence '''
raise UnavailableDecimation('ratio = %g' % n)
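# Example (sketch; the exact factorisation depends on how the table above
# was built):
#
#     decitab(50)    # e.g. a sequence like (2, 5, 5) whose product is 50
#     decitab(1001)  # raises UnavailableDecimation if no sequence exists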
''' Convert string representing UTC time to system time.
    :param s: string to be interpreted
    :param format: format string passed to :py:func:`strptime`
:returns: system time stamp
    Interprets string with format ``'%Y-%m-%d %H:%M:%S'``, using strptime.
.. note:: This function is to be replaced by :py:func:`str_to_time`. '''
''' Get string representation from system time, UTC.
Produces string with format ``'%Y-%m-%d %H:%M:%S'``, using strftime.
    .. note:: This function is to be replaced by :py:func:`time_to_str`. '''
return time.strftime(format, time.gmtime(t))
''' Get string representation from system time, UTC. Same as :py:func:`gmctime` but with a more verbose default format.
.. note:: This function is to be replaced by :py:func:`time_to_str`. '''
return time.strftime(format, time.gmtime(t))
''' Get string representation from system time, UTC. Same as :py:func:`gmctime` but with a default usable in filenames.
.. note:: This function is to be replaced by :py:func:`time_to_str`. '''
return time.strftime(format, time.gmtime(t))
''' Exception raised by :py:func:`str_to_time` when the given string lacks fractional seconds. '''
''' Exception raised by :py:func:`str_to_time` when the given string has an incorrect number of digits in the fractional seconds part. '''
    for ix, x in enumerate(endings):
        if s.endswith(x):
            return ix

    return -1
''' Convert string representing UTC time to floating point system time.
    :param s: string representing UTC time
    :param format: time string format
    :returns: system time stamp as floating point value
    Uses the semantics of :py:func:`time.strptime` but allows for fractional
    seconds. If the format ends with ``'.FRAC'``, anything after a dot is
    interpreted as fractional seconds. If the format ends with
    ``'.OPTFRAC'``, the fractional part, including the dot, is made optional.
    The latter has the consequence that the time strings and the format may
    not contain any other dots. If the format ends with ``'.xFRAC'`` where x
    is 1, 2, or 3, it is ensured that exactly that number of digits is
    present in the fractional seconds.
    '''
raise TimeStrError( '%s, string=%s, format=%s' % (str(e), s, format))
    fracsec = 0.
    fixed_endings = '.FRAC', '.1FRAC', '.2FRAC', '.3FRAC'
    iend = _endswith_n(format, fixed_endings)
    if iend != -1:
        dotpos = s.rfind('.')
        if dotpos == -1:
            raise FractionalSecondsMissing(
                'string=%s, format=%s' % (s, format))
        if iend > 0 and iend != (len(s)-dotpos-1):
            raise FractionalSecondsWrongNumberOfDigits(
                'string=%s, format=%s' % (s, format))
        format = format[:-len(fixed_endings[iend])]
        fracsec = float(s[dotpos:])
        s = s[:dotpos]
    elif format.endswith('.OPTFRAC'):
        dotpos = s.rfind('.')
        format = format[:-8]
        if dotpos != -1 and len(s[dotpos:]) > 1:
            fracsec = float(s[dotpos:])
        if dotpos != -1:
            s = s[:dotpos]
    try:
        return calendar.timegm(time.strptime(s, format)) + fracsec
    except ValueError as e:
        raise TimeStrError(
            '%s, string=%s, format=%s' % (str(e), s, format))
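# Examples (based on the FRAC handling above):
#
#     str_to_time('2021-03-01 12:00:00', format='%Y-%m-%d %H:%M:%S')
#     str_to_time('2021-03-01 12:00:00.250',
#                 format='%Y-%m-%d %H:%M:%S.3FRAC')   # exactly 3 digits
#     str_to_time('2021-03-01 12:00:00',
#                 format='%Y-%m-%d %H:%M:%S.OPTFRAC')  # fraction optional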
''' Get string representation for floating point system time.
    :param t: floating point system time
    :param format: time string format
    :returns: string representing UTC time
Uses the semantics of :py:func:`time.strftime` but additionally allows for fractional seconds. If ``format`` contains ``'.xFRAC'``, where ``x`` is a digit between 1 and 9, this is replaced with the fractional part of ``t`` with ``x`` digits precision. '''
else:
        except util_ext.UtilExtError as e:
            raise TimeStrError(
                '%s, timestamp=%f, format=%s' % (str(e), t, format))
    if not GlobalVars.re_frac:
        GlobalVars.re_frac = re.compile(r'\.[1-9]FRAC')
        GlobalVars.frac_formats = dict(
            [('.%sFRAC' % x, '%.'+x+'f') for x in '123456789'])
    ts = float(num.floor(t))
    tfrac = t-ts
    m = GlobalVars.re_frac.search(format)
    if m:
        sfrac = (GlobalVars.frac_formats[m.group(0)] % tfrac)
        if sfrac[0] == '1':
            ts += 1.
format, nsub = GlobalVars.re_frac.subn(sfrac[1:], format, 1)
return time.strftime(format, time.gmtime(ts))
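# Example: round-trip a timestamp with millisecond precision.
#
#     t = str_to_time('2021-03-01 12:00:00.250',
#                     format='%Y-%m-%d %H:%M:%S.3FRAC')
#     time_to_str(t, format='%Y-%m-%d %H:%M:%S.3FRAC')
#     # -> '2021-03-01 12:00:00.250'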
tt = time.time()
    else:
        return 's'
''' Create all intermediate path components for a target path.
:param dst: target path
    The leaf part of the target path is not created (use :py:func:`ensuredir`
    if the target path is a directory to be created).
    '''
        except OSError as e:
            if not e.errno == errno.EEXIST:
                raise
''' Create directory and all intermediate path components to it as needed.
:param dst: directory name
Nothing is done if the given target already exists. '''
    except OSError as e:
        if not e.errno == errno.EEXIST:
            raise
''' Get unique instance of an object.
    :param x: hashable object
    :returns: reference to x or an equivalent object
Cache object ``x`` in a global dict for reuse, or if x already is in that dict, return a reference to it. '''
    grs = GlobalVars.reuse_store
    if x in grs:
        del grs[x]
''' Dict-to-object utility.
Any given arguments are stored as attributes.
Example::
        a = Anon(x=1, y=2)
        print(a.x, a.y)
    '''
        for k in dict:
            self.__dict__[k] = dict[k]
''' Recursively select files.
    :param paths: entry path names
    :param selector: callback for conditional inclusion
    :param regex: pattern for conditional inclusion
    :param show_progress: if True, indicate start and stop of processing
    :returns: list of path names
    Recursively finds all files under given entry points ``paths``. If
    parameter ``regex`` is a regular expression, only files with matching
    path names are included. If additionally parameter ``selector`` is given
    a callback function, only files for which the callback returns ``True``
    are included. The callback is called with a single argument, an object
    having as attributes any named groups given in ``regex``.
Examples
To find all files ending in ``'.mseed'`` or ``'.msd'``::
select_files(paths, regex=r'\\.(mseed|msd)$')
To find all files ending with ``'$Year.$DayOfYear'``, having set 2009 for the year::
        select_files(paths,
            regex=r'(?P<year>\\d\\d\\d\\d)\\.(?P<doy>\\d\\d\\d)$',
            selector=(lambda x: int(x.year) == 2009))
    '''
        progress_beg('selecting files...')
        if logger.isEnabledFor(logging.DEBUG):
            sys.stderr.write('\n')
                infos = Anon(**m.groupdict())
                logger.debug(" regex '%s' matches." % regex)
                for k, v in m.groupdict().items():
                    logger.debug(
                        " attribute '%s' has value '%s'" % (k, v))
                if selector is None or selector(infos):
                    good.append(os.path.abspath(path))
            else:
        else:
paths = [paths]
else:
progress_end('%i file%s selected.' % (len(good), plural_s(len(good))))
''' Convert positive integer to a base36 string. '''
        raise TypeError('number must be an integer')
    if number < 0:
        raise ValueError('number must be positive')
    # Special case for small numbers
    if number < len(alphabet):
        return alphabet[number]
    ''' Decode base36 encoded positive integer. '''
return int(number, 36)
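# Round-trip of the two helpers above (assuming the usual lowercase base36
# digit alphabet):
#
#     base36encode(123456)   # -> '2n9c'
#     base36decode('2n9c')   # -> 123456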
''' Exception raised when :py:func:`unpack_fixed` encounters an error. '''
+ '\n' + '0123456789' * 8 + '\n'
''' Unpack fixed format string, as produced by many fortran codes.
    :param format: format specification
    :param line: string to be processed
    :param callargs: callbacks for callback fields in the format
    The format is described by a string of comma-separated fields. Each field
    is defined by a character for the field type followed by the field width.
    A question mark may be appended to the field description to allow the
    argument to be optional (the data string is then allowed to be filled
    with blanks and ``None`` is returned in this case).
The following field types are available:
    ==== ================================================================
    Type Description
    ==== ================================================================
    A    string (full field width is extracted)
    a    string (whitespace at the beginning and the end is removed)
    i    integer value
    f    floating point value
    @    special type, a callback must be given for the conversion
    x    special field type to skip parts of the string
    ==== ================================================================
    '''
            'x': None,
            'A': str,
            'a': lambda x: x.strip(),
            'i': int,
            'f': float,
            '@': 'extra'}[typ]
            cast = callargs[icall]
            icall += 1
        else:
            except Exception:
                mark = [' '] * 80
                mark[ipos:ipos+ln] = ['^'] * ln
                mark = ''.join(mark)
                raise UnpackError(
                    'Invalid cast to type "%s" at position [%i:%i] of '
                    'line: \n%s%s\n%s' % (
                        typ, ipos, ipos+ln, ruler, line.rstrip(), mark))
else:
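# Example (a sketch; field widths are illustrative, and skipped 'x' fields
# are assumed not to be returned):
#
#     vals = unpack_fixed('a4,x2,f8,f8', 'HAM3   53.4610  9.7686')
#     # -> ['HAM3', 53.461, 9.7686]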
''' Match network-station-location-channel code against pattern or list of patterns.
    :param patterns: pattern or list of patterns
    :param nslc: tuple with (network, station, location, channel) as strings
:returns: ``True`` if the pattern matches or if any of the given patterns match; or ``False``.
The patterns may contain shell-style wildcards: \\*, ?, [seq], [!seq].
Example::
match_nslc('*.HAM3.*.BH?', ('GR', 'HAM3', '', 'BHZ')) # -> True '''
patterns = [patterns]
    else:
        s = nslc
''' Get network-station-location-channel codes that match given pattern or any of several given patterns.
    :param patterns: pattern or list of patterns
    :param nslcs: list of (network, station, location, channel) tuples
See also :py:func:`match_nslc` '''
    matching = []
    for nslc in nslcs:
        if match_nslc(patterns, nslc):
            matching.append(nslc)
return matching
    ''' Exception raised by objects of type :py:class:`Sole` when a
    concurrent instance is running. '''
''' Use POSIX advisory file locking to ensure that only a single instance of a program is running.
:param pid_path: path to lockfile to be used
Usage::
        from pyrocko.util import Sole, SoleError, setup_logging
        import os
setup_logging('my_program')
        pid_path = os.path.join(os.environ['HOME'], '.my_program_lock')
        try:
            sole = Sole(pid_path)
        except SoleError as e:
            logger.fatal(str(e))
            sys.exit(1)
    '''
        self._pid_path = pid_path
        self._other_running = False
        ensuredirs(self._pid_path)
        self._lockfile = None
        self._os = os
        self._fcntl = fcntl
        try:
            self._lockfile = os.open(self._pid_path, os.O_CREAT | os.O_WRONLY)
        except Exception:
            raise SoleError(
                'Cannot open lockfile (path = %s)' % self._pid_path)
        try:
            fcntl.lockf(self._lockfile, fcntl.LOCK_EX | fcntl.LOCK_NB)
        except IOError:
            self._other_running = True
            try:
                f = open(self._pid_path, 'r')
                pid = f.read().strip()
                f.close()
            except Exception:
                pid = '?'
raise SoleError('Other instance is running (pid = %s)' % pid)
        try:
            os.ftruncate(self._lockfile, 0)
            # encode for Python 3 compatibility (os.write expects bytes)
            os.write(self._lockfile, ('%i\n' % os.getpid()).encode('ascii'))
            os.fsync(self._lockfile)
        except Exception:
            # the pid is only stored for user information, so this is allowed
            # to fail
            pass
        if not self._other_running:
            if self._lockfile is not None:
                self._fcntl.lockf(self._lockfile, self._fcntl.LOCK_UN)
                self._os.close(self._lockfile)

            try:
                self._os.unlink(self._pid_path)
            except Exception:
                pass
return re_escapequotes.sub(r"\\\1", s)
''' Write table of space separated values to a file.
:param f: file like object
Strings containing spaces are quoted on output. '''
''' Write one row of values to underlying file.
    :param row: iterable of values
    :param minfieldwidths: minimum field widths for the values
    Each value in ``row`` is converted to a string and optionally padded with
    blanks. The resulting strings are output separated with blanks. If any
    values given are strings and if they contain whitespace, they are quoted
    with single quotes, and any internal single quotes are backslash-escaped.
    '''
x = "'%s'" % escapequotes(x)
else:
''' Read table of space separated values from a file.
:param f: file-like object
    This uses Python's shlex module to tokenize lines. Should deal correctly
    with quoted strings.
    '''
''' Read one row from the underlying file, tokenize it with shlex.
:returns: tokenized line as a list of strings. '''
''' Pretty print floating point numbers.
Align floating point numbers at the decimal dot.
::
        | -d.dde+xxx|
        | -d.dde+xx |
        |-ddd.      |
        | -dd.d     |
        | -d.dd     |
        | -0.ddd    |
        | -0.0ddd   |
        | -0.00ddd  |
        | -d.dde-xx |
        | -d.dde-xxx|
        |        nan|
The formatted string has length ``significant_digits * 2 + 6``. '''
pow(10., significant_digits))
    else:
        s = 'nan'.rjust(width)
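# Example (sketch; with 3 significant digits each field is 3*2 + 6 = 12
# characters wide and values are aligned at the decimal dot):
#
#     print('|%s|' % gform(-0.00123, 3))
#     print('|%s|' % gform(1234.5678, 3))
#     print('|%s|' % gform(float('nan'), 3))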
return '1 Byte'
return '%i Bytes' % value
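# Quick examples (the function name ``human_bytesize`` is assumed; based on
# the return statements above):
#
#     human_bytesize(1)      # -> '1 Byte'
#     human_bytesize(512)    # e.g. '512 Bytes'
#     human_bytesize(2048)   # larger values are scaled to kB/MB/...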
    r'!pyrocko\.(trace|gf\.(meta|seismosizer)|fomosto\.'
    + r'(dummy|poel|qseis|qssp))\.')
oldstyle = True
fn_temp = fn + '.temp'
    with open(fn, 'r') as fin:
        with open(fn_temp, 'w') as fout:
            for line in fin:
                line = re_compatibility.sub('!pf.', line)
                fout.write(line)
os.rename(fn_temp, fn)
''' Extract leap second information from tzdata.
    Based on example at http://stackoverflow.com/questions/19332902/\
    extract-historic-leap-seconds-from-tzdata
See also 'man 5 tzfile'. '''
        # read header
        typecnt, charcnt) = unpack(fmt, f.read(calcsize(fmt)))
        # skip over some uninteresting data
        timecnt=timecnt, ttinfo='lBB'*typecnt, charcnt=charcnt)
# read leap-seconds
    except TimeStrError:
        t0 = int(round(str_to_time('1970-01-01 00:00:00'))) - 2208988800
raise LeapSecondsOutdated('no leap seconds file found')
raise InvalidLeapSecondsFile('invalid leap seconds file')
else:
except IOError: raise LeapSecondsError('cannot read leap seconds file %s' % fn)
raise LeapSecondsOutdated('leap seconds list is outdated')
        # check for outdated default URL
        url = 'https://www.ietf.org/timezones/data/leap-seconds.list'
        logger.info(
            'Leap seconds default URL is now: %s\nUsing new default.' % url)
    except Exception as e:
        raise LeapSecondsError(
            'cannot download leap seconds list from %s to %s (%s)' % (
                url, fn, e))
raise LeapSecondsError('Could not retrieve/read leap seconds file.')
'''Time offset t_gps - t_utc for a given t_utc.'''
'''Time offset t_utc - t_gps for a given t_gps.'''
and t_gps < ls[i+1][0] + ls[i+1][1] - 9:
    except FileLoadError as e:
        e.set_context('filename', filename)
        raise
    for entry in os.listdir(dirname):
        fpath = op.join(dirname, entry)
        if op.isfile(fpath):
            for cr in iload_filename(fpath, **kwargs):
                yield cr
    fns = glob.glob(pattern)
    for fn in fns:
        for cr in iload_filename(fn, **kwargs):
            yield cr
            return iload_dirname(source, **kwargs)
        else:
            return iload_glob(source, **kwargs)
        return iload_fh(source, **kwargs)
    else:
            iload(subsource, **kwargs) for subsource in source)
    Read %s information from named file.
    ''' % doc_fmt
    Read %s information from directory of %s files.
    ''' % (doc_fmt, doc_fmt)
    Read %s information from files matching a glob pattern.
    ''' % doc_fmt
    Load %s information from given source(s).
    The ``source`` can be specified as the name of a %s file, the name of a
    directory containing %s files, a glob pattern of %s files, an open
    filehandle or an iterator yielding any of the aforementioned sources.
    This function behaves as a generator yielding %s objects.
    ''' % (doc_fmt, doc_fmt, doc_fmt, doc_fmt, doc_yielded_objects)
''' Check for inconsistencies.
    Given a list of tuples, check that all tuple elements except for the
    first one match. E.g. ``[('STA.N', 55.3, 103.2), ('STA.E', 55.3, 103.2)]``
    would be valid because the coordinates at the two channels are the same.
    '''
    raise Inconsistency('%s\n' % message + '\n'.join(
        '  %s: %s' % (t[0], ', '.join('%g' % x for x in t[1:]))
        for t in list_of_tuples))
def consistency_merge(list_of_tuples, message='values differ:',
                      error='raise', merge=mostfrequent):
assert error in ('raise', 'warn', 'ignore')
raise Exception('cannot merge empty sequence')
        if error == 'raise':
            raise
logger.warning(str(e))
            op.dirname(op.abspath(f)), 'README.md'), 'r') as readme:
    except IOError as e:
        return 'Failed to get README.md: %s' % e
    # Remove the title
    # Append sphinx reference to `pyrocko.` modules
    # Convert Subsections to toc-less rubrics
    else:
        plt.show()